Mirror of https://github.com/prometheus/prometheus.git (synced 2024-11-10 15:44:05 -08:00)

Merge pull request #3152 from Gouthamve/go-kit/log

Move logging to go-kit logger

Commit 6c0070986d
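The diff below converts every logging call from the old prometheus/common/log wrapper to go-kit's structured logger. A minimal sketch of the two call styles, assuming the go-kit/kit v0.x log packages used at the time (the filename value is illustrative):

package main

import (
    "os"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
)

func main() {
    // A go-kit logger is an interface with one method: Log(keyvals ...interface{}).
    // Output is logfmt key/value pairs rather than a formatted message string.
    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

    // Old style (prometheus/common/log):
    //   logger.Infof("Loading configuration file %s", filename)
    // New style (go-kit): an explicit level wrapper plus key/value pairs.
    level.Info(logger).Log("msg", "Loading configuration file", "filename", "prometheus.yml")
}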
@@ -28,14 +28,18 @@ import (
     "time"
 
     "github.com/asaskevich/govalidator"
+    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
     "github.com/pkg/errors"
     "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/common/log"
     "github.com/prometheus/common/model"
     "github.com/prometheus/common/version"
     "golang.org/x/net/context"
     "gopkg.in/alecthomas/kingpin.v2"
+    k8s_runtime "k8s.io/apimachinery/pkg/util/runtime"
 
+    "github.com/prometheus/common/promlog"
+    promlogflag "github.com/prometheus/common/promlog/flag"
     "github.com/prometheus/prometheus/config"
     "github.com/prometheus/prometheus/notifier"
     "github.com/prometheus/prometheus/promql"

@@ -86,8 +90,7 @@ func main() {
 
         prometheusURL string
 
-        logFormat string
-        logLevel  string
+        logLevel promlog.AllowedLevel
     }{
         notifier: notifier.Options{
             Registerer: prometheus.DefaultRegisterer,

@@ -100,14 +103,6 @@ func main() {
 
     a.HelpFlag.Short('h')
 
-    a.Flag("log.level",
-        "Only log messages with the given severity or above. One of: [debug, info, warn, error, fatal]").
-        Default("info").StringVar(&cfg.logLevel)
-
-    a.Flag("log.format",
-        `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`).
-        Default("logger:stderr").StringVar(&cfg.logFormat)
-
     a.Flag("config.file", "Prometheus configuration file path.").
         Default("prometheus.yml").StringVar(&cfg.configFile)
 

@@ -175,6 +170,8 @@ func main() {
     a.Flag("query.max-concurrency", "Maximum number of queries executed concurrently.").
        Default("20").IntVar(&cfg.queryEngine.MaxConcurrentQueries)
 
+    promlogflag.AddFlags(a, &cfg.logLevel)
+
     _, err := a.Parse(os.Args[1:])
     if err != nil {
         a.Usage(os.Args[1:])
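The hand-rolled --log.level/--log.format flags above are replaced by a single promlogflag.AddFlags call. A sketch of that wiring, assuming the common/promlog and promlog/flag APIs as used in this commit:

package main

import (
    "os"

    "github.com/prometheus/common/promlog"
    promlogflag "github.com/prometheus/common/promlog/flag"
    "gopkg.in/alecthomas/kingpin.v2"
)

func main() {
    var logLevel promlog.AllowedLevel

    a := kingpin.New("example", "promlog flag wiring sketch")
    // Registers a --log.level flag whose value parses into AllowedLevel.
    promlogflag.AddFlags(a, &logLevel)
    if _, err := a.Parse(os.Args[1:]); err != nil {
        a.Usage(os.Args[1:])
        os.Exit(2)
    }

    // promlog.New builds a go-kit logger filtered to the requested level.
    logger := promlog.New(logLevel)
    logger.Log("msg", "logger ready")
}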
@@ -203,13 +200,20 @@ func main() {
 
     cfg.queryEngine.Timeout = time.Duration(cfg.queryTimeout)
 
-    logger := log.NewLogger(os.Stdout)
-    logger.SetLevel(cfg.logLevel)
-    logger.SetFormat(cfg.logFormat)
+    logger := promlog.New(cfg.logLevel)
 
-    logger.Infoln("Starting prometheus", version.Info())
-    logger.Infoln("Build context", version.BuildContext())
-    logger.Infoln("Host details", Uname())
+    // XXX(fabxc): Kubernetes does background logging which we can only customize by modifying
+    // a global variable.
+    // Ultimately, here is the best place to set it.
+    k8s_runtime.ErrorHandlers = []func(error){
+        func(err error) {
+            level.Error(log.With(logger, "component", "k8s_client_runtime")).Log("err", err)
+        },
+    }
+
+    level.Info(logger).Log("msg", "Starting prometheus", "version", version.Info())
+    level.Info(logger).Log("build_context", version.BuildContext())
+    level.Info(logger).Log("host_details", Uname())
 
     var (
         // sampleAppender = storage.Fanout{}
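The hunk above builds the root logger from the parsed --log.level and also routes client-go's background errors through it. A minimal sketch of the level-filtering behaviour that logger relies on, using go-kit v0.x APIs (the messages are illustrative):

package main

import (
    "os"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
)

func main() {
    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

    // level.NewFilter drops records below the configured threshold;
    // this is roughly what promlog.New does with the parsed --log.level.
    logger = level.NewFilter(logger, level.AllowInfo())

    level.Debug(logger).Log("msg", "suppressed by the filter")
    level.Info(logger).Log("msg", "Starting prometheus", "version", "x.y.z")
    level.Error(logger).Log("msg", "Opening TSDB failed", "err", "example error")
}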
@@ -221,22 +225,30 @@ func main() {
     hup := make(chan os.Signal)
     hupReady := make(chan bool)
     signal.Notify(hup, syscall.SIGHUP)
-    logger.Infoln("Starting tsdb")
-    localStorage, err := tsdb.Open(cfg.localStoragePath, prometheus.DefaultRegisterer, &cfg.tsdb)
+    level.Info(logger).Log("msg", "Starting TSDB")
 
+    localStorage, err := tsdb.Open(
+        cfg.localStoragePath,
+        log.With(logger, "component", "tsdb"),
+        prometheus.DefaultRegisterer,
+        &cfg.tsdb,
+    )
     if err != nil {
-        log.Errorf("Opening storage failed: %s", err)
+        level.Error(logger).Log("msg", "Opening TSDB failed", "err", err)
         os.Exit(1)
     }
-    logger.Infoln("tsdb started")
 
-    remoteStorage := &remote.Storage{}
+    level.Info(logger).Log("msg", "TSDB succesfully started")
+
+    remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"))
     reloadables = append(reloadables, remoteStorage)
-    fanoutStorage := storage.NewFanout(tsdb.Adapter(localStorage), remoteStorage)
+    fanoutStorage := storage.NewFanout(logger, tsdb.Adapter(localStorage), remoteStorage)
 
-    cfg.queryEngine.Logger = logger
+    cfg.queryEngine.Logger = log.With(logger, "component", "query engine")
     var (
-        notifier       = notifier.New(&cfg.notifier, logger)
-        targetManager  = retrieval.NewTargetManager(fanoutStorage, logger)
+        notifier       = notifier.New(&cfg.notifier, log.With(logger, "component", "notifier"))
+        targetManager  = retrieval.NewTargetManager(fanoutStorage, log.With(logger, "component", "target manager"))
         queryEngine    = promql.NewEngine(fanoutStorage, &cfg.queryEngine)
         ctx, cancelCtx = context.WithCancel(context.Background())
     )
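Each subsystem above now receives a child logger carrying a fixed component label. A small sketch of the log.With scoping pattern (go-kit v0.x; the component names mirror the ones in the diff):

package main

import (
    "os"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
)

func main() {
    root := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

    // log.With returns a child logger that prepends fixed key/value pairs,
    // so every line from a subsystem carries its component label.
    tsdbLogger := log.With(root, "component", "tsdb")
    webLogger := log.With(root, "component", "web")

    level.Info(tsdbLogger).Log("msg", "compaction done") // component=tsdb ...
    level.Warn(webLogger).Log("msg", "slow request")     // component=web ...
}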
@@ -247,7 +259,7 @@ func main() {
         QueryEngine: queryEngine,
         Context:     ctx,
         ExternalURL: cfg.web.ExternalURL,
-        Logger:      logger,
+        Logger:      log.With(logger, "component", "rule manager"),
     })
 
     cfg.web.Context = ctx

@@ -271,12 +283,12 @@ func main() {
         cfg.web.Flags[f.Name] = f.Value.String()
     }
 
-    webHandler := web.New(&cfg.web)
+    webHandler := web.New(log.With(logger, "component", "web"), &cfg.web)
 
     reloadables = append(reloadables, targetManager, ruleManager, webHandler, notifier)
 
     if err := reloadConfig(cfg.configFile, logger, reloadables...); err != nil {
-        logger.Errorf("Error loading config: %s", err)
+        level.Error(logger).Log("msg", "Error loading config", "err", err)
         os.Exit(1)
     }
 

@@ -289,11 +301,11 @@ func main() {
             select {
             case <-hup:
                 if err := reloadConfig(cfg.configFile, logger, reloadables...); err != nil {
-                    logger.Errorf("Error reloading config: %s", err)
+                    level.Error(logger).Log("msg", "Error reloading config", "err", err)
                 }
             case rc := <-webHandler.Reload():
                 if err := reloadConfig(cfg.configFile, logger, reloadables...); err != nil {
-                    logger.Errorf("Error reloading config: %s", err)
+                    level.Error(logger).Log("msg", "Error reloading config", "err", err)
                     rc <- err
                 } else {
                     rc <- nil

@@ -305,7 +317,7 @@ func main() {
     // Start all components. The order is NOT arbitrary.
     defer func() {
         if err := fanoutStorage.Close(); err != nil {
-            log.Errorln("Error stopping storage:", err)
+            level.Error(logger).Log("msg", "Closing storage(s) failed", "err", err)
         }
     }()
 

@@ -337,20 +349,20 @@ func main() {
 
     // Set web server to ready.
     webHandler.Ready()
-    log.Info("Server is ready to receive requests.")
+    level.Info(logger).Log("msg", "Server is ready to receive requests.")
 
     term := make(chan os.Signal)
     signal.Notify(term, os.Interrupt, syscall.SIGTERM)
     select {
     case <-term:
-        logger.Warn("Received SIGTERM, exiting gracefully...")
+        level.Warn(logger).Log("msg", "Received SIGTERM, exiting gracefully...")
     case <-webHandler.Quit():
-        logger.Warn("Received termination request via web service, exiting gracefully...")
+        level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...")
     case err := <-errc:
-        logger.Errorln("Error starting web server, exiting gracefully:", err)
+        level.Error(logger).Log("msg", "Error starting web server, exiting gracefully", "err", err)
     }
 
-    logger.Info("See you next time!")
+    level.Info(logger).Log("msg", "See you next time!")
 }
 
 // Reloadable things can change their internal state to match a new config

@@ -360,7 +372,8 @@ type Reloadable interface {
 }
 
 func reloadConfig(filename string, logger log.Logger, rls ...Reloadable) (err error) {
-    logger.Infof("Loading configuration file %s", filename)
+    level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)
+
     defer func() {
         if err == nil {
             configSuccess.Set(1)

@@ -378,7 +391,7 @@ func reloadConfig(filename string, logger log.Logger, rls ...Reloadable) (err error) {
     failed := false
     for _, rl := range rls {
         if err := rl.ApplyConfig(conf); err != nil {
-            logger.Error("Failed to apply configuration: ", err)
+            level.Error(logger).Log("msg", "Failed to apply configuration", "err", err)
            failed = true
         }
     }

@@ -23,8 +23,9 @@ import (
     "github.com/Azure/azure-sdk-for-go/arm/network"
     "github.com/Azure/go-autorest/autorest/azure"
 
+    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
     "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/common/log"
     "github.com/prometheus/common/model"
     "golang.org/x/net/context"
 

@@ -71,6 +72,9 @@ type Discovery struct {
 
 // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets.
 func NewDiscovery(cfg *config.AzureSDConfig, logger log.Logger) *Discovery {
+    if logger == nil {
+        logger = log.NewNopLogger()
+    }
     return &Discovery{
         cfg:      cfg,
         interval: time.Duration(cfg.RefreshInterval),
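Every discovery constructor in this diff gains the same nil-logger guard. A trimmed sketch of the pattern, with a hypothetical Discovery stand-in for the SD types:

package sketch

import "github.com/go-kit/kit/log"

// Discovery is a trimmed stand-in for the service-discovery types in this diff.
type Discovery struct {
    logger log.Logger
}

// NewDiscovery tolerates a nil logger so callers (tests in particular) don't
// have to construct one; NewNopLogger discards everything it is given.
func NewDiscovery(logger log.Logger) *Discovery {
    if logger == nil {
        logger = log.NewNopLogger()
    }
    return &Discovery{logger: logger}
}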
@@ -93,7 +97,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
         tg, err := d.refresh()
         if err != nil {
-            d.logger.Errorf("unable to refresh during Azure discovery: %s", err)
+            level.Error(d.logger).Log("msg", "Unable to refresh during Azure discovery", "err", err)
         } else {
             select {
             case <-ctx.Done():

@@ -149,7 +153,7 @@ func newAzureResourceFromID(id string, logger log.Logger) (azureResource, error) {
     s := strings.Split(id, "/")
     if len(s) != 9 {
         err := fmt.Errorf("invalid ID '%s'. Refusing to create azureResource", id)
-        logger.Error(err)
+        level.Error(logger).Log("err", err)
         return azureResource{}, err
     }
     return azureResource{

@@ -159,6 +163,8 @@ func newAzureResourceFromID(id string, logger log.Logger) (azureResource, error) {
 }
 
 func (d *Discovery) refresh() (tg *config.TargetGroup, err error) {
+    defer level.Debug(d.logger).Log("msg", "Azure discovery completed")
+
     t0 := time.Now()
     defer func() {
         azureSDRefreshDuration.Observe(time.Since(t0).Seconds())

@@ -187,7 +193,7 @@ func (d *Discovery) refresh() (tg *config.TargetGroup, err error) {
         }
         machines = append(machines, *result.Value...)
     }
-    d.logger.Debugf("Found %d virtual machines during Azure discovery.", len(machines))
+    level.Debug(d.logger).Log("msg", "Found virtual machines during Azure discovery.", "count", len(machines))
 
     // We have the slice of machines. Now turn them into targets.
     // Doing them in go routines because the network interface calls are slow.

@@ -228,7 +234,7 @@ func (d *Discovery) refresh() (tg *config.TargetGroup, err error) {
             }
             networkInterface, err := client.nic.Get(r.ResourceGroup, r.Name, "")
             if err != nil {
-                d.logger.Errorf("Unable to get network interface %s: %s", r.Name, err)
+                level.Error(d.logger).Log("msg", "Unable to get network interface", "name", r.Name, "err", err)
                 ch <- target{labelSet: nil, err: err}
                 // Get out of this routine because we cannot continue without a network interface.
                 return

@@ -239,7 +245,7 @@ func (d *Discovery) refresh() (tg *config.TargetGroup, err error) {
             // yet support this. On deallocated machines, this value happens to be nil so it
             // is a cheap and easy way to determine if a machine is allocated or not.
             if networkInterface.Properties.Primary == nil {
-                d.logger.Debugf("Virtual machine %s is deallocated. Skipping during Azure SD.", *vm.Name)
+                level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", *vm.Name)
                 ch <- target{}
                 return
             }

@@ -274,6 +280,5 @@ func (d *Discovery) refresh() (tg *config.TargetGroup, err error) {
         }
     }
 
-    d.logger.Debugf("Azure discovery completed.")
     return tg, nil
 }

@@ -21,9 +21,10 @@ import (
     "strings"
     "time"
 
+    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
     consul "github.com/hashicorp/consul/api"
     "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/common/log"
     "github.com/prometheus/common/model"
     "github.com/prometheus/prometheus/config"
     "github.com/prometheus/prometheus/util/httputil"

@@ -97,6 +98,10 @@ type Discovery struct {
 
 // NewDiscovery returns a new Discovery for the given config.
 func NewDiscovery(conf *config.ConsulSDConfig, logger log.Logger) (*Discovery, error) {
+    if logger == nil {
+        logger = log.NewNopLogger()
+    }
+
     tls, err := httputil.NewTLSConfig(conf.TLSConfig)
     if err != nil {
         return nil, err

@@ -168,7 +173,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
         }
 
         if err != nil {
-            d.logger.Errorf("Error refreshing service list: %s", err)
+            level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err)
             rpcFailuresCount.Inc()
             time.Sleep(retryInterval)
             continue

@@ -184,7 +189,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
         if d.clientDatacenter == "" {
             info, err := d.client.Agent().Self()
             if err != nil {
-                d.logger.Errorf("Error retrieving datacenter name: %s", err)
+                level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
                 time.Sleep(retryInterval)
                 continue
             }

@@ -265,7 +270,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*config.TargetGroup) {
         }
 
         if err != nil {
-            srv.logger.Errorf("Error refreshing service %s: %s", srv.name, err)
+            level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "err", err)
             rpcFailuresCount.Inc()
             time.Sleep(retryInterval)
             continue

@@ -16,14 +16,13 @@ package consul
 import (
     "testing"
 
-    "github.com/prometheus/common/log"
     "github.com/prometheus/prometheus/config"
 )
 
 func TestConfiguredService(t *testing.T) {
     conf := &config.ConsulSDConfig{
         Services: []string{"configuredServiceName"}}
-    consulDiscovery, err := NewDiscovery(conf, log.Base())
+    consulDiscovery, err := NewDiscovery(conf, nil)
 
     if err != nil {
         t.Errorf("Unexpected error when initialising discovery %v", err)

@@ -38,7 +37,7 @@ func TestConfiguredService(t *testing.T) {
 
 func TestNonConfiguredService(t *testing.T) {
     conf := &config.ConsulSDConfig{}
-    consulDiscovery, err := NewDiscovery(conf, log.Base())
+    consulDiscovery, err := NewDiscovery(conf, nil)
 
     if err != nil {
         t.Errorf("Unexpected error when initialising discovery %v", err)
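Because of the nil-logger guard, the tests above can drop their common/log dependency and simply pass nil. A hypothetical test in the same style, relying on the NewDiscovery signature from this diff:

package consul

import (
    "testing"

    "github.com/prometheus/prometheus/config"
)

// Hypothetical test: the nil logger is replaced by log.NewNopLogger() inside
// NewDiscovery, so discoveries constructed in tests stay silent.
func TestDiscoveryAcceptsNilLogger(t *testing.T) {
    if _, err := NewDiscovery(&config.ConsulSDConfig{}, nil); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
}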
@@ -18,7 +18,8 @@ import (
     "sync"
     "time"
 
-    "github.com/prometheus/common/log"
+    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
     "github.com/prometheus/prometheus/config"
     "github.com/prometheus/prometheus/discovery/azure"
     "github.com/prometheus/prometheus/discovery/consul"

@@ -59,68 +60,68 @@ func ProvidersFromConfig(cfg config.ServiceDiscoveryConfig, logger log.Logger) m
     }
 
     for i, c := range cfg.DNSSDConfigs {
-        app("dns", i, dns.NewDiscovery(c, logger))
+        app("dns", i, dns.NewDiscovery(c, log.With(logger, "discovery", "dns")))
     }
     for i, c := range cfg.FileSDConfigs {
-        app("file", i, file.NewDiscovery(c, logger))
+        app("file", i, file.NewDiscovery(c, log.With(logger, "discovery", "file")))
     }
     for i, c := range cfg.ConsulSDConfigs {
-        k, err := consul.NewDiscovery(c, logger)
+        k, err := consul.NewDiscovery(c, log.With(logger, "discovery", "consul"))
         if err != nil {
-            logger.Errorf("Cannot create Consul discovery: %s", err)
+            level.Error(logger).Log("msg", "Cannot create Consul discovery", "err", err)
             continue
         }
         app("consul", i, k)
     }
     for i, c := range cfg.MarathonSDConfigs {
-        m, err := marathon.NewDiscovery(c, logger)
+        m, err := marathon.NewDiscovery(c, log.With(logger, "discovery", "marathon"))
         if err != nil {
-            logger.Errorf("Cannot create Marathon discovery: %s", err)
+            level.Error(logger).Log("msg", "Cannot create Marathon discovery", "err", err)
             continue
         }
         app("marathon", i, m)
     }
     for i, c := range cfg.KubernetesSDConfigs {
-        k, err := kubernetes.New(logger, c)
+        k, err := kubernetes.New(log.With(logger, "discovery", "k8s"), c)
         if err != nil {
-            logger.Errorf("Cannot create Kubernetes discovery: %s", err)
+            level.Error(logger).Log("msg", "Cannot create Kubernetes discovery", "err", err)
             continue
         }
         app("kubernetes", i, k)
     }
     for i, c := range cfg.ServersetSDConfigs {
-        app("serverset", i, zookeeper.NewServersetDiscovery(c, logger))
+        app("serverset", i, zookeeper.NewServersetDiscovery(c, log.With(logger, "discovery", "zookeeper")))
     }
     for i, c := range cfg.NerveSDConfigs {
-        app("nerve", i, zookeeper.NewNerveDiscovery(c, logger))
+        app("nerve", i, zookeeper.NewNerveDiscovery(c, log.With(logger, "discovery", "nerve")))
     }
     for i, c := range cfg.EC2SDConfigs {
-        app("ec2", i, ec2.NewDiscovery(c, logger))
+        app("ec2", i, ec2.NewDiscovery(c, log.With(logger, "discovery", "ec2")))
     }
     for i, c := range cfg.OpenstackSDConfigs {
-        openstackd, err := openstack.NewDiscovery(c, logger)
+        openstackd, err := openstack.NewDiscovery(c, log.With(logger, "discovery", "openstack"))
         if err != nil {
-            log.Errorf("Cannot initialize OpenStack discovery: %s", err)
+            level.Error(logger).Log("msg", "Cannot initialize OpenStack discovery", "err", err)
             continue
         }
         app("openstack", i, openstackd)
     }
 
     for i, c := range cfg.GCESDConfigs {
-        gced, err := gce.NewDiscovery(c, logger)
+        gced, err := gce.NewDiscovery(c, log.With(logger, "discovery", "gce"))
         if err != nil {
-            logger.Errorf("Cannot initialize GCE discovery: %s", err)
+            level.Error(logger).Log("msg", "Cannot initialize GCE discovery", "err", err)
             continue
         }
         app("gce", i, gced)
     }
     for i, c := range cfg.AzureSDConfigs {
-        app("azure", i, azure.NewDiscovery(c, logger))
+        app("azure", i, azure.NewDiscovery(c, log.With(logger, "discovery", "azure")))
     }
     for i, c := range cfg.TritonSDConfigs {
-        t, err := triton.New(logger.With("sd", "triton"), c)
+        t, err := triton.New(log.With(logger, "discovery", "trition"), c)
         if err != nil {
-            logger.Errorf("Cannot create Triton discovery: %s", err)
+            level.Error(logger).Log("msg", "Cannot create Triton discovery", "err", err)
             continue
         }
         app("triton", i, t)
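ProvidersFromConfig gives each discovery mechanism its own scoped child logger, so every message it emits carries a discovery=<name> pair. A minimal sketch of that indexing pattern (go-kit v0.x; names are illustrative):

package sketch

import (
    "os"

    "github.com/go-kit/kit/log"
)

// scopedLoggers builds one child logger per discovery mechanism, mirroring
// the log.With calls in ProvidersFromConfig above.
func scopedLoggers() map[string]log.Logger {
    root := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
    return map[string]log.Logger{
        "dns":    log.With(root, "discovery", "dns"),
        "consul": log.With(root, "discovery", "consul"),
        "azure":  log.With(root, "discovery", "azure"),
    }
}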
@@ -16,7 +16,6 @@ package discovery
 import (
     "testing"
 
-    "github.com/prometheus/common/log"
     "github.com/prometheus/prometheus/config"
     "golang.org/x/net/context"
     yaml "gopkg.in/yaml.v2"

@@ -54,7 +53,7 @@ static_configs:
 
     go ts.Run(ctx)
 
-    ts.UpdateProviders(ProvidersFromConfig(*cfg, log.Base()))
+    ts.UpdateProviders(ProvidersFromConfig(*cfg, nil))
     <-called
 
     verifyPresence(ts.tgroups, "static/0/0", true)

@@ -68,7 +67,7 @@ static_configs:
         t.Fatalf("Unable to load YAML config sTwo: %s", err)
     }
 
-    ts.UpdateProviders(ProvidersFromConfig(*cfg, log.Base()))
+    ts.UpdateProviders(ProvidersFromConfig(*cfg, nil))
     <-called
 
     verifyPresence(ts.tgroups, "static/0/0", true)

@@ -20,9 +20,10 @@ import (
     "sync"
     "time"
 
+    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
     "github.com/miekg/dns"
     "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/common/log"
     "github.com/prometheus/common/model"
     "golang.org/x/net/context"
 

@@ -71,6 +72,10 @@ type Discovery struct {
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
 func NewDiscovery(conf *config.DNSSDConfig, logger log.Logger) *Discovery {
+    if logger == nil {
+        logger = log.NewNopLogger()
+    }
+
     qtype := dns.TypeSRV
     switch strings.ToUpper(conf.Type) {
     case "A":

@@ -114,7 +119,7 @@ func (d *Discovery) refreshAll(ctx context.Context, ch chan<- []*config.TargetGroup) {
     for _, name := range d.names {
         go func(n string) {
             if err := d.refresh(ctx, n, ch); err != nil {
-                d.logger.Errorf("Error refreshing DNS targets: %s", err)
+                level.Error(d.logger).Log("msg", "Error refreshing DNS targets", "err", err)
             }
             wg.Done()
         }(name)

@@ -149,7 +154,7 @@ func (d *Discovery) refresh(ctx context.Context, name string, ch chan<- []*config.TargetGroup) error {
         case *dns.AAAA:
             target = hostPort(addr.AAAA.String(), d.port)
         default:
-            d.logger.Warnf("%q is not a valid SRV record", record)
+            level.Warn(d.logger).Log("msg", "Invalid SRV record", "record", record)
             continue
 
         }

@@ -183,11 +188,7 @@ func lookupAll(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
     for _, lname := range conf.NameList(name) {
         response, err = lookup(lname, qtype, client, servAddr, false)
         if err != nil {
-            logger.
-                With("server", server).
-                With("name", name).
-                With("reason", err).
-                Warn("DNS resolution failed.")
+            level.Warn(logger).Log("msg", "DNS resolution failed", "server", server, "name", name, "err", err)
             continue
         }
         if len(response.Answer) > 0 {
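The lookupAll hunk above collapses a chain of With calls into a single structured call. Since go-kit's Log is variadic over key/value pairs, the context no longer needs to be accumulated one pair at a time. A sketch of the contrast:

package sketch

import (
    "os"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
)

func example(server, name string, err error) {
    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

    // Old style built up context one pair at a time:
    //   logger.With("server", server).With("name", name).With("reason", err).Warn("DNS resolution failed.")
    // go-kit's Log is already variadic, so the pairs collapse into one call:
    level.Warn(logger).Log("msg", "DNS resolution failed", "server", server, "name", name, "err", err)
}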
@@ -23,8 +23,9 @@ import (
     "github.com/aws/aws-sdk-go/aws/credentials"
     "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
     "github.com/aws/aws-sdk-go/aws/session"
+    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
     "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/common/log"
     "github.com/prometheus/common/model"
     "golang.org/x/net/context"
 

@@ -83,6 +84,9 @@ func NewDiscovery(conf *config.EC2SDConfig, logger log.Logger) *Discovery {
     if conf.AccessKey == "" && conf.SecretKey == "" {
         creds = nil
     }
+    if logger == nil {
+        logger = log.NewNopLogger()
+    }
     return &Discovery{
         aws: &aws.Config{
             Region: &conf.Region,

@@ -104,7 +108,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
     // Get an initial set right away.
     tg, err := d.refresh()
     if err != nil {
-        d.logger.Error(err)
+        level.Error(d.logger).Log("msg", "Refresh failed", "err", err)
     } else {
         select {
         case ch <- []*config.TargetGroup{tg}:

@@ -118,7 +122,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
         case <-ticker.C:
             tg, err := d.refresh()
             if err != nil {
-                d.logger.Error(err)
+                level.Error(d.logger).Log("msg", "Refresh failed", "err", err)
                 continue
             }
 

@@ -22,8 +22,9 @@ import (
     "strings"
     "time"
 
+    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
     "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/common/log"
     "github.com/prometheus/common/model"
     "golang.org/x/net/context"
     "gopkg.in/fsnotify.v1"

@@ -69,6 +70,9 @@ type Discovery struct {
 
 // NewDiscovery returns a new file discovery for the given paths.
 func NewDiscovery(conf *config.FileSDConfig, logger log.Logger) *Discovery {
+    if logger == nil {
+        logger = log.NewNopLogger()
+    }
     return &Discovery{
         paths:    conf.Files,
         interval: time.Duration(conf.RefreshInterval),

@@ -82,7 +86,7 @@ func (d *Discovery) listFiles() []string {
     for _, p := range d.paths {
         files, err := filepath.Glob(p)
         if err != nil {
-            d.logger.Errorf("Error expanding glob %q: %s", p, err)
+            level.Error(d.logger).Log("msg", "Error expanding glob", "glob", p, "err", err)
             continue
         }
         paths = append(paths, files...)

@@ -103,7 +107,7 @@ func (d *Discovery) watchFiles() {
             p = "./"
         }
         if err := d.watcher.Add(p); err != nil {
-            d.logger.Errorf("Error adding file watch for %q: %s", p, err)
+            level.Error(d.logger).Log("msg", "Error adding file watch", "path", p, "err", err)
         }
     }
 }

@@ -114,7 +118,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
     watcher, err := fsnotify.NewWatcher()
     if err != nil {
-        d.logger.Errorf("Error creating file watcher: %s", err)
+        level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err)
         return
     }
     d.watcher = watcher

@@ -152,7 +156,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
         case err := <-d.watcher.Errors:
             if err != nil {
-                d.logger.Errorf("Error on file watch: %s", err)
+                level.Error(d.logger).Log("msg", "Error watching file", "err", err)
             }
         }
     }

@@ -160,7 +164,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
 // stop shuts down the file watcher.
 func (d *Discovery) stop() {
-    d.logger.Debugf("Stopping file discovery for %s...", d.paths)
+    level.Debug(d.logger).Log("msg", "Stopping file discovery...", "paths", d.paths)
 
     done := make(chan struct{})
     defer close(done)

@@ -178,10 +182,10 @@ func (d *Discovery) stop() {
         }
     }()
     if err := d.watcher.Close(); err != nil {
-        d.logger.Errorf("Error closing file watcher for %s: %s", d.paths, err)
+        level.Error(d.logger).Log("msg", "Error closing file watcher", "paths", d.paths, "err", err)
     }
 
-    d.logger.Debugf("File discovery for %s stopped.", d.paths)
+    level.Debug(d.logger).Log("File discovery stopped", "paths", d.paths)
 }
 
 // refresh reads all files matching the discovery's patterns and sends the respective

@@ -197,7 +201,8 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*config.TargetGroup) {
         tgroups, err := readFile(p)
         if err != nil {
             fileSDReadErrorsCount.Inc()
-            d.logger.Errorf("Error reading file %q: %s", p, err)
+
+            level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err)
             // Prevent deletion down below.
             ref[p] = d.lastRefresh[p]
             continue
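A recurring move in the file-SD hunks above: printf-style Errorf/Debugf calls become a constant "msg" plus variable key/value pairs, which keeps messages grep-stable and makes the variables filterable. A sketch of the translation (illustrative values):

package sketch

import (
    "os"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
)

func example(path string, err error) {
    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

    // Printf style bakes the variables into the message text:
    //   d.logger.Errorf("Error expanding glob %q: %s", path, err)
    // The go-kit style keeps the message constant and moves variables into
    // dedicated keys:
    level.Error(logger).Log("msg", "Error expanding glob", "glob", path, "err", err)
}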
@@ -21,7 +21,6 @@ import (
     "testing"
     "time"
 
-    "github.com/prometheus/common/log"
     "github.com/prometheus/common/model"
     "golang.org/x/net/context"

@@ -47,7 +46,7 @@ func testFileSD(t *testing.T, prefix, ext string, expect bool) {
     conf.RefreshInterval = model.Duration(1 * time.Hour)
 
     var (
-        fsd         = NewDiscovery(&conf, log.Base())
+        fsd         = NewDiscovery(&conf, nil)
         ch          = make(chan []*config.TargetGroup)
         ctx, cancel = context.WithCancel(context.Background())
     )

@@ -21,8 +21,9 @@ import (
     "google.golang.org/api/compute/v1"
 
+    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
     "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/common/log"
     "github.com/prometheus/common/model"
     "golang.org/x/net/context"
     "golang.org/x/oauth2"

@@ -81,6 +82,9 @@ type Discovery struct {
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
 func NewDiscovery(conf *config.GCESDConfig, logger log.Logger) (*Discovery, error) {
+    if logger == nil {
+        logger = log.NewNopLogger()
+    }
     gd := &Discovery{
         project: conf.Project,
         zone:    conf.Zone,

@@ -108,7 +112,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
     // Get an initial set right away.
     tg, err := d.refresh()
     if err != nil {
-        d.logger.Error(err)
+        level.Error(d.logger).Log("msg", "Refresh failed", "err", err)
     } else {
         select {
         case ch <- []*config.TargetGroup{tg}:

@@ -124,7 +128,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
         case <-ticker.C:
             tg, err := d.refresh()
             if err != nil {
-                d.logger.Error(err)
+                level.Error(d.logger).Log("msg", "Refresh failed", "err", err)
                 continue
             }
             select {

@@ -20,7 +20,8 @@ import (
 
     "github.com/prometheus/prometheus/config"
 
-    "github.com/prometheus/common/log"
+    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
     "github.com/prometheus/common/model"
     "golang.org/x/net/context"
     apiv1 "k8s.io/client-go/pkg/api/v1"

@@ -42,6 +43,9 @@ type Endpoints struct {
 
 // NewEndpoints returns a new endpoints discovery.
 func NewEndpoints(l log.Logger, svc, eps, pod cache.SharedInformer) *Endpoints {
+    if l == nil {
+        l = log.NewNopLogger()
+    }
     ep := &Endpoints{
         logger:       l,
         endpointsInf: eps,

@@ -74,7 +78,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
         if tg == nil {
             return
         }
-        e.logger.With("tg", fmt.Sprintf("%#v", tg)).Debugln("endpoints update")
+        level.Debug(e.logger).Log("msg", "endpoints update", "tg", fmt.Sprintf("%#v", tg))
         select {
         case <-ctx.Done():
         case ch <- []*config.TargetGroup{tg}:

@@ -87,7 +91,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
         eps, err := convertToEndpoints(o)
         if err != nil {
-            e.logger.With("err", err).Errorln("converting to Endpoints object failed")
+            level.Error(e.logger).Log("msg", "converting to Endpoints object failed", "err", err)
             return
         }
         send(e.buildEndpoints(eps))

@@ -97,7 +101,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
         eps, err := convertToEndpoints(o)
         if err != nil {
-            e.logger.With("err", err).Errorln("converting to Endpoints object failed")
+            level.Error(e.logger).Log("msg", "converting to Endpoints object failed", "err", err)
             return
         }
         send(e.buildEndpoints(eps))

@@ -107,7 +111,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
         eps, err := convertToEndpoints(o)
         if err != nil {
-            e.logger.With("err", err).Errorln("converting to Endpoints object failed")
+            level.Error(e.logger).Log("msg", "converting to Endpoints object failed", "err", err)
             return
         }
         send(&config.TargetGroup{Source: endpointsSource(eps)})

@@ -117,7 +121,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
     serviceUpdate := func(o interface{}) {
         svc, err := convertToService(o)
         if err != nil {
-            e.logger.With("err", err).Errorln("converting to Service object failed")
+            level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err)
             return
         }
 

@@ -129,7 +133,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
             send(e.buildEndpoints(obj.(*apiv1.Endpoints)))
         }
         if err != nil {
-            e.logger.With("err", err).Errorln("retrieving endpoints failed")
+            level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err)
         }
     }
     e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{

@@ -310,7 +314,7 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
         return nil
     }
     if err != nil {
-        e.logger.With("err", err).Errorln("resolving pod ref failed")
+        level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err)
     }
     return obj.(*apiv1.Pod)
 }

@@ -325,7 +329,7 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *config.TargetGroup) {
         return
     }
     if err != nil {
-        e.logger.With("err", err).Errorln("retrieving service failed")
+        level.Error(e.logger).Log("msg", "retrieving service failed", "err", err)
     }
     svc = obj.(*apiv1.Service)
 

@@ -16,7 +16,6 @@ package kubernetes
 import (
     "testing"
 
-    "github.com/prometheus/common/log"
     "github.com/prometheus/common/model"
     "github.com/prometheus/prometheus/config"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -36,7 +35,7 @@ func makeTestEndpointsDiscovery() (*Endpoints, *fakeInformer, *fakeInformer, *fakeInformer) {
     svc := newFakeServiceInformer()
     eps := newFakeEndpointsInformer()
     pod := newFakePodInformer()
-    return NewEndpoints(log.Base(), svc, eps, pod), svc, eps, pod
+    return NewEndpoints(nil, svc, eps, pod), svc, eps, pod
 }
 
 func makeEndpoints() *v1.Endpoints {

@@ -16,7 +16,8 @@ package kubernetes
 import (
     "fmt"
 
-    "github.com/prometheus/common/log"
+    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
     "github.com/prometheus/common/model"
     "github.com/prometheus/prometheus/config"
     "github.com/prometheus/prometheus/util/strutil"

@@ -64,7 +65,7 @@ func (s *Ingress) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
         ingress, err := convertToIngress(o)
         if err != nil {
-            s.logger.With("err", err).Errorln("converting to Ingress object failed")
+            level.Error(s.logger).Log("msg", "converting to Ingress object failed", "err", err.Error())
             return
         }
         send(s.buildIngress(ingress))

@@ -74,7 +75,7 @@ func (s *Ingress) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
         ingress, err := convertToIngress(o)
         if err != nil {
-            s.logger.With("err", err).Errorln("converting to Ingress object failed")
+            level.Error(s.logger).Log("msg", "converting to Ingress object failed", "err", err.Error())
             return
         }
         send(&config.TargetGroup{Source: ingressSource(ingress)})

@@ -84,7 +85,7 @@ func (s *Ingress) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
         ingress, err := convertToIngress(o)
         if err != nil {
-            s.logger.With("err", err).Errorln("converting to Ingress object failed")
+            level.Error(s.logger).Log("msg", "converting to Ingress object failed", "err", err.Error())
             return
         }
         send(s.buildIngress(ingress))

@@ -16,7 +16,6 @@ package kubernetes
 import (
     "testing"
 
-    "github.com/prometheus/common/log"
     "github.com/prometheus/common/model"
     "github.com/prometheus/prometheus/config"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -33,7 +32,7 @@ func newFakeIngressInformer() *fakeInformer {
 
 func makeTestIngressDiscovery() (*Ingress, *fakeInformer) {
     i := newFakeIngressInformer()
-    return NewIngress(log.Base(), i), i
+    return NewIngress(nil, i), i
 }
 
 func makeIngress(tls []v1beta1.IngressTLS) *v1beta1.Ingress {

@@ -21,10 +21,10 @@ import (
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/prometheus/config"
 
-    "github.com/prometheus/common/log"
+    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
     "github.com/prometheus/common/model"
     "golang.org/x/net/context"
-    "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/pkg/api"
     apiv1 "k8s.io/client-go/pkg/api/v1"

@@ -70,14 +70,6 @@ type Discovery struct {
     namespaceDiscovery *config.KubernetesNamespaceDiscovery
 }
 
-func init() {
-    runtime.ErrorHandlers = []func(error){
-        func(err error) {
-            log.With("component", "kube_client_runtime").Errorln(err)
-        },
-    }
-}
-
 func (d *Discovery) getNamespaces() []string {
     namespaces := d.namespaceDiscovery.Names
     if len(namespaces) == 0 {
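The init() deleted above registered the Kubernetes client's background error handler against a package-global logger; this diff moves that registration into main() (see the main.go hunk earlier), so the handler closes over the injected logger instead. A sketch of the relocated wiring, assuming the apimachinery import path the diff adds:

package main

import (
    "os"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
    k8s_runtime "k8s.io/apimachinery/pkg/util/runtime"
)

func main() {
    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

    // main() owns the handler now, so client-go's background errors flow
    // through the injected logger rather than a package-global one.
    k8s_runtime.ErrorHandlers = []func(error){
        func(err error) {
            level.Error(log.With(logger, "component", "k8s_client_runtime")).Log("err", err)
        },
    }
}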
@ -88,6 +80,9 @@ func (d *Discovery) getNamespaces() []string {
|
||||||
|
|
||||||
// New creates a new Kubernetes discovery for the given role.
|
// New creates a new Kubernetes discovery for the given role.
|
||||||
func New(l log.Logger, conf *config.KubernetesSDConfig) (*Discovery, error) {
|
func New(l log.Logger, conf *config.KubernetesSDConfig) (*Discovery, error) {
|
||||||
|
if l == nil {
|
||||||
|
l = log.NewNopLogger()
|
||||||
|
}
|
||||||
var (
|
var (
|
||||||
kcfg *rest.Config
|
kcfg *rest.Config
|
||||||
err error
|
err error
|
||||||
|
@ -102,18 +97,19 @@ func New(l log.Logger, conf *config.KubernetesSDConfig) (*Discovery, error) {
|
||||||
// Because the handling of configuration parameters changes
|
// Because the handling of configuration parameters changes
|
||||||
// we should inform the user when their currently configured values
|
// we should inform the user when their currently configured values
|
||||||
 			// will be ignored due to precedence of InClusterConfig
-			l.Info("Using pod service account via in-cluster config")
+			level.Info(l).Log("msg", "Using pod service account via in-cluster config")
 
 			if conf.TLSConfig.CAFile != "" {
-				l.Warn("Configured TLS CA file is ignored when using pod service account")
+				level.Warn(l).Log("msg", "Configured TLS CA file is ignored when using pod service account")
 			}
 			if conf.TLSConfig.CertFile != "" || conf.TLSConfig.KeyFile != "" {
-				l.Warn("Configured TLS client certificate is ignored when using pod service account")
+				level.Warn(l).Log("msg", "Configured TLS client certificate is ignored when using pod service account")
 			}
 			if conf.BearerToken != "" {
-				l.Warn("Configured auth token is ignored when using pod service account")
+				level.Warn(l).Log("msg", "Configured auth token is ignored when using pod service account")
 			}
 			if conf.BasicAuth != nil {
-				l.Warn("Configured basic authentication credentials are ignored when using pod service account")
+				level.Warn(l).Log("msg", "Configured basic authentication credentials are ignored when using pod service account")
 			}
 	} else {
 		kcfg = &rest.Config{

@@ -173,7 +169,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 			slw := cache.NewListWatchFromClient(rclient, "services", namespace, nil)
 			plw := cache.NewListWatchFromClient(rclient, "pods", namespace, nil)
 			eps := NewEndpoints(
-				d.logger.With("kubernetes_sd", "endpoint"),
+				log.With(d.logger, "role", "endpoint"),
 				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
 				cache.NewSharedInformer(elw, &apiv1.Endpoints{}, resyncPeriod),
 				cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod),

@@ -203,7 +199,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 		for _, namespace := range namespaces {
 			plw := cache.NewListWatchFromClient(rclient, "pods", namespace, nil)
 			pod := NewPod(
-				d.logger.With("kubernetes_sd", "pod"),
+				log.With(d.logger, "role", "pod"),
 				cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod),
 			)
 			go pod.informer.Run(ctx.Done())

@@ -223,7 +219,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 		for _, namespace := range namespaces {
 			slw := cache.NewListWatchFromClient(rclient, "services", namespace, nil)
 			svc := NewService(
-				d.logger.With("kubernetes_sd", "service"),
+				log.With(d.logger, "role", "service"),
 				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
 			)
 			go svc.informer.Run(ctx.Done())

@@ -243,7 +239,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 		for _, namespace := range namespaces {
 			ilw := cache.NewListWatchFromClient(reclient, "ingresses", namespace, nil)
 			ingress := NewIngress(
-				d.logger.With("kubernetes_sd", "ingress"),
+				log.With(d.logger, "role", "ingress"),
 				cache.NewSharedInformer(ilw, &extensionsv1beta1.Ingress{}, resyncPeriod),
 			)
 			go ingress.informer.Run(ctx.Done())

@@ -261,7 +257,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 	case "node":
 		nlw := cache.NewListWatchFromClient(rclient, "nodes", api.NamespaceAll, nil)
 		node := NewNode(
-			d.logger.With("kubernetes_sd", "node"),
+			log.With(d.logger, "role", "node"),
 			cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncPeriod),
 		)
 		go node.informer.Run(ctx.Done())

@@ -272,7 +268,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 		node.Run(ctx, ch)
 
 	default:
-		d.logger.Errorf("unknown Kubernetes discovery kind %q", d.role)
+		level.Error(d.logger).Log("msg", "unknown Kubernetes discovery kind", "role", d.role)
 	}
 
 	<-ctx.Done()
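
Every hunk above makes the same substitution: printf-style methods on the old prometheus/common logger (Errorf, Warn, Info) become leveled, key-value calls on a go-kit logger. A minimal, self-contained sketch of the new call style (the logger construction is illustrative, not part of this commit):

package main

import (
    "os"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
)

func main() {
    // A logfmt logger on stderr; Log takes alternating keys and values.
    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

    // Emits roughly:
    // level=error msg="unknown Kubernetes discovery kind" role=endpoints
    level.Error(logger).Log("msg", "unknown Kubernetes discovery kind", "role", "endpoints")
}

Because values travel as separate fields, the %q formatting from the old Errorf call simply disappears; the encoder quotes values as needed.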

@@ -18,7 +18,8 @@ import (
 	"net"
 	"strconv"
 
-	"github.com/prometheus/common/log"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/util/strutil"

@@ -37,6 +38,9 @@ type Node struct {
 
 // NewNode returns a new node discovery.
 func NewNode(l log.Logger, inf cache.SharedInformer) *Node {
+	if l == nil {
+		l = log.NewNopLogger()
+	}
 	return &Node{logger: l, informer: inf, store: inf.GetStore()}
 }
 

@@ -67,7 +71,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
 			node, err := convertToNode(o)
 			if err != nil {
-				n.logger.With("err", err).Errorln("converting to Node object failed")
+				level.Error(n.logger).Log("msg", "converting to Node object failed", "err", err)
 				return
 			}
 			send(n.buildNode(node))

@@ -77,7 +81,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
 			node, err := convertToNode(o)
 			if err != nil {
-				n.logger.With("err", err).Errorln("converting to Node object failed")
+				level.Error(n.logger).Log("msg", "converting to Node object failed", "err", err)
 				return
 			}
 			send(&config.TargetGroup{Source: nodeSource(node)})

@@ -87,7 +91,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
 			node, err := convertToNode(o)
 			if err != nil {
-				n.logger.With("err", err).Errorln("converting to Node object failed")
+				level.Error(n.logger).Log("msg", "converting to Node object failed", "err", err)
 				return
 			}
 			send(n.buildNode(node))

@@ -151,7 +155,7 @@ func (n *Node) buildNode(node *apiv1.Node) *config.TargetGroup {
 
 	addr, addrMap, err := nodeAddress(node)
 	if err != nil {
-		n.logger.With("err", err).Debugf("No node address found")
+		level.Warn(n.logger).Log("msg", "No node address found", "err", err)
 		return nil
 	}
 	addr = net.JoinHostPort(addr, strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10))
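
NewNode now normalizes a nil logger to a no-op logger, and the same guard appears in every constructor this commit touches. A sketch of the pattern in isolation, with a hypothetical Thing type standing in for the discovery structs:

package widget

import "github.com/go-kit/kit/log"

// Thing is a hypothetical stand-in for the types patched in this diff.
type Thing struct {
    logger log.Logger
}

// NewThing defaults a nil logger to log.NewNopLogger, so later calls
// such as level.Error(t.logger).Log(...) never hit a nil interface.
func NewThing(l log.Logger) *Thing {
    if l == nil {
        l = log.NewNopLogger()
    }
    return &Thing{logger: l}
}

This guard is what lets the test files below drop their logger setup and simply pass nil.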

@@ -20,7 +20,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	"github.com/stretchr/testify/require"

@@ -159,7 +158,7 @@ func newFakeNodeInformer() *fakeInformer {
 
 func makeTestNodeDiscovery() (*Node, *fakeInformer) {
 	i := newFakeNodeInformer()
-	return NewNode(log.Base(), i), i
+	return NewNode(nil, i), i
 }
 
 func makeNode(name, address string, labels map[string]string, annotations map[string]string) *v1.Node {

@@ -19,7 +19,8 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/prometheus/common/log"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/util/strutil"

@@ -38,6 +39,9 @@ type Pod struct {
 
 // NewPod creates a new pod discovery.
 func NewPod(l log.Logger, pods cache.SharedInformer) *Pod {
+	if l == nil {
+		l = log.NewNopLogger()
+	}
 	return &Pod{
 		informer: pods,
 		store:    pods.GetStore(),

@@ -53,7 +57,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 		tg := p.buildPod(o.(*apiv1.Pod))
 		initial = append(initial, tg)
 
-		p.logger.With("tg", fmt.Sprintf("%#v", tg)).Debugln("initial pod")
+		level.Debug(p.logger).Log("msg", "initial pod", "tg", fmt.Sprintf("%#v", tg))
 	}
 	select {
 	case <-ctx.Done():

@@ -63,7 +67,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
 	// Send target groups for pod updates.
 	send := func(tg *config.TargetGroup) {
-		p.logger.With("tg", fmt.Sprintf("%#v", tg)).Debugln("pod update")
+		level.Debug(p.logger).Log("msg", "pod update", "tg", fmt.Sprintf("%#v", tg))
 		select {
 		case <-ctx.Done():
 		case ch <- []*config.TargetGroup{tg}:

@@ -75,7 +79,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
 			pod, err := convertToPod(o)
 			if err != nil {
-				p.logger.With("err", err).Errorln("converting to Pod object failed")
+				level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err)
 				return
 			}
 			send(p.buildPod(pod))

@@ -85,7 +89,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
 			pod, err := convertToPod(o)
 			if err != nil {
-				p.logger.With("err", err).Errorln("converting to Pod object failed")
+				level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err)
 				return
 			}
 			send(&config.TargetGroup{Source: podSource(pod)})

@@ -95,7 +99,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
 			pod, err := convertToPod(o)
 			if err != nil {
-				p.logger.With("err", err).Errorln("converting to Pod object failed")
+				level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err)
 				return
 			}
 			send(p.buildPod(pod))
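
The pod discovery logs every initial pod and every update at debug level. With go-kit, whether those lines are emitted is decided by a level filter wrapped around the logger; the sketch below shows one way a caller might wire that (the filter setup is an assumption, not code from this commit):

package main

import (
    "os"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
)

func main() {
    logger := log.NewLogfmtLogger(os.Stderr)
    // Admit info and above; debug records are dropped by the filter.
    logger = level.NewFilter(logger, level.AllowInfo())

    level.Debug(logger).Log("msg", "pod update") // suppressed
    level.Info(logger).Log("msg", "visible")     // printed
}

One caveat: arguments are evaluated before Log runs, so the fmt.Sprintf("%#v", tg) in the hunk above is paid for even when the debug line is filtered out.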

@@ -16,7 +16,6 @@ package kubernetes
 import (
 	"testing"
 
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -34,7 +33,7 @@ func newFakePodInformer() *fakeInformer {
 
 func makeTestPodDiscovery() (*Pod, *fakeInformer) {
 	i := newFakePodInformer()
-	return NewPod(log.Base(), i), i
+	return NewPod(nil, i), i
 }
 
 func makeMultiPortPod() *v1.Pod {

@@ -18,7 +18,8 @@ import (
 	"net"
 	"strconv"
 
-	"github.com/prometheus/common/log"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/util/strutil"

@@ -36,6 +37,9 @@ type Service struct {
 
 // NewService returns a new service discovery.
 func NewService(l log.Logger, inf cache.SharedInformer) *Service {
+	if l == nil {
+		l = log.NewNopLogger()
+	}
 	return &Service{logger: l, informer: inf, store: inf.GetStore()}
 }
 

@@ -66,7 +70,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
 			svc, err := convertToService(o)
 			if err != nil {
-				s.logger.With("err", err).Errorln("converting to Service object failed")
+				level.Error(s.logger).Log("msg", "converting to Service object failed", "err", err)
 				return
 			}
 			send(s.buildService(svc))

@@ -76,7 +80,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
 			svc, err := convertToService(o)
 			if err != nil {
-				s.logger.With("err", err).Errorln("converting to Service object failed")
+				level.Error(s.logger).Log("msg", "converting to Service object failed", "err", err)
 				return
 			}
 			send(&config.TargetGroup{Source: serviceSource(svc)})

@@ -86,7 +90,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 
 			svc, err := convertToService(o)
 			if err != nil {
-				s.logger.With("err", err).Errorln("converting to Service object failed")
+				level.Error(s.logger).Log("msg", "converting to Service object failed", "err", err)
 				return
 			}
 			send(s.buildService(svc))
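
Where the old logger chained d.logger.With(key, value), go-kit uses the package function log.With(logger, key, value) to derive a child logger carrying fixed context; that is how kubernetes.go labels each role further up. A short sketch:

package main

import (
    "os"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
)

func main() {
    root := log.NewLogfmtLogger(os.Stderr)

    // Child loggers share the writer but attach their own key-value
    // pairs to every record, mirroring log.With(d.logger, "role", ...).
    svcLogger := log.With(root, "role", "service")

    // Emits one line carrying level, role, and msg fields.
    level.Error(svcLogger).Log("msg", "converting to Service object failed")
}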

@@ -17,7 +17,6 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -35,7 +34,7 @@ func newFakeServiceInformer() *fakeInformer {
 
 func makeTestServiceDiscovery() (*Service, *fakeInformer) {
 	i := newFakeServiceInformer()
-	return NewService(log.Base(), i), i
+	return NewService(nil, i), i
 }
 
 func makeMultiPortService() *v1.Service {

@@ -26,8 +26,9 @@ import (
 
 	"golang.org/x/net/context"
 
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/util/httputil"

@@ -94,6 +95,10 @@ type Discovery struct {
 
 // NewDiscovery returns a new Marathon Discovery.
 func NewDiscovery(conf *config.MarathonSDConfig, logger log.Logger) (*Discovery, error) {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+
 	tls, err := httputil.NewTLSConfig(conf.TLSConfig)
 	if err != nil {
 		return nil, err

@@ -134,7 +139,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 		case <-time.After(d.refreshInterval):
 			err := d.updateServices(ctx, ch)
 			if err != nil {
-				d.logger.Errorf("Error while updating services: %s", err)
+				level.Error(d.logger).Log("msg", "Error while updating services", "err", err)
 			}
 		}
 	}

@@ -173,7 +178,7 @@ func (d *Discovery) updateServices(ctx context.Context, ch chan<- []*config.Targ
 		case <-ctx.Done():
 			return ctx.Err()
 		case ch <- []*config.TargetGroup{{Source: source}}:
-			d.logger.Debugf("Removing group for %s", source)
+			level.Debug(d.logger).Log("msg", "Removing group", "source", source)
 		}
 	}
 }
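
Both Marathon hunks above sit inside the same loop shape: block on the context or a timer, attempt a refresh, log failures, and keep going. A runnable sketch of that shape (refresh is a made-up stand-in for updateServices):

package main

import (
    "context"
    "errors"
    "os"
    "time"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
)

// refresh is a hypothetical stand-in for d.updateServices above.
func refresh(ctx context.Context) error { return errors.New("upstream unavailable") }

func main() {
    logger := log.NewLogfmtLogger(os.Stderr)
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    for {
        select {
        case <-ctx.Done():
            return
        case <-time.After(time.Second):
            // A failed poll is logged and retried on the next tick;
            // it must not take the whole discovery loop down.
            if err := refresh(ctx); err != nil {
                level.Error(logger).Log("msg", "Error while updating services", "err", err)
            }
        }
    }
}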

@@ -19,7 +19,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"golang.org/x/net/context"
 

@@ -33,7 +32,7 @@ var (
 )
 
 func testUpdateServices(client AppListClient, ch chan []*config.TargetGroup) error {
-	md, err := NewDiscovery(&conf, log.Base())
+	md, err := NewDiscovery(&conf, nil)
 	if err != nil {
 		return err
 	}

@@ -141,7 +140,7 @@ func TestMarathonSDSendGroup(t *testing.T) {
 
 func TestMarathonSDRemoveApp(t *testing.T) {
 	var ch = make(chan []*config.TargetGroup, 1)
-	md, err := NewDiscovery(&conf, log.Base())
+	md, err := NewDiscovery(&conf, nil)
 	if err != nil {
 		t.Fatalf("%s", err)
 	}

@@ -177,7 +176,7 @@ func TestMarathonSDRunAndStop(t *testing.T) {
 		ch     = make(chan []*config.TargetGroup)
 		doneCh = make(chan error)
 	)
-	md, err := NewDiscovery(&conf, log.Base())
+	md, err := NewDiscovery(&conf, nil)
 	if err != nil {
 		t.Fatalf("%s", err)
 	}

@@ -18,11 +18,12 @@ import (
 	"net"
 	"time"
 
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/gophercloud/gophercloud"
 	"github.com/gophercloud/gophercloud/openstack"
 	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors"
 	"github.com/gophercloud/gophercloud/pagination"
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"golang.org/x/net/context"
 

@@ -58,7 +59,7 @@ func (h *HypervisorDiscovery) Run(ctx context.Context, ch chan<- []*config.Targe
 	// Get an initial set right away.
 	tg, err := h.refresh()
 	if err != nil {
-		h.logger.Error(err)
+		level.Error(h.logger).Log("msg", "Unable refresh target groups", "err", err.Error())
 	} else {
 		select {
 		case ch <- []*config.TargetGroup{tg}:

@@ -75,7 +76,7 @@ func (h *HypervisorDiscovery) Run(ctx context.Context, ch chan<- []*config.Targe
 		case <-ticker.C:
 			tg, err := h.refresh()
 			if err != nil {
-				h.logger.Error(err)
+				level.Error(h.logger).Log("msg", "Unable refresh target groups", "err", err.Error())
 				continue
 			}
 
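
These hunks pass err.Error() as the value, while other files in this commit pass the error itself. With the default logfmt encoder the two should render the same, since error values are stringified via Error(); a quick sketch:

package main

import (
    "errors"
    "os"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
)

func main() {
    logger := log.NewLogfmtLogger(os.Stderr)
    err := errors.New("keystone auth failed")

    // Both lines produce err="keystone auth failed" with this encoder.
    level.Error(logger).Log("msg", "Unable refresh target groups", "err", err)
    level.Error(logger).Log("msg", "Unable refresh target groups", "err", err.Error())
}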

@@ -20,7 +20,6 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 )

@@ -57,7 +56,7 @@ func (s *OpenstackSDHypervisorTestSuite) openstackAuthSuccess() (Discovery, erro
 		Region: "RegionOne",
 		Role:   "hypervisor",
 	}
-	return NewDiscovery(&conf, log.Base())
+	return NewDiscovery(&conf, nil)
 }
 
 func (s *OpenstackSDHypervisorTestSuite) TestOpenstackSDHypervisorRefresh() {

@@ -18,12 +18,13 @@ import (
 	"net"
 	"time"
 
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/gophercloud/gophercloud"
 	"github.com/gophercloud/gophercloud/openstack"
 	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips"
 	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
 	"github.com/gophercloud/gophercloud/pagination"
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"golang.org/x/net/context"
 

@@ -54,6 +55,9 @@ type InstanceDiscovery struct {
 // NewInstanceDiscovery returns a new instance discovery.
 func NewInstanceDiscovery(opts *gophercloud.AuthOptions,
 	interval time.Duration, port int, region string, l log.Logger) *InstanceDiscovery {
+	if l == nil {
+		l = log.NewNopLogger()
+	}
 	return &InstanceDiscovery{authOpts: opts,
 		region: region, interval: interval, port: port, logger: l}
 }

@@ -63,7 +67,7 @@ func (i *InstanceDiscovery) Run(ctx context.Context, ch chan<- []*config.TargetG
 	// Get an initial set right away.
 	tg, err := i.refresh()
 	if err != nil {
-		i.logger.Error(err)
+		level.Error(i.logger).Log("msg", "Unable to refresh target groups", "err", err.Error())
 	} else {
 		select {
 		case ch <- []*config.TargetGroup{tg}:

@@ -80,7 +84,7 @@ func (i *InstanceDiscovery) Run(ctx context.Context, ch chan<- []*config.TargetG
 		case <-ticker.C:
 			tg, err := i.refresh()
 			if err != nil {
-				i.logger.Error(err)
+				level.Error(i.logger).Log("msg", "Unable to refresh target groups", "err", err.Error())
 				continue
 			}
 

@@ -155,27 +159,27 @@ func (i *InstanceDiscovery) refresh() (*config.TargetGroup, error) {
 			openstackLabelInstanceID: model.LabelValue(s.ID),
 		}
 		if len(s.Addresses) == 0 {
-			i.logger.Info("Got no IP address for instance %s", s.ID)
+			level.Info(i.logger).Log("msg", "Got no IP address", "instance", s.ID)
 			continue
 		}
 		for _, address := range s.Addresses {
 			md, ok := address.([]interface{})
 			if !ok {
-				i.logger.Warn("Invalid type for address, expected array")
+				level.Warn(i.logger).Log("msg", "Invalid type for address, expected array")
 				continue
 			}
 			if len(md) == 0 {
-				i.logger.Debugf("Got no IP address for instance %s", s.ID)
+				level.Debug(i.logger).Log("msg", "Got no IP address", "instance", s.ID)
 				continue
 			}
 			md1, ok := md[0].(map[string]interface{})
 			if !ok {
-				i.logger.Warn("Invalid type for address, expected dict")
+				level.Warn(i.logger).Log("msg", "Invalid type for address, expected dict")
 				continue
 			}
 			addr, ok := md1["addr"].(string)
 			if !ok {
-				i.logger.Warn("Invalid type for address, expected string")
+				level.Warn(i.logger).Log("msg", "Invalid type for address, expected string")
 				continue
 			}
 			labels[openstackLabelPrivateIP] = model.LabelValue(addr)

@@ -191,7 +195,7 @@ func (i *InstanceDiscovery) refresh() (*config.TargetGroup, error) {
 		labels[openstackLabelInstanceName] = model.LabelValue(s.Name)
 		id, ok := s.Flavor["id"].(string)
 		if !ok {
-			i.logger.Warn("Invalid type for instance id, excepted string")
+			level.Warn(i.logger).Log("msg", "Invalid type for instance id, excepted string")
 			continue
 		}
 		labels[openstackLabelInstanceFlavor] = model.LabelValue(id)
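
The refresh hunks above walk a decoded, loosely typed API payload and skip anything with an unexpected shape rather than panicking. A self-contained sketch of the same type-assertion ladder (pickAddr is a made-up helper, not from this commit):

package main

import "fmt"

// pickAddr walks a decoded-JSON-style structure the way refresh does:
// assert each layer's type, and skip (rather than panic) on surprises.
func pickAddr(address interface{}) (string, bool) {
    md, ok := address.([]interface{})
    if !ok || len(md) == 0 {
        return "", false
    }
    md1, ok := md[0].(map[string]interface{})
    if !ok {
        return "", false
    }
    addr, ok := md1["addr"].(string)
    return addr, ok
}

func main() {
    good := []interface{}{map[string]interface{}{"addr": "10.0.0.5"}}
    if a, ok := pickAddr(good); ok {
        fmt.Println(a) // 10.0.0.5
    }
    if _, ok := pickAddr("not a list"); !ok {
        fmt.Println("skipped malformed entry")
    }
}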

@@ -20,7 +20,6 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 )

@@ -58,7 +57,7 @@ func (s *OpenstackSDInstanceTestSuite) openstackAuthSuccess() (Discovery, error)
 		Region: "RegionOne",
 		Role:   "instance",
 	}
-	return NewDiscovery(&conf, log.Base())
+	return NewDiscovery(&conf, nil)
 }
 
 func (s *OpenstackSDInstanceTestSuite) TestOpenstackSDInstanceRefresh() {

@@ -17,9 +17,9 @@ import (
 	"errors"
 	"time"
 
+	"github.com/go-kit/kit/log"
 	"github.com/gophercloud/gophercloud"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/log"
 	"golang.org/x/net/context"
 
 	"github.com/prometheus/prometheus/config"

@@ -20,8 +20,9 @@ import (
 	"net/http"
 	"time"
 
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/util/httputil"

@@ -77,6 +78,10 @@ type Discovery struct {
 
 // New returns a new Discovery which periodically refreshes its targets.
 func New(logger log.Logger, conf *config.TritonSDConfig) (*Discovery, error) {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+
 	tls, err := httputil.NewTLSConfig(conf.TLSConfig)
 	if err != nil {
 		return nil, err

@@ -103,7 +108,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 	// Get an initial set right away.
 	tg, err := d.refresh()
 	if err != nil {
-		d.logger.With("err", err).Error("Refreshing targets failed")
+		level.Error(d.logger).Log("msg", "Refreshing targets failed", "err", err)
 	} else {
 		ch <- []*config.TargetGroup{tg}
 	}

@@ -113,7 +118,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
 		case <-ticker.C:
 			tg, err := d.refresh()
 			if err != nil {
-				d.logger.With("err", err).Error("Refreshing targets failed")
+				level.Error(d.logger).Log("msg", "Refreshing targets failed", "err", err)
 			} else {
 				ch <- []*config.TargetGroup{tg}
 			}

@@ -23,7 +23,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	"github.com/stretchr/testify/assert"

@@ -54,17 +53,14 @@ var (
 			CertFile: "shouldnotexist.cert",
 		},
 	}
-	logger = log.Base()
 )
 
 func TestTritonSDNew(t *testing.T) {
-	td, err := New(logger, &conf)
+	td, err := New(nil, &conf)
 	assert.Nil(t, err)
 	assert.NotNil(t, td)
 	assert.NotNil(t, td.client)
 	assert.NotNil(t, td.interval)
-	assert.NotNil(t, td.logger)
-	assert.Equal(t, logger, td.logger, "td.logger equals logger")
 	assert.NotNil(t, td.sdConfig)
 	assert.Equal(t, conf.Account, td.sdConfig.Account)
 	assert.Equal(t, conf.DNSSuffix, td.sdConfig.DNSSuffix)

@@ -73,14 +69,14 @@ func TestTritonSDNew(t *testing.T) {
 }
 
 func TestTritonSDNewBadConfig(t *testing.T) {
-	td, err := New(logger, &badconf)
+	td, err := New(nil, &badconf)
 	assert.NotNil(t, err)
 	assert.Nil(t, td)
 }
 
 func TestTritonSDRun(t *testing.T) {
 	var (
-		td, err     = New(logger, &conf)
+		td, err     = New(nil, &conf)
 		ch          = make(chan []*config.TargetGroup)
 		ctx, cancel = context.WithCancel(context.Background())
 	)

@@ -132,7 +128,7 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) {
 
 func TestTritonSDRefreshNoServer(t *testing.T) {
 	var (
-		td, err = New(logger, &conf)
+		td, err = New(nil, &conf)
 	)
 	assert.Nil(t, err)
 	assert.NotNil(t, td)

@@ -146,7 +142,7 @@ func testTritonSDRefresh(t *testing.T, dstr string) []model.LabelSet {
 
 func testTritonSDRefresh(t *testing.T, dstr string) []model.LabelSet {
 	var (
-		td, err = New(logger, &conf)
+		td, err = New(nil, &conf)
 		s       = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 			fmt.Fprintln(w, dstr)
 		}))

@@ -24,7 +24,7 @@ import (
 	"github.com/samuel/go-zookeeper/zk"
 	"golang.org/x/net/context"
 
-	"github.com/prometheus/common/log"
+	"github.com/go-kit/kit/log"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/util/strutil"
 	"github.com/prometheus/prometheus/util/treecache"

@@ -63,6 +63,10 @@ func NewDiscovery(
 	logger log.Logger,
 	pf func(data []byte, path string) (model.LabelSet, error),
 ) *Discovery {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+
 	conn, _, err := zk.Connect(srvs, timeout)
 	conn.SetLogger(treecache.ZookeeperLogger{})
 	if err != nil {

@@ -21,12 +21,15 @@ import (
 	"sort"
 	"time"
 
-	"github.com/prometheus/common/log"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/common/model"
 )
 
 // Client allows sending batches of Prometheus samples to Graphite.
 type Client struct {
+	logger log.Logger
+
 	address   string
 	transport string
 	timeout   time.Duration

@@ -34,8 +37,12 @@ type Client struct {
 }
 
 // NewClient creates a new Client.
-func NewClient(address string, transport string, timeout time.Duration, prefix string) *Client {
+func NewClient(logger log.Logger, address string, transport string, timeout time.Duration, prefix string) *Client {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
 	return &Client{
+		logger:    logger,
 		address:   address,
 		transport: transport,
 		timeout:   timeout,

@@ -86,8 +93,7 @@ func (c *Client) Write(samples model.Samples) error {
 		t := float64(s.Timestamp.UnixNano()) / 1e9
 		v := float64(s.Value)
 		if math.IsNaN(v) || math.IsInf(v, 0) {
-			log.Warnf("cannot send value %f to Graphite,"+
-				"skipping sample %#v", v, s)
+			level.Warn(c.logger).Log("msg", "cannot send value to Graphite, skipping sample", "value", v, "sample", s)
 			continue
 		}
 		fmt.Fprintf(&buf, "%s %f %f\n", k, v, t)
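
Write drops NaN and infinite values before formatting, since the plaintext lines it builds have no useful representation for them. A runnable sketch of the guard (the metric line below is a made-up example):

package main

import (
    "fmt"
    "math"
)

func main() {
    for _, v := range []float64{4.2, math.NaN(), math.Inf(1)} {
        if math.IsNaN(v) || math.IsInf(v, 0) {
            fmt.Printf("skipping unrepresentable value %f\n", v)
            continue
        }
        // Hypothetical "name value timestamp" plaintext line.
        fmt.Printf("some.metric %f 1500000000\n", v)
    }
}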

@@ -17,10 +17,12 @@ import (
 	"encoding/json"
 	"fmt"
 	"math"
+	"os"
 	"strings"
 
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/prompb"
 

@@ -29,6 +31,8 @@ import (
 
 // Client allows sending batches of Prometheus samples to InfluxDB.
 type Client struct {
+	logger log.Logger
+
 	client          influx.Client
 	database        string
 	retentionPolicy string

@@ -36,14 +40,20 @@ type Client struct {
 }
 
 // NewClient creates a new Client.
-func NewClient(conf influx.HTTPConfig, db string, rp string) *Client {
+func NewClient(logger log.Logger, conf influx.HTTPConfig, db string, rp string) *Client {
 	c, err := influx.NewHTTPClient(conf)
 	// Currently influx.NewClient() *should* never return an error.
 	if err != nil {
-		log.Fatal(err)
+		level.Error(logger).Log("err", err)
+		os.Exit(1)
+	}
+
+	if logger == nil {
+		logger = log.NewNopLogger()
 	}
 
 	return &Client{
+		logger:          logger,
 		client:          c,
 		database:        db,
 		retentionPolicy: rp,
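
One ordering quirk worth noting in the hunk above: the nil-logger default runs after level.Error(logger) may already have been called, so a nil logger combined with a failing client construction would hit a nil interface instead of logging. A sketch of the safer ordering (same names as the hunk, offered as commentary, not as what the commit does):

package influxdb

import (
    "os"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"

    influx "github.com/influxdata/influxdb/client/v2"
)

// Client mirrors the struct in the hunk above, trimmed to what the
// sketch needs.
type Client struct {
    logger          log.Logger
    client          influx.Client
    database        string
    retentionPolicy string
}

// NewClient with the nil guard moved before the first use of logger.
func NewClient(logger log.Logger, conf influx.HTTPConfig, db string, rp string) *Client {
    if logger == nil {
        logger = log.NewNopLogger()
    }
    c, err := influx.NewHTTPClient(conf)
    if err != nil {
        // log.Fatal's replacement: log at error level, then exit.
        level.Error(logger).Log("err", err)
        os.Exit(1)
    }
    return &Client{logger: logger, client: c, database: db, retentionPolicy: rp}
}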

@@ -73,7 +83,7 @@ func (c *Client) Write(samples model.Samples) error {
 	for _, s := range samples {
 		v := float64(s.Value)
 		if math.IsNaN(v) || math.IsInf(v, 0) {
-			log.Debugf("cannot send value %f to InfluxDB, skipping sample %#v", v, s)
+			level.Debug(c.logger).Log("msg", "cannot send to InfluxDB, skipping sample", "value", v, "sample", s)
 			c.ignoredSamples.Inc()
 			continue
 		}

@@ -103,7 +103,7 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123
 		Password: "testpass",
 		Timeout:  time.Minute,
 	}
-	c := NewClient(conf, "test_db", "default")
+	c := NewClient(nil, conf, "test_db", "default")
 
 	if err := c.Write(samples); err != nil {
 		t.Fatalf("Error sending samples: %s", err)

@@ -25,14 +25,16 @@ import (
 	"sync"
 	"time"
 
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/gogo/protobuf/proto"
 	"github.com/golang/snappy"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 
 	influx "github.com/influxdata/influxdb/client/v2"
 
+	"github.com/prometheus/common/promlog"
 	"github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/graphite"
 	"github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/influxdb"
 	"github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/opentsdb"

@@ -96,8 +98,13 @@ func main() {
 	cfg := parseFlags()
 	http.Handle(cfg.telemetryPath, prometheus.Handler())
 
-	writers, readers := buildClients(cfg)
-	serve(cfg.listenAddr, writers, readers)
+	logLevel := promlog.AllowedLevel{}
+	logLevel.Set("debug")
+
+	logger := promlog.New(logLevel)
+
+	writers, readers := buildClients(logger, cfg)
+	serve(logger, cfg.listenAddr, writers, readers)
 }
 
 func parseFlags() *config {
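
promlog, added to the imports above, is Prometheus's small helper for building a go-kit logger from the usual --log.level setting; the adapter's main hard-codes debug. A standalone sketch of the same calls:

package main

import (
    "github.com/go-kit/kit/log/level"
    "github.com/prometheus/common/promlog"
)

func main() {
    var lvl promlog.AllowedLevel
    // Set validates the level name; main() above ignores the error,
    // which is safe only because "debug" is a known-good constant.
    if err := lvl.Set("debug"); err != nil {
        panic(err)
    }

    logger := promlog.New(lvl)
    level.Debug(logger).Log("msg", "visible because the level is debug")
}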

@@ -150,23 +157,29 @@ type reader interface {
 	Name() string
 }
 
-func buildClients(cfg *config) ([]writer, []reader) {
+func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) {
 	var writers []writer
 	var readers []reader
 	if cfg.graphiteAddress != "" {
 		c := graphite.NewClient(
+			log.With(logger, "storage", "Graphite"),
 			cfg.graphiteAddress, cfg.graphiteTransport,
 			cfg.remoteTimeout, cfg.graphitePrefix)
 		writers = append(writers, c)
 	}
 	if cfg.opentsdbURL != "" {
-		c := opentsdb.NewClient(cfg.opentsdbURL, cfg.remoteTimeout)
+		c := opentsdb.NewClient(
+			log.With(logger, "storage", "OpenTSDB"),
+			cfg.opentsdbURL,
+			cfg.remoteTimeout,
+		)
 		writers = append(writers, c)
 	}
 	if cfg.influxdbURL != "" {
 		url, err := url.Parse(cfg.influxdbURL)
 		if err != nil {
-			log.Fatalf("Failed to parse InfluxDB URL %q: %v", cfg.influxdbURL, err)
+			level.Error(logger).Log("msg", "Failed to parse InfluxDB URL", "url", cfg.influxdbURL, "err", err)
+			os.Exit(1)
 		}
 		conf := influx.HTTPConfig{
 			Addr: url.String(),

@@ -174,34 +187,39 @@ func buildClients(cfg *config) ([]writer, []reader) {
 			Password: cfg.influxdbPassword,
 			Timeout:  cfg.remoteTimeout,
 		}
-		c := influxdb.NewClient(conf, cfg.influxdbDatabase, cfg.influxdbRetentionPolicy)
+		c := influxdb.NewClient(
+			log.With(logger, "storage", "InfluxDB"),
+			conf,
+			cfg.influxdbDatabase,
+			cfg.influxdbRetentionPolicy,
+		)
 		prometheus.MustRegister(c)
 		writers = append(writers, c)
 		readers = append(readers, c)
 	}
-	log.Info("Starting up...")
+	level.Info(logger).Log("Starting up...")
 	return writers, readers
 }
 
-func serve(addr string, writers []writer, readers []reader) error {
+func serve(logger log.Logger, addr string, writers []writer, readers []reader) error {
 	http.HandleFunc("/write", func(w http.ResponseWriter, r *http.Request) {
 		compressed, err := ioutil.ReadAll(r.Body)
 		if err != nil {
-			log.Errorln("Read error:", err)
+			level.Error(logger).Log("msg", "Read error", "err", err.Error())
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
 
 		reqBuf, err := snappy.Decode(nil, compressed)
 		if err != nil {
-			log.Errorln("Decode error:", err)
+			level.Error(logger).Log("msg", "Decode error", "err", err.Error())
 			http.Error(w, err.Error(), http.StatusBadRequest)
 			return
 		}
 
 		var req prompb.WriteRequest
 		if err := proto.Unmarshal(reqBuf, &req); err != nil {
-			log.Errorln("Unmarshal error:", err)
+			level.Error(logger).Log("msg", "Unmarshal error", "err", err.Error())
 			http.Error(w, err.Error(), http.StatusBadRequest)
 			return
 		}
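
The /write handler expects the remote-write wire format: a protobuf-encoded WriteRequest compressed with snappy. A runnable round-trip sketch of that encoding (an empty request, just to show the calls):

package main

import (
    "fmt"

    "github.com/gogo/protobuf/proto"
    "github.com/golang/snappy"

    "github.com/prometheus/prometheus/prompb"
)

func main() {
    // Sender side: marshal the protobuf, then snappy-compress it.
    orig := &prompb.WriteRequest{}
    raw, err := proto.Marshal(orig)
    if err != nil {
        panic(err)
    }
    compressed := snappy.Encode(nil, raw)

    // Handler side, as above: decompress, then unmarshal.
    reqBuf, err := snappy.Decode(nil, compressed)
    if err != nil {
        panic(err)
    }
    var req prompb.WriteRequest
    if err := proto.Unmarshal(reqBuf, &req); err != nil {
        panic(err)
    }
    fmt.Printf("decoded %d timeseries\n", len(req.Timeseries))
}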

@@ -213,7 +231,7 @@ func serve(addr string, writers []writer, readers []reader) error {
 		for _, w := range writers {
 			wg.Add(1)
 			go func(rw writer) {
-				sendSamples(rw, samples)
+				sendSamples(logger, rw, samples)
 				wg.Done()
 			}(w)
 		}

@@ -223,21 +241,21 @@ func serve(addr string, writers []writer, readers []reader) error {
 	http.HandleFunc("/read", func(w http.ResponseWriter, r *http.Request) {
 		compressed, err := ioutil.ReadAll(r.Body)
 		if err != nil {
-			log.Errorln("Read error:", err)
+			level.Error(logger).Log("msg", "Read error", "err", err.Error())
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
 
 		reqBuf, err := snappy.Decode(nil, compressed)
 		if err != nil {
-			log.Errorln("Decode error:", err)
+			level.Error(logger).Log("msg", "Decode error", "err", err.Error())
 			http.Error(w, err.Error(), http.StatusBadRequest)
 			return
 		}
 
 		var req prompb.ReadRequest
 		if err := proto.Unmarshal(reqBuf, &req); err != nil {
-			log.Errorln("Unmarshal error:", err)
+			level.Error(logger).Log("msg", "Unmarshal error", "err", err.Error())
 			http.Error(w, err.Error(), http.StatusBadRequest)
 			return
 		}

@@ -252,7 +270,7 @@ func serve(addr string, writers []writer, readers []reader) error {
 		var resp *prompb.ReadResponse
 		resp, err = reader.Read(&req)
 		if err != nil {
-			log.With("query", req).With("storage", reader.Name()).With("err", err).Warnf("Error executing query")
+			level.Warn(logger).Log("msg", "Error executing query", "query", req, "storage", reader.Name(), "err", err)
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}

@@ -295,12 +313,12 @@ func protoToSamples(req *prompb.WriteRequest) model.Samples {
 	return samples
 }
 
-func sendSamples(w writer, samples model.Samples) {
+func sendSamples(logger log.Logger, w writer, samples model.Samples) {
 	begin := time.Now()
 	err := w.Write(samples)
 	duration := time.Since(begin).Seconds()
 	if err != nil {
-		log.With("num_samples", len(samples)).With("storage", w.Name()).With("err", err).Warnf("Error sending samples to remote storage")
+		level.Warn(logger).Log("msg", "Error sending samples to remote storage", "err", err, "storage", w.Name(), "num_samples", len(samples))
 		failedSamples.WithLabelValues(w.Name()).Add(float64(len(samples)))
 	}
 	sentSamples.WithLabelValues(w.Name()).Add(float64(len(samples)))
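
sendSamples is fanned out once per configured writer, with a WaitGroup holding the HTTP handler open until every backend has been tried. A sketch of that fan-out with made-up writer names:

package main

import (
    "fmt"
    "sync"
    "time"
)

// writeOne stands in for sendSamples above: each writer receives the
// whole batch, and one backend's failure does not affect its peers.
func writeOne(name string, batch []float64) {
    time.Sleep(10 * time.Millisecond) // simulated remote write
    fmt.Printf("%s: wrote %d samples\n", name, len(batch))
}

func main() {
    batch := []float64{1, 2, 3}
    writers := []string{"graphite", "opentsdb", "influxdb"}

    var wg sync.WaitGroup
    for _, w := range writers {
        wg.Add(1)
        go func(name string) {
            defer wg.Done()
            writeOne(name, batch)
        }(w)
    }
    wg.Wait() // the /write handler returns only after all writers finish
}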

@@ -23,7 +23,8 @@ import (
 	"net/url"
 	"time"
 
-	"github.com/prometheus/common/log"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/common/model"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context/ctxhttp"

@@ -36,13 +37,16 @@ const (
 
 // Client allows sending batches of Prometheus samples to OpenTSDB.
 type Client struct {
+	logger log.Logger
+
 	url     string
 	timeout time.Duration
 }
 
 // NewClient creates a new Client.
-func NewClient(url string, timeout time.Duration) *Client {
+func NewClient(logger log.Logger, url string, timeout time.Duration) *Client {
 	return &Client{
+		logger:  logger,
 		url:     url,
 		timeout: timeout,
 	}

@@ -75,7 +79,7 @@ func (c *Client) Write(samples model.Samples) error {
 	for _, s := range samples {
 		v := float64(s.Value)
 		if math.IsNaN(v) || math.IsInf(v, 0) {
-			log.Warnf("cannot send value %f to OpenTSDB, skipping sample %#v", v, s)
+			level.Warn(c.logger).Log("msg", "cannot send value to OpenTSDB, skipping sample", "value", v, "sample", s)
 			continue
 		}
 		metric := TagValue(s.Metric[model.MetricNameLabel])

@@ -26,8 +26,9 @@ import (
 	"sync/atomic"
 	"time"
 
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context/ctxhttp"

@@ -211,6 +212,9 @@ func New(o *Options, logger log.Logger) *Notifier {
 	if o.Do == nil {
 		o.Do = ctxhttp.Do
 	}
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
 
 	n := &Notifier{
 		queue: make([]*Alert, 0, o.QueueCapacity),

@@ -223,7 +227,14 @@ func New(o *Options, logger log.Logger) *Notifier {
 
 	queueLenFunc := func() float64 { return float64(n.queueLen()) }
 	alertmanagersDiscoveredFunc := func() float64 { return float64(len(n.Alertmanagers())) }
-	n.metrics = newAlertMetrics(o.Registerer, o.QueueCapacity, queueLenFunc, alertmanagersDiscoveredFunc)
+
+	n.metrics = newAlertMetrics(
+		o.Registerer,
+		o.QueueCapacity,
+		queueLenFunc,
+		alertmanagersDiscoveredFunc,
+	)
+
 	return n
 }
 

@@ -337,7 +348,7 @@ func (n *Notifier) Send(alerts ...*Alert) {
 	if d := len(alerts) - n.opts.QueueCapacity; d > 0 {
 		alerts = alerts[d:]
 
-		n.logger.Warnf("Alert batch larger than queue capacity, dropping %d alerts", d)
+		level.Warn(n.logger).Log("msg", "Alert batch larger than queue capacity, dropping alerts", "num_dropped", d)
 		n.metrics.dropped.Add(float64(d))
 	}
 

@@ -346,7 +357,7 @@ func (n *Notifier) Send(alerts ...*Alert) {
 	if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 {
 		n.queue = n.queue[d:]
 
-		n.logger.Warnf("Alert notification queue full, dropping %d alerts", d)
+		level.Warn(n.logger).Log("msg", "Alert notification queue full, dropping alerts", "num_dropped", d)
 		n.metrics.dropped.Add(float64(d))
 	}
 	n.queue = append(n.queue, alerts...)
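
The two guards in Send implement a bounded queue that sheds oldest-first: the first trims an oversized incoming batch, the second makes room in the queue before appending. A worked sketch of the arithmetic (capacity and contents are made-up):

package main

import "fmt"

func main() {
    capacity := 5
    queue := []string{"a", "b", "c", "d"}
    incoming := []string{"e", "f", "g"}

    // Guard 1 (batch larger than capacity) does not fire here:
    // len(incoming) - capacity = -2.

    // Guard 2: queue plus batch exceed capacity by d, so drop the d
    // oldest queued alerts before appending.
    if d := len(queue) + len(incoming) - capacity; d > 0 {
        fmt.Printf("dropping %d oldest alerts: %v\n", d, queue[:d])
        queue = queue[d:]
    }
    queue = append(queue, incoming...)
    fmt.Println(queue) // [c d e f g]
}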
|
@ -404,7 +415,7 @@ func (n *Notifier) sendAll(alerts ...*Alert) bool {
|
||||||
|
|
||||||
b, err := json.Marshal(alerts)
|
b, err := json.Marshal(alerts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
n.logger.Errorf("Encoding alerts failed: %s", err)
|
level.Error(n.logger).Log("msg", "Encoding alerts failed", "err", err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -429,7 +440,7 @@ func (n *Notifier) sendAll(alerts ...*Alert) bool {
|
||||||
u := am.url().String()
|
u := am.url().String()
|
||||||
|
|
||||||
if err := n.sendOne(ctx, ams.client, u, b); err != nil {
|
if err := n.sendOne(ctx, ams.client, u, b); err != nil {
|
||||||
n.logger.With("alertmanager", u).With("count", len(alerts)).Errorf("Error sending alerts: %s", err)
|
level.Error(n.logger).Log("alertmanager", u, "count", len(alerts), "msg", "Error sending alert", "err", err)
|
||||||
n.metrics.errors.WithLabelValues(u).Inc()
|
n.metrics.errors.WithLabelValues(u).Inc()
|
||||||
} else {
|
} else {
|
||||||
atomic.AddUint64(&numSuccess, 1)
|
atomic.AddUint64(&numSuccess, 1)
|
||||||
|
@ -468,7 +479,7 @@ func (n *Notifier) sendOne(ctx context.Context, c *http.Client, url string, b []
|
||||||
|
|
||||||
// Stop shuts down the notification handler.
|
// Stop shuts down the notification handler.
|
||||||
func (n *Notifier) Stop() {
|
func (n *Notifier) Stop() {
|
||||||
n.logger.Info("Stopping notification handler...")
|
level.Info(n.logger).Log("msg", "Stopping notification handler...")
|
||||||
n.cancel()
|
n.cancel()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -526,7 +537,7 @@ func (s *alertmanagerSet) Sync(tgs []*config.TargetGroup) {
|
||||||
for _, tg := range tgs {
|
for _, tg := range tgs {
|
||||||
ams, err := alertmanagerFromGroup(tg, s.cfg)
|
ams, err := alertmanagerFromGroup(tg, s.cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.With("err", err).Error("generating discovered Alertmanagers failed")
|
level.Error(s.logger).Log("msg", "Creating discovered Alertmanagers failed", "err", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
all = append(all, ams...)
|
all = append(all, ams...)
|
||||||
|
|
|
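Every hunk in this file repeats the same translation: printf-style helpers (`Warnf`, `Errorf`) on the old `prometheus/common/log` logger become leveled, structured calls on a `go-kit/log` logger, with the message under a `msg` key and each datum as its own key/value pair. A minimal, self-contained sketch of the new calling convention; the logger construction here is illustrative and not part of this commit:

package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	// logfmt output to stderr; go-kit loggers are plain values, safe to pass around.
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

	dropped := 17
	// Old style: logger.Warnf("Alert batch larger than queue capacity, dropping %d alerts", dropped)
	// New style: the severity is a wrapper, and everything else is key/value pairs.
	level.Warn(logger).Log("msg", "Alert batch larger than queue capacity, dropping alerts", "num_dropped", dropped)
	// => level=warn msg="Alert batch larger than queue capacity, dropping alerts" num_dropped=17
}
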
@@ -26,7 +26,6 @@ import (

 	"golang.org/x/net/context"

-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/pkg/labels"

@@ -65,7 +64,7 @@ func TestPostPath(t *testing.T) {
 }

 func TestHandlerNextBatch(t *testing.T) {
-	h := New(&Options{}, log.Base())
+	h := New(&Options{}, nil)

 	for i := range make([]struct{}, 2*maxBatchSize+1) {
 		h.queue = append(h.queue, &Alert{

@@ -152,7 +151,7 @@ func TestHandlerSendAll(t *testing.T) {
 	defer server1.Close()
 	defer server2.Close()

-	h := New(&Options{}, log.Base())
+	h := New(&Options{}, nil)
 	h.alertmanagers = append(h.alertmanagers, &alertmanagerSet{
 		ams: []alertmanager{
 			alertmanagerMock{

@@ -215,7 +214,7 @@ func TestCustomDo(t *testing.T) {
 				Body: ioutil.NopCloser(nil),
 			}, nil
 		},
-	}, log.Base())
+	}, nil)

 	h.sendOne(context.Background(), nil, testURL, []byte(testBody))

@@ -237,7 +236,7 @@ func TestExternalLabels(t *testing.T) {
 				Replacement: "c",
 			},
 		},
-	}, log.Base())
+	}, nil)

 	// This alert should get the external label attached.
 	h.Send(&Alert{

@@ -277,7 +276,7 @@ func TestHandlerRelabel(t *testing.T) {
 				Replacement: "renamed",
 			},
 		},
-	}, log.Base())
+	}, nil)

 	// This alert should be dropped due to the configuration
 	h.Send(&Alert{

@@ -324,7 +323,7 @@ func TestHandlerQueueing(t *testing.T) {
 	h := New(&Options{
 		QueueCapacity: 3 * maxBatchSize,
 	},
-		log.Base(),
+		nil,
 	)
 	h.alertmanagers = append(h.alertmanagers, &alertmanagerSet{
 		ams: []alertmanager{

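The tests now pass `nil` where they used to pass `log.Base()`. That only works because constructors guard against a nil logger and substitute a no-op one. A sketch of that guard under illustrative names (`handler`/`newHandler` stand in for the real constructor):

package main

import (
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

// handler is a stand-in for a logging component such as the notifier.
type handler struct {
	logger log.Logger
}

// newHandler accepts nil so tests can simply pass nil instead of
// constructing a logger; a nop logger discards every Log call.
func newHandler(logger log.Logger) *handler {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	return &handler{logger: logger}
}

func main() {
	h := newHandler(nil)                                // as the tests now do: New(&Options{}, nil)
	level.Warn(h.logger).Log("msg", "silently dropped") // no output, and no nil-pointer panic
}
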
@@ -23,9 +23,10 @@ import (
 	"sync"
 	"time"

+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	opentracing "github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/timestamp"
 	"github.com/prometheus/prometheus/pkg/value"

@@ -239,7 +240,7 @@ type EngineOptions struct {
 var DefaultEngineOptions = &EngineOptions{
 	MaxConcurrentQueries: 20,
 	Timeout:              2 * time.Minute,
-	Logger:               log.Base(),
+	Logger:               log.NewNopLogger(),
 }

 // NewInstantQuery returns an evaluation query for the given expression at the given time.

@@ -517,7 +518,7 @@ func (ng *Engine) populateIterators(ctx context.Context, s *EvalStmt) (storage.Q
 			n.series, err = expandSeriesSet(querier.Select(n.LabelMatchers...))
 			if err != nil {
 				// TODO(fabxc): use multi-error.
-				ng.logger.Errorln("expand series set:", err)
+				level.Error(ng.logger).Log("msg", "error expanding series set", "err", err)
 				return false
 			}
 			for _, s := range n.series {

@@ -528,7 +529,7 @@ func (ng *Engine) populateIterators(ctx context.Context, s *EvalStmt) (storage.Q
 		case *MatrixSelector:
 			n.series, err = expandSeriesSet(querier.Select(n.LabelMatchers...))
 			if err != nil {
-				ng.logger.Errorln("expand series set:", err)
+				level.Error(ng.logger).Log("msg", "error expanding series set", "err", err)
 				return false
 			}
 			for _, s := range n.series {

@@ -580,19 +581,20 @@ func (ev *evaluator) error(err error) {
 // recover is the handler that turns panics into returns from the top level of evaluation.
 func (ev *evaluator) recover(errp *error) {
 	e := recover()
-	if e != nil {
-		if _, ok := e.(runtime.Error); ok {
-			// Print the stack trace but do not inhibit the running application.
-			buf := make([]byte, 64<<10)
-			buf = buf[:runtime.Stack(buf, false)]
-
-			ev.logger.Errorf("parser panic: %v\n%s", e, buf)
-			*errp = fmt.Errorf("unexpected error")
-		} else {
-			*errp = e.(error)
-		}
+	if e == nil {
+		return
+	}
+	if _, ok := e.(runtime.Error); ok {
+		// Print the stack trace but do not inhibit the running application.
+		buf := make([]byte, 64<<10)
+		buf = buf[:runtime.Stack(buf, false)]
+
+		level.Error(ev.logger).Log("msg", "runtime panic in parser", "err", e, "stacktrace", string(buf))
+		*errp = fmt.Errorf("unexpected error")
+	} else {
+		*errp = e.(error)
 	}
 }

 // evalScalar attempts to evaluate e to a Scalar value and errors otherwise.
 func (ev *evaluator) evalScalar(e Expr) Scalar {

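Besides the logger swap, the evaluator's recover handler was flattened into an early return, and the stack trace now travels as a structured `stacktrace` field instead of being interpolated into a format string. A self-contained sketch of the same shape; `recoverInto` and the trigger in `main` are illustrative, not code from this commit:

package main

import (
	"fmt"
	"os"
	"runtime"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

// recoverInto converts a panic into an error, logging runtime panics
// with their stack trace as a single structured field.
func recoverInto(logger log.Logger, errp *error) {
	e := recover()
	if e == nil {
		return // early return keeps the happy path unindented
	}
	if _, ok := e.(runtime.Error); ok {
		buf := make([]byte, 64<<10)
		buf = buf[:runtime.Stack(buf, false)]
		level.Error(logger).Log("msg", "runtime panic", "err", e, "stacktrace", string(buf))
		*errp = fmt.Errorf("unexpected error")
	} else {
		*errp = e.(error)
	}
}

func main() {
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
	var err error
	func() {
		defer recoverInto(logger, &err)
		var s []int
		_ = s[1] // index out of range -> runtime.Error
	}()
	fmt.Println("recovered:", err)
}
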
@@ -21,7 +21,7 @@ import (

 	"golang.org/x/net/context"

-	"github.com/prometheus/common/log"
+	"github.com/go-kit/kit/log"
 	"github.com/prometheus/prometheus/pkg/labels"
 )

@@ -296,9 +296,8 @@ load 10s
 }

 func TestRecoverEvaluatorRuntime(t *testing.T) {
-	ev := &evaluator{
-		logger: log.Base(),
-	}
+	ev := &evaluator{logger: log.NewNopLogger()}
 	var err error
 	defer ev.recover(&err)

@@ -312,7 +311,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) {
 }

 func TestRecoverEvaluatorError(t *testing.T) {
-	ev := &evaluator{logger: log.Base()}
+	ev := &evaluator{logger: log.NewNopLogger()}
 	var err error

 	e := fmt.Errorf("custom error")

@@ -16,13 +16,13 @@ package promql
 import (
 	"fmt"
 	"math"
+	"os"
 	"runtime"
 	"sort"
 	"strconv"
 	"strings"
 	"time"

-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/value"

@@ -342,7 +342,7 @@ func (p *parser) recover(errp *error) {
 		buf := make([]byte, 64<<10)
 		buf = buf[:runtime.Stack(buf, false)]

-		log.Errorf("parser panic: %v\n%s", e, buf)
+		fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
 		*errp = errUnexpected
 	} else {
 		*errp = e.(error)

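The parser has no injected logger, so rather than keep a package-global logging dependency, it now writes the panic report straight to stderr. A sketch of that logger-less fallback in a recover handler (the trigger is illustrative):

package main

import (
	"fmt"
	"os"
	"runtime"
)

// reportPanic is what a logger-less package can do in a recover handler:
// dump the panic value and stack to stderr instead of depending on a logging library.
func reportPanic(e interface{}) {
	buf := make([]byte, 64<<10)
	buf = buf[:runtime.Stack(buf, false)]
	fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
}

func main() {
	defer func() {
		if e := recover(); e != nil {
			reportPanic(e)
		}
	}()
	panic("boom")
}
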
@@ -25,8 +25,9 @@ import (
 	"time"
 	"unsafe"

+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"

 	"github.com/prometheus/common/version"

@@ -143,10 +144,14 @@ const maxAheadTime = 10 * time.Minute
 type labelsMutator func(labels.Labels) labels.Labels

 func newScrapePool(ctx context.Context, cfg *config.ScrapeConfig, app Appendable, logger log.Logger) *scrapePool {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+
 	client, err := httputil.NewClientFromConfig(cfg.HTTPClientConfig)
 	if err != nil {
 		// Any errors that could occur here should be caught during config validation.
-		logger.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err)
+		level.Error(logger).Log("msg", "Error creating HTTP client", "err", err)
 	}

 	buffers := pool.NewBytesPool(163, 100e6, 3)

@@ -162,7 +167,7 @@ func newScrapePool(ctx context.Context, cfg *config.ScrapeConfig, app Appendable
 	}
 	sp.newLoop = func(t *Target, s scraper) loop {
 		return newScrapeLoop(sp.ctx, s,
-			logger.With("target", t),
+			log.With(logger, "target", t),
 			buffers,
 			func(l labels.Labels) labels.Labels { return sp.mutateSampleLabels(l, t) },
 			func(l labels.Labels) labels.Labels { return sp.mutateReportSampleLabels(l, t) },

@@ -207,7 +212,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
 	client, err := httputil.NewClientFromConfig(cfg.HTTPClientConfig)
 	if err != nil {
 		// Any errors that could occur here should be caught during config validation.
-		sp.logger.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err)
+		level.Error(sp.logger).Log("msg", "Error creating HTTP client", "err", err)
 	}
 	sp.config = cfg
 	sp.client = client

@@ -251,7 +256,7 @@ func (sp *scrapePool) Sync(tgs []*config.TargetGroup) {
 	for _, tg := range tgs {
 		targets, err := targetsFromGroup(tg, sp.config)
 		if err != nil {
-			sp.logger.With("err", err).Error("creating targets failed")
+			level.Error(sp.logger).Log("msg", "creating targets failed", "err", err)
 			continue
 		}
 		all = append(all, targets...)

@@ -603,7 +608,7 @@ func newScrapeLoop(
 	appender func() storage.Appender,
 ) *scrapeLoop {
 	if l == nil {
-		l = log.Base()
+		l = log.NewNopLogger()
 	}
 	if buffers == nil {
 		buffers = pool.NewBytesPool(1e3, 1e6, 3)

@@ -678,7 +683,7 @@ mainLoop:
 				sl.lastScrapeSize = len(b)
 			}
 		} else {
-			sl.l.With("err", scrapeErr.Error()).Debug("scrape failed")
+			level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr.Error())
 			if errc != nil {
 				errc <- scrapeErr
 			}

@@ -688,11 +693,11 @@ mainLoop:
 		// we still call sl.append to trigger stale markers.
 		total, added, appErr := sl.append(b, start)
 		if appErr != nil {
-			sl.l.With("err", appErr).Warn("append failed")
+			level.Warn(sl.l).Log("msg", "append failed", "err", appErr)
 			// The append failed, probably due to a parse error or sample limit.
 			// Call sl.append again with an empty scrape to trigger stale markers.
 			if _, _, err := sl.append([]byte{}, start); err != nil {
-				sl.l.With("err", err).Error("append failed")
+				level.Warn(sl.l).Log("msg", "append failed", "err", err)
 			}
 		}

@@ -761,10 +766,10 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int
 	// If the target has since been recreated and scraped, the
 	// stale markers will be out of order and ignored.
 	if _, _, err := sl.append([]byte{}, staleTime); err != nil {
-		sl.l.With("err", err).Error("stale append failed")
+		level.Error(sl.l).Log("msg", "stale append failed", "err", err)
 	}
 	if err := sl.reportStale(staleTime); err != nil {
-		sl.l.With("err", err).Error("stale report failed")
+		level.Error(sl.l).Log("msg", "stale report failed", "err", err)
 	}
 }

@@ -833,17 +838,17 @@ loop:
 			ok = false
 		case storage.ErrOutOfOrderSample:
 			numOutOfOrder++
-			sl.l.With("timeseries", string(met)).Debug("Out of order sample")
+			level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met))
 			targetScrapeSampleOutOfOrder.Inc()
 			continue
 		case storage.ErrDuplicateSampleForTimestamp:
 			numDuplicates++
-			sl.l.With("timeseries", string(met)).Debug("Duplicate sample for timestamp")
+			level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met))
 			targetScrapeSampleDuplicate.Inc()
 			continue
 		case storage.ErrOutOfBounds:
 			numOutOfBounds++
-			sl.l.With("timeseries", string(met)).Debug("Out of bounds metric")
+			level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met))
 			targetScrapeSampleOutOfBounds.Inc()
 			continue
 		case errSampleLimit:

@@ -889,19 +894,19 @@ loop:
 			case storage.ErrOutOfOrderSample:
 				err = nil
 				numOutOfOrder++
-				sl.l.With("timeseries", string(met)).Debug("Out of order sample")
+				level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met))
 				targetScrapeSampleOutOfOrder.Inc()
 				continue
 			case storage.ErrDuplicateSampleForTimestamp:
 				err = nil
 				numDuplicates++
-				sl.l.With("timeseries", string(met)).Debug("Duplicate sample for timestamp")
+				level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met))
 				targetScrapeSampleDuplicate.Inc()
 				continue
 			case storage.ErrOutOfBounds:
 				err = nil
 				numOutOfBounds++
-				sl.l.With("timeseries", string(met)).Debug("Out of bounds metric")
+				level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met))
 				targetScrapeSampleOutOfBounds.Inc()
 				continue
 			case errSampleLimit:

@@ -927,13 +932,13 @@ loop:
 		err = sampleLimitErr
 	}
 	if numOutOfOrder > 0 {
-		sl.l.With("numDropped", numOutOfOrder).Warn("Error on ingesting out-of-order samples")
+		level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", numOutOfOrder)
 	}
 	if numDuplicates > 0 {
-		sl.l.With("numDropped", numDuplicates).Warn("Error on ingesting samples with different value but same timestamp")
+		level.Warn(sl.l).Log("msg", "Error on ingesting samples with different value but same timestamp", "num_dropped", numDuplicates)
 	}
 	if numOutOfBounds > 0 {
-		sl.l.With("numOutOfBounds", numOutOfBounds).Warn("Error on ingesting samples that are too old or are too far into the future")
+		level.Warn(sl.l).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "num_dropped", numOutOfBounds)
 	}
 	if err == nil {
 		sl.cache.forEachStale(func(lset labels.Labels) bool {

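Note the shift from method chaining (`logger.With("target", t)`) to the package function `log.With(logger, ...)`, which returns a new logger value with those key/value pairs bound. A sketch of the per-target pattern the scrape pool uses (target address is illustrative):

package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	base := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))

	// Bind scrape context once; every line logged through targetLogger
	// carries target="example:9100" automatically.
	targetLogger := log.With(base, "target", "example:9100")

	level.Debug(targetLogger).Log("msg", "Scrape failed", "err", "connection refused")
	// => target=example:9100 level=debug msg="Scrape failed" err="connection refused"
}
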
@@ -28,7 +28,6 @@ import (
 	"testing"
 	"time"

-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/net/context"

@@ -44,7 +43,7 @@ func TestNewScrapePool(t *testing.T) {
 	var (
 		app = &nopAppendable{}
 		cfg = &config.ScrapeConfig{}
-		sp  = newScrapePool(context.Background(), cfg, app, log.Base())
+		sp  = newScrapePool(context.Background(), cfg, app, nil)
 	)

 	if a, ok := sp.appendable.(*nopAppendable); !ok || a != app {

@@ -167,7 +166,7 @@ func TestScrapePoolReload(t *testing.T) {
 		targets: map[uint64]*Target{},
 		loops:   map[uint64]loop{},
 		newLoop: newLoop,
-		logger:  log.Base(),
+		logger:  nil,
 	}

 	// Reloading a scrape pool with a new scrape configuration must stop all scrape

@@ -231,7 +230,7 @@ func TestScrapePoolReload(t *testing.T) {
 func TestScrapePoolAppender(t *testing.T) {
 	cfg := &config.ScrapeConfig{}
 	app := &nopAppendable{}
-	sp := newScrapePool(context.Background(), cfg, app, log.Base())
+	sp := newScrapePool(context.Background(), cfg, app, nil)

 	wrapped := sp.appender()

@@ -500,7 +499,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
 		} else if numScrapes == 5 {
 			cancel()
 		}
-		return fmt.Errorf("Scrape failed.")
+		return fmt.Errorf("scrape failed")
 	}

 	go func() {

@@ -520,7 +519,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
 		t.Fatalf("Appended samples not as expected. Wanted: %d samples Got: %d", 22, len(appender.result))
 	}
 	if appender.result[0].v != 42.0 {
-		t.Fatalf("Appended first sample not as expected. Wanted: %f Got: %f", appender.result[0], 42)
+		t.Fatalf("Appended first sample not as expected. Wanted: %f Got: %f", appender.result[0].v, 42.0)
 	}
 	if !value.IsStaleNaN(appender.result[5].v) {
 		t.Fatalf("Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[5].v))

@@ -559,7 +558,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
 		} else if numScrapes == 3 {
 			cancel()
 		}
-		return fmt.Errorf("Scrape failed.")
+		return fmt.Errorf("scrape failed")
 	}

 	go func() {

@@ -579,7 +578,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
 		t.Fatalf("Appended samples not as expected. Wanted: %d samples Got: %d", 22, len(appender.result))
 	}
 	if appender.result[0].v != 42.0 {
-		t.Fatalf("Appended first sample not as expected. Wanted: %f Got: %f", appender.result[0], 42)
+		t.Fatalf("Appended first sample not as expected. Wanted: %f Got: %f", appender.result[0].v, 42.0)
 	}
 	if !value.IsStaleNaN(appender.result[5].v) {
 		t.Fatalf("Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[5].v))

@@ -16,7 +16,8 @@ package retrieval
 import (
 	"sync"

-	"github.com/prometheus/common/log"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"golang.org/x/net/context"

 	"github.com/prometheus/prometheus/config"

@@ -64,7 +65,7 @@ func NewTargetManager(app Appendable, logger log.Logger) *TargetManager {

 // Run starts background processing to handle target updates.
 func (tm *TargetManager) Run() {
-	tm.logger.Info("Starting target manager...")
+	level.Info(tm.logger).Log("msg", "Starting target manager...")

 	tm.mtx.Lock()

@@ -78,7 +79,7 @@ func (tm *TargetManager) Run() {

 // Stop all background processing.
 func (tm *TargetManager) Stop() {
-	tm.logger.Infoln("Stopping target manager...")
+	level.Info(tm.logger).Log("msg", "Stopping target manager...")

 	tm.mtx.Lock()
 	// Cancel the base context, this will cause all target providers to shut down

@@ -90,7 +91,7 @@ func (tm *TargetManager) Stop() {
 	// Wait for all scrape inserts to complete.
 	tm.wg.Wait()

-	tm.logger.Infoln("Target manager stopped.")
+	level.Info(tm.logger).Log("msg", "Target manager stopped")
 }

 func (tm *TargetManager) reload() {

@@ -106,7 +107,7 @@ func (tm *TargetManager) reload() {
 			ts = &targetSet{
 				ctx:    ctx,
 				cancel: cancel,
-				sp:     newScrapePool(ctx, scfg, tm.append, tm.logger),
+				sp:     newScrapePool(ctx, scfg, tm.append, log.With(tm.logger, "scrape_pool", scfg.JobName)),
 			}
 			ts.ts = discovery.NewTargetSet(ts.sp)

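With go-kit there is no global log level; severity filtering happens once, at the root logger, and is inherited by everything derived from it. The sketch below is roughly what the new promlog helper assembles for the `--log.level` flag; the exact construction here is an assumption based on go-kit's public API, not copied from this commit:

package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	var logger log.Logger
	logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
	// Filter once at the root; components like the target manager just log.
	logger = level.NewFilter(logger, level.AllowInfo())
	logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

	level.Debug(logger).Log("msg", "dropped by the filter")
	level.Info(logger).Log("msg", "Starting target manager...")
}
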
@@ -25,7 +25,8 @@ import (

 	html_template "html/template"

-	"github.com/prometheus/common/log"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/common/model"

 	"github.com/prometheus/prometheus/pkg/labels"

@@ -118,7 +119,7 @@ func NewAlertingRule(name string, vec promql.Expr, hold time.Duration, lbls, ann
 		labels:      lbls,
 		annotations: anns,
 		active:      map[uint64]*Alert{},
-		logger:      logger.With("alert", name),
+		logger:      logger,
 	}
 }

@@ -203,7 +204,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, engine *promql.En
 		result, err := tmpl.Expand()
 		if err != nil {
 			result = fmt.Sprintf("<error expanding template: %s>", err)
-			r.logger.Warnf("Error expanding alert template %v with data '%v': %s", r.Name(), tmplData, err)
+			level.Warn(r.logger).Log("msg", "Expanding alert template failed", "err", err, "data", tmplData)
 		}
 		return result
 	}

@@ -16,7 +16,6 @@ package rules
 import (
 	"testing"

-	"github.com/prometheus/common/log"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/promql"
 )

@@ -26,7 +25,7 @@ func TestAlertingRuleHTMLSnippet(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	rule := NewAlertingRule("testrule", expr, 0, labels.FromStrings("html", "<b>BOLD</b>"), labels.FromStrings("html", "<b>BOLD</b>"), log.Base())
+	rule := NewAlertingRule("testrule", expr, 0, labels.FromStrings("html", "<b>BOLD</b>"), labels.FromStrings("html", "<b>BOLD</b>"), nil)

 	const want = `alert: <a href="/test/prefix/graph?g0.expr=ALERTS%7Balertname%3D%22testrule%22%7D&g0.tab=0">testrule</a>
expr: <a href="/test/prefix/graph?g0.expr=foo%7Bhtml%3D%22%3Cb%3EBOLD%3Cb%3E%22%7D&g0.tab=0">foo{html="<b>BOLD<b>"}</a>

@@ -25,8 +25,9 @@ import (

 	html_template "html/template"

+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/log"
 	"golang.org/x/net/context"

 	"github.com/prometheus/prometheus/config"

@@ -151,7 +152,7 @@ func NewGroup(name, file string, interval time.Duration, rules []Rule, opts *Man
 		seriesInPreviousEval: make([]map[string]labels.Labels, len(rules)),
 		done:                 make(chan struct{}),
 		terminated:           make(chan struct{}),
-		logger:               opts.Logger.With("group", name),
+		logger:               log.With(opts.Logger, "group", name),
 	}
 }

@@ -308,7 +309,7 @@ func (g *Group) Eval(ts time.Time) {
 			// Canceled queries are intentional termination of queries. This normally
 			// happens on shutdown and thus we skip logging of any errors here.
 			if _, ok := err.(promql.ErrQueryCanceled); !ok {
-				g.logger.Warnf("Error while evaluating rule %q: %s", rule, err)
+				level.Warn(g.logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err)
 			}
 			evalFailures.WithLabelValues(rtyp).Inc()
 			return

@@ -324,7 +325,7 @@ func (g *Group) Eval(ts time.Time) {

 		app, err := g.opts.Appendable.Appender()
 		if err != nil {
-			g.logger.With("err", err).Warn("creating appender failed")
+			level.Warn(g.logger).Log("msg", "creating appender failed", "err", err)
 			return
 		}

@@ -334,22 +335,22 @@ func (g *Group) Eval(ts time.Time) {
 				switch err {
 				case storage.ErrOutOfOrderSample:
 					numOutOfOrder++
-					g.logger.With("sample", s).With("err", err).Debug("Rule evaluation result discarded")
+					level.Debug(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
 				case storage.ErrDuplicateSampleForTimestamp:
 					numDuplicates++
-					g.logger.With("sample", s).With("err", err).Debug("Rule evaluation result discarded")
+					level.Debug(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
 				default:
-					g.logger.With("sample", s).With("err", err).Warn("Rule evaluation result discarded")
+					level.Warn(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
 				}
 			} else {
 				seriesReturned[s.Metric.String()] = s.Metric
 			}
 		}
 		if numOutOfOrder > 0 {
-			g.logger.With("numDropped", numOutOfOrder).Warn("Error on ingesting out-of-order result from rule evaluation")
+			level.Warn(g.logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "numDropped", numOutOfOrder)
 		}
 		if numDuplicates > 0 {
-			g.logger.With("numDropped", numDuplicates).Warn("Error on ingesting results from rule evaluation with different value but same timestamp")
+			level.Warn(g.logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "numDropped", numDuplicates)
 		}

 		for metric, lset := range g.seriesInPreviousEval[i] {

@@ -362,12 +363,12 @@ func (g *Group) Eval(ts time.Time) {
 				// Do not count these in logging, as this is expected if series
 				// is exposed from a different rule.
 				default:
-					g.logger.With("sample", metric).With("err", err).Warn("adding stale sample failed")
+					level.Warn(g.logger).Log("msg", "adding stale sample failed", "sample", metric, "err", err)
 				}
 			}
 		}
 		if err := app.Commit(); err != nil {
-			g.logger.With("err", err).Warn("rule sample appending failed")
+			level.Warn(g.logger).Log("msg", "rule sample appending failed", "err", err)
 		} else {
 			g.seriesInPreviousEval[i] = seriesReturned
 		}

@@ -451,13 +452,13 @@ func (m *Manager) Stop() {
 	m.mtx.Lock()
 	defer m.mtx.Unlock()

-	m.logger.Info("Stopping rule manager...")
+	level.Info(m.logger).Log("msg", "Stopping rule manager...")

 	for _, eg := range m.groups {
 		eg.stop()
 	}

-	m.logger.Info("Rule manager stopped.")
+	level.Info(m.logger).Log("msg", "Rule manager stopped")
 }

 // ApplyConfig updates the rule manager's state as the config requires. If

@@ -481,7 +482,7 @@ func (m *Manager) ApplyConfig(conf *config.Config) error {
 	groups, errs := m.loadGroups(time.Duration(conf.GlobalConfig.EvaluationInterval), files...)
 	if errs != nil {
 		for _, e := range errs {
-			m.logger.Errorln(e)
+			level.Error(m.logger).Log("msg", "loading groups failed", "err", e)
 		}
 		return errors.New("error loading rules, previous rule set restored")
 	}

@@ -555,7 +556,7 @@ func (m *Manager) loadGroups(interval time.Duration, filenames ...string) (map[s
 				time.Duration(r.For),
 				labels.FromMap(r.Labels),
 				labels.FromMap(r.Annotations),
-				m.logger,
+				log.With(m.logger, "alert", r.Alert),
 			))
 			continue
 		}

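The alert-name context that `NewAlertingRule` used to attach internally is now bound by the caller (`log.With(m.logger, "alert", r.Alert)`), and each group binds `group=name` the same way. Binding at construction time layers context naturally as the object graph is built. A sketch with illustrative names (`rule`, `newRule`, the group and alert values are made up):

package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

type rule struct{ logger log.Logger }

func newRule(logger log.Logger) *rule { return &rule{logger: logger} }

func main() {
	root := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))

	// Callers layer context as they construct the object graph; the leaf
	// component stays ignorant of where its logger came from.
	groupLogger := log.With(root, "group", "node.rules")
	r := newRule(log.With(groupLogger, "alert", "HighLoad"))

	level.Warn(r.logger).Log("msg", "Evaluating rule failed", "err", "query timed out")
	// => group=node.rules alert=HighLoad level=warn msg="Evaluating rule failed" err="query timed out"
}
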
@@ -22,7 +22,7 @@ import (
 	"testing"
 	"time"

-	"github.com/prometheus/common/log"
+	"github.com/go-kit/kit/log"
 	"github.com/prometheus/common/model"

 	"github.com/prometheus/prometheus/pkg/labels"

@@ -58,8 +58,7 @@ func TestAlertingRule(t *testing.T) {
 		expr,
 		time.Minute,
 		labels.FromStrings("severity", "{{\"c\"}}ritical"),
-		nil,
-		log.Base(),
+		nil, nil,
 	)

 	baseTime := time.Unix(0, 0)

@@ -167,7 +166,7 @@ func TestStaleness(t *testing.T) {
 		QueryEngine: engine,
 		Appendable:  storage,
 		Context:     context.Background(),
-		Logger:      log.Base(),
+		Logger:      log.NewNopLogger(),
 	}

 	expr, err := promql.ParseExpr("a + 1")

@@ -244,7 +243,7 @@ func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.Point, error) {
 func TestCopyState(t *testing.T) {
 	oldGroup := &Group{
 		rules: []Rule{
-			NewAlertingRule("alert", nil, 0, nil, nil, log.Base()),
+			NewAlertingRule("alert", nil, 0, nil, nil, nil),
 			NewRecordingRule("rule1", nil, nil),
 			NewRecordingRule("rule2", nil, nil),
 			NewRecordingRule("rule3", nil, nil),

@@ -264,7 +263,7 @@ func TestCopyState(t *testing.T) {
 			NewRecordingRule("rule3", nil, nil),
 			NewRecordingRule("rule3", nil, nil),
 			NewRecordingRule("rule3", nil, nil),
-			NewAlertingRule("alert", nil, 0, nil, nil, log.Base()),
+			NewAlertingRule("alert", nil, 0, nil, nil, nil),
 			NewRecordingRule("rule1", nil, nil),
 			NewRecordingRule("rule4", nil, nil),
 		},

@@ -17,19 +17,23 @@ import (
 	"container/heap"
 	"strings"

-	"github.com/prometheus/common/log"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/prometheus/pkg/labels"
 )

 type fanout struct {
+	logger log.Logger
+
 	primary     Storage
 	secondaries []Storage
 }

 // NewFanout returns a new fan-out Storage, which proxies reads and writes
 // through to multiple underlying storages.
-func NewFanout(primary Storage, secondaries ...Storage) Storage {
+func NewFanout(logger log.Logger, primary Storage, secondaries ...Storage) Storage {
 	return &fanout{
+		logger:      logger,
 		primary:     primary,
 		secondaries: secondaries,
 	}

@@ -74,6 +78,7 @@ func (f *fanout) Appender() (Appender, error) {
 		secondaries = append(secondaries, appender)
 	}
 	return &fanoutAppender{
+		logger:      f.logger,
 		primary:     primary,
 		secondaries: secondaries,
 	}, nil

@@ -97,6 +102,8 @@ func (f *fanout) Close() error {

 // fanoutAppender implements Appender.
 type fanoutAppender struct {
+	logger log.Logger
+
 	primary     Appender
 	secondaries []Appender
 }

@@ -136,7 +143,7 @@ func (f *fanoutAppender) Commit() (err error) {
 			err = appender.Commit()
 		} else {
 			if rollbackErr := appender.Rollback(); rollbackErr != nil {
-				log.Errorf("Squashed rollback error on commit: %v", rollbackErr)
+				level.Error(f.logger).Log("msg", "Squashed rollback error on commit", "err", rollbackErr)
 			}
 		}
 	}

@@ -151,7 +158,7 @@ func (f *fanoutAppender) Rollback() (err error) {
 		if err == nil {
 			err = rollbackErr
 		} else if rollbackErr != nil {
-			log.Errorf("Squashed rollback error on rollback: %v", rollbackErr)
+			level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr)
 		}
 	}
 	return nil

@@ -370,8 +377,7 @@ func (c *mergeIterator) Seek(t int64) bool {

 func (c *mergeIterator) At() (t int64, v float64) {
 	if len(c.h) == 0 {
-		log.Error("mergeIterator.At() called after .Next() returned false.")
-		return 0, 0
+		panic("mergeIterator.At() called after .Next() returned false.")
 	}

 	// TODO do I need to dedupe or just merge?

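This file shows the other side of dropping the package-global logger: since `log.Errorf` no longer exists as a global, the fanout storage and appender carry an injected logger on the struct, and the `mergeIterator.At` misuse case, which has no logger in scope and is a programming error, becomes a panic instead of a logged-and-ignored return. A small sketch of the struct-injected pattern; the `appender` type and its stubbed failure are illustrative:

package main

import (
	"errors"
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

// appender stands in for a component that used to call a package-global
// log.Errorf and now carries its own logger value.
type appender struct {
	logger log.Logger
}

func (a *appender) rollback() error { return errors.New("disk full") } // stubbed failure

func (a *appender) commit() {
	if err := a.rollback(); err != nil {
		// Squash but record the secondary error, as the fanout appender does.
		level.Error(a.logger).Log("msg", "Squashed rollback error on commit", "err", err)
	}
}

func main() {
	a := &appender{logger: log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))}
	a.commit()
}
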
@@ -20,8 +20,9 @@ import (

 	"golang.org/x/time/rate"

+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/relabel"

@@ -171,6 +172,8 @@ type StorageClient interface {
 // QueueManager manages a queue of samples to be sent to the Storage
 // indicated by the provided StorageClient.
 type QueueManager struct {
+	logger log.Logger
+
 	cfg            QueueManagerConfig
 	externalLabels model.LabelSet
 	relabelConfigs []*config.RelabelConfig

@@ -190,8 +193,12 @@ type QueueManager struct {
 }

 // NewQueueManager builds a new QueueManager.
-func NewQueueManager(cfg QueueManagerConfig, externalLabels model.LabelSet, relabelConfigs []*config.RelabelConfig, client StorageClient) *QueueManager {
+func NewQueueManager(logger log.Logger, cfg QueueManagerConfig, externalLabels model.LabelSet, relabelConfigs []*config.RelabelConfig, client StorageClient) *QueueManager {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
 	t := &QueueManager{
+		logger:         logger,
 		cfg:            cfg,
 		externalLabels: externalLabels,
 		relabelConfigs: relabelConfigs,

@@ -244,7 +251,7 @@ func (t *QueueManager) Append(s *model.Sample) error {
 	} else {
 		droppedSamplesTotal.WithLabelValues(t.queueName).Inc()
 		if t.logLimiter.Allow() {
-			log.Warn("Remote storage queue full, discarding sample. Multiple subsequent messages of this kind may be suppressed.")
+			level.Warn(t.logger).Log("msg", "Remote storage queue full, discarding sample. Multiple subsequent messages of this kind may be suppressed.")
 		}
 	}
 	return nil

@@ -272,14 +279,15 @@ func (t *QueueManager) Start() {
 // Stop stops sending samples to the remote storage and waits for pending
 // sends to complete.
 func (t *QueueManager) Stop() {
-	log.Infof("Stopping remote storage...")
+	level.Info(t.logger).Log("msg", "Stopping remote storage...")
 	close(t.quit)
 	t.wg.Wait()

 	t.shardsMtx.Lock()
 	defer t.shardsMtx.Unlock()
 	t.shards.stop()
-	log.Info("Remote storage stopped.")
+
+	level.Info(t.logger).Log("msg", "Remote storage stopped.")
 }

 func (t *QueueManager) updateShardsLoop() {

@@ -323,15 +331,17 @@ func (t *QueueManager) calculateDesiredShards() {
 		timePerSample = samplesOutDuration / samplesOut
 		desiredShards = (timePerSample * (samplesIn + samplesPending + t.integralAccumulator)) / float64(time.Second)
 	)
-	log.Debugf("QueueManager.caclulateDesiredShards samplesIn=%f, samplesOut=%f, samplesPending=%f, desiredShards=%f",
-		samplesIn, samplesOut, samplesPending, desiredShards)
+	level.Debug(t.logger).Log("msg", "QueueManager.caclulateDesiredShards",
+		"samplesIn", samplesIn, "samplesOut", samplesOut,
+		"samplesPending", samplesPending, "desiredShards", desiredShards)

 	// Changes in the number of shards must be greater than shardToleranceFraction.
 	var (
 		lowerBound = float64(t.numShards) * (1. - shardToleranceFraction)
 		upperBound = float64(t.numShards) * (1. + shardToleranceFraction)
 	)
-	log.Debugf("QueueManager.updateShardsLoop %f <= %f <= %f", lowerBound, desiredShards, upperBound)
+	level.Debug(t.logger).Log("msg", "QueueManager.updateShardsLoop",
+		"lowerBound", lowerBound, "desiredShards", desiredShards, "upperBound", upperBound)
 	if lowerBound <= desiredShards && desiredShards <= upperBound {
 		return
 	}

@@ -350,10 +360,10 @@ func (t *QueueManager) calculateDesiredShards() {
 	// to stay close to shardUpdateDuration.
 	select {
 	case t.reshardChan <- numShards:
-		log.Infof("Remote storage resharding from %d to %d shards.", t.numShards, numShards)
+		level.Info(t.logger).Log("msg", "Remote storage resharding", "from", t.numShards, "to", numShards)
 		t.numShards = numShards
 	default:
-		log.Infof("Currently resharding, skipping.")
+		level.Info(t.logger).Log("msg", "Currently resharding, skipping.")
 	}
 }

@@ -453,9 +463,9 @@ func (s *shards) runShard(i int) {
 		case sample, ok := <-queue:
 			if !ok {
 				if len(pendingSamples) > 0 {
-					log.Debugf("Flushing %d samples to remote storage...", len(pendingSamples))
+					level.Debug(s.qm.logger).Log("msg", "Flushing samples to remote storage...", "count", len(pendingSamples))
 					s.sendSamples(pendingSamples)
-					log.Debugf("Done flushing.")
+					level.Debug(s.qm.logger).Log("msg", "Done flushing.")
 				}
 				return
 			}

@@ -499,7 +509,7 @@ func (s *shards) sendSamplesWithBackoff(samples model.Samples) {
 			return
 		}

-		log.Warnf("Error sending %d samples to remote storage: %s", len(samples), err)
+		level.Warn(s.qm.logger).Log("msg", "Error sending samples to remote storage", "count", len(samples), "err", err)
 		if _, ok := err.(recoverableError); !ok {
 			break
 		}

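The queue manager's `Debugf` calls packed several values into one format string; after the migration each datum is its own key/value pair, which keeps values machine-parseable. A sketch of the before/after, with made-up numbers:

package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))

	samplesIn, samplesOut, desiredShards := 1200.0, 1100.0, 3.2

	// Before: log.Debugf("shards samplesIn=%f samplesOut=%f desired=%f", ...)
	// After: each datum is its own field, so downstream tooling can filter
	// on samplesIn > 1000 instead of regexp-parsing a message string.
	level.Debug(logger).Log("msg", "calculateDesiredShards",
		"samplesIn", samplesIn, "samplesOut", samplesOut,
		"desiredShards", desiredShards)
}
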
@@ -99,7 +99,7 @@ func TestSampleDelivery(t *testing.T) {

 	cfg := defaultQueueManagerConfig
 	cfg.MaxShards = 1
-	m := NewQueueManager(cfg, nil, nil, c)
+	m := NewQueueManager(nil, cfg, nil, nil, c)

 	// These should be received by the client.
 	for _, s := range samples[:len(samples)/2] {

@@ -133,7 +133,7 @@ func TestSampleDeliveryOrder(t *testing.T) {

 	c := NewTestStorageClient()
 	c.expectSamples(samples)
-	m := NewQueueManager(defaultQueueManagerConfig, nil, nil, c)
+	m := NewQueueManager(nil, defaultQueueManagerConfig, nil, nil, c)

 	// These should be received by the client.
 	for _, s := range samples {

@@ -211,7 +211,7 @@ func TestSpawnNotMoreThanMaxConcurrentSendsGoroutines(t *testing.T) {
 	cfg := defaultQueueManagerConfig
 	cfg.MaxShards = 1
 	cfg.QueueCapacity = n
-	m := NewQueueManager(cfg, nil, nil, c)
+	m := NewQueueManager(nil, cfg, nil, nil, c)

 	m.Start()

@@ -16,6 +16,7 @@ package remote
 import (
 	"sync"

+	"github.com/go-kit/kit/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
 )

@@ -23,6 +24,7 @@ import (
 // Storage represents all the remote read and write endpoints. It implements
 // storage.Storage.
 type Storage struct {
+	logger log.Logger
 	mtx    sync.RWMutex

 	// For writes

@@ -33,6 +35,13 @@ type Storage struct {
 	externalLabels model.LabelSet
 }

+func NewStorage(l log.Logger) *Storage {
+	if l == nil {
+		l = log.NewNopLogger()
+	}
+	return &Storage{logger: l}
+}
+
 // ApplyConfig updates the state as the new config requires.
 func (s *Storage) ApplyConfig(conf *config.Config) error {
 	s.mtx.Lock()

@@ -53,6 +62,7 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
 			return err
 		}
 		newQueues = append(newQueues, NewQueueManager(
+			s.logger,
 			defaultQueueManagerConfig,
 			conf.GlobalConfig.ExternalLabels,
 			rwConf.WriteRelabelConfigs,

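The new `NewStorage` shows the threading pattern end to end: the outer component takes one logger, guards it against nil once, and hands it down to everything it constructs. A condensed sketch with illustrative stand-in types (`storage`/`queue` mirror the shape of `Storage`/`QueueManager` here, nothing more):

package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

type queue struct{ logger log.Logger }

func newQueue(logger log.Logger) *queue {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	return &queue{logger: logger}
}

type storage struct {
	logger log.Logger
	queues []*queue
}

func newStorage(l log.Logger) *storage {
	if l == nil {
		l = log.NewNopLogger()
	}
	s := &storage{logger: l}
	// The outer component hands its logger down instead of each layer
	// reaching for a package-global.
	s.queues = append(s.queues, newQueue(s.logger))
	return s
}

func main() {
	s := newStorage(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)))
	level.Info(s.queues[0].logger).Log("msg", "queue ready")
}
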
@@ -17,6 +17,7 @@ import (
 	"time"
 	"unsafe"

+	"github.com/go-kit/kit/log"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"

@@ -55,7 +56,7 @@ type Options struct {
 }

 // Open returns a new storage backed by a TSDB database that is configured for Prometheus.
-func Open(path string, r prometheus.Registerer, opts *Options) (*tsdb.DB, error) {
+func Open(path string, l log.Logger, r prometheus.Registerer, opts *Options) (*tsdb.DB, error) {
 	// Start with smallest block duration and create exponential buckets until the exceed the
 	// configured maximum block duration.
 	rngs := tsdb.ExponentialBlockRanges(int64(time.Duration(opts.MinBlockDuration).Seconds()*1000), 10, 3)

@@ -67,7 +68,7 @@ func Open(path string, r prometheus.Registerer, opts *Options) (*tsdb.DB, error)
 		}
 	}

-	db, err := tsdb.Open(path, nil, r, &tsdb.Options{
+	db, err := tsdb.Open(path, l, r, &tsdb.Options{
 		WALFlushInterval:  10 * time.Second,
 		RetentionDuration: uint64(time.Duration(opts.Retention).Seconds() * 1000),
 		BlockRanges:       rngs,

@@ -18,7 +18,6 @@ import (
 	"os"
 	"time"

-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/storage/tsdb"

@@ -32,11 +31,9 @@ func NewStorage(t T) storage.Storage {
 		t.Fatalf("Opening test dir failed: %s", err)
 	}

-	log.With("dir", dir).Debugln("opening test storage")
-
 	// Tests just load data for a series sequentially. Thus we
 	// need a long appendable window.
-	db, err := tsdb.Open(dir, nil, &tsdb.Options{
+	db, err := tsdb.Open(dir, nil, nil, &tsdb.Options{
 		MinBlockDuration: model.Duration(24 * time.Hour),
 		MaxBlockDuration: model.Duration(24 * time.Hour),
 	})

@@ -52,8 +49,6 @@ type testStorage struct {
 }

 func (s testStorage) Close() error {
-	log.With("dir", s.dir).Debugln("closing test storage")
-
 	if err := s.Storage.Close(); err != nil {
 		return err
 	}

@@ -19,8 +19,9 @@ import (
 	"strings"
 	"time"

+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/log"
 	"github.com/samuel/go-zookeeper/zk"
 )

@@ -50,7 +51,7 @@ type ZookeeperLogger struct {

 // Implements zk.Logger
 func (zl ZookeeperLogger) Printf(s string, i ...interface{}) {
-	zl.logger.Infof(s, i...)
+	level.Info(zl.logger).Log("msg", fmt.Sprintf(s, i...))
 }

 type ZookeeperTreeCache struct {
@@ -113,20 +114,20 @@ func (tc *ZookeeperTreeCache) loop(path string) {

 	err := tc.recursiveNodeUpdate(path, tc.head)
 	if err != nil {
-		tc.logger.Errorf("Error during initial read of Zookeeper: %s", err)
+		level.Error(tc.logger).Log("msg", "Error during initial read of Zookeeper", "err", err)
 		failure()
 	}

 	for {
 		select {
 		case ev := <-tc.head.events:
-			tc.logger.Debugf("Received Zookeeper event: %s", ev)
+			level.Debug(tc.logger).Log("msg", "Received Zookeeper event", "event", ev)
 			if failureMode {
 				continue
 			}

 			if ev.Type == zk.EventNotWatching {
-				tc.logger.Infof("Lost connection to Zookeeper.")
+				level.Info(tc.logger).Log("msg", "Lost connection to Zookeeper.")
 				failure()
 			} else {
 				path := strings.TrimPrefix(ev.Path, tc.prefix)
@@ -147,15 +148,15 @@ func (tc *ZookeeperTreeCache) loop(path string) {

 					err := tc.recursiveNodeUpdate(ev.Path, node)
 					if err != nil {
-						tc.logger.Errorf("Error during processing of Zookeeper event: %s", err)
+						level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", err)
 						failure()
 					} else if tc.head.data == nil {
-						tc.logger.Errorf("Error during processing of Zookeeper event: path %s no longer exists", tc.prefix)
+						level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix)
 						failure()
 					}
 				}
 			case <-retryChan:
-				tc.logger.Infof("Attempting to resync state with Zookeeper")
+				level.Info(tc.logger).Log("msg", "Attempting to resync state with Zookeeper")
 				previousState := &zookeeperTreeCacheNode{
 					children: tc.head.children,
 				}
@@ -163,13 +164,13 @@ func (tc *ZookeeperTreeCache) loop(path string) {
 				tc.head.children = make(map[string]*zookeeperTreeCacheNode)

 				if err := tc.recursiveNodeUpdate(tc.prefix, tc.head); err != nil {
-					tc.logger.Errorf("Error during Zookeeper resync: %s", err)
+					level.Error(tc.logger).Log("msg", "Error during Zookeeper resync", "err", err)
 					// Revert to our previous state.
 					tc.head.children = previousState.children
 					failure()
 				} else {
 					tc.resyncState(tc.prefix, tc.head, previousState)
-					tc.logger.Infof("Zookeeper resync successful")
+					level.Info(tc.logger).Log("msg", "Zookeeper resync successful")
 					failureMode = false
 				}
 			case <-tc.stop:
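Every call-site change in this file follows the same mechanical pattern: a printf-style method on the old common/log logger becomes a level wrapper around a plain go-kit logger plus structured key/value pairs. A standalone sketch of the pattern, where the logger construction and the error value are illustrative:

    package main

    import (
        "errors"
        "os"

        "github.com/go-kit/kit/log"
        "github.com/go-kit/kit/log/level"
    )

    func main() {
        logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
        err := errors.New("connection refused") // stand-in for a real failure

        // Before (common/log): logger.Errorf("Error during initial read of Zookeeper: %s", err)
        // After (go-kit): the severity and the error are both structured fields.
        level.Error(logger).Log("msg", "Error during initial read of Zookeeper", "err", err)
    }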
vendor/github.com/go-kit/kit/log/level/doc.go (new file, generated and vendored, 22 lines)
@@ -0,0 +1,22 @@
+// Package level implements leveled logging on top of package log. To use the
+// level package, create a logger as per normal in your func main, and wrap it
+// with level.NewFilter.
+//
+//    var logger log.Logger
+//    logger = log.NewLogfmtLogger(os.Stderr)
+//    logger = level.NewFilter(logger, level.AllowInfo()) // <--
+//    logger = log.With(logger, "ts", log.DefaultTimestampUTC)
+//
+// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error
+// helper methods to emit leveled log events.
+//
+//    logger.Log("foo", "bar") // as normal, no level
+//    level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get())
+//    if value > 100 {
+//        level.Error(logger).Log("value", value)
+//    }
+//
+// NewFilter allows precise control over what happens when a log event is
+// emitted without a level key, or if a squelched level is used. Check the
+// Option functions for details.
+package level
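A runnable version of the example in the package comment above, using the AllowInfo option actually exported by this vendored revision:

    package main

    import (
        "os"

        "github.com/go-kit/kit/log"
        "github.com/go-kit/kit/log/level"
    )

    func main() {
        var logger log.Logger
        logger = log.NewLogfmtLogger(os.Stderr)
        logger = level.NewFilter(logger, level.AllowInfo())
        logger = log.With(logger, "ts", log.DefaultTimestampUTC)

        level.Debug(logger).Log("msg", "filtered out") // below info: squelched
        level.Info(logger).Log("msg", "emitted")       // passes the filter
        logger.Log("foo", "bar")                       // no level: passes by default
    }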
vendor/github.com/go-kit/kit/log/level/level.go (new file, generated and vendored, 205 lines)
@@ -0,0 +1,205 @@
+package level
+
+import "github.com/go-kit/kit/log"
+
+// Error returns a logger that includes a Key/ErrorValue pair.
+func Error(logger log.Logger) log.Logger {
+	return log.WithPrefix(logger, Key(), ErrorValue())
+}
+
+// Warn returns a logger that includes a Key/WarnValue pair.
+func Warn(logger log.Logger) log.Logger {
+	return log.WithPrefix(logger, Key(), WarnValue())
+}
+
+// Info returns a logger that includes a Key/InfoValue pair.
+func Info(logger log.Logger) log.Logger {
+	return log.WithPrefix(logger, Key(), InfoValue())
+}
+
+// Debug returns a logger that includes a Key/DebugValue pair.
+func Debug(logger log.Logger) log.Logger {
+	return log.WithPrefix(logger, Key(), DebugValue())
+}
+
+// NewFilter wraps next and implements level filtering. See the commentary on
+// the Option functions for a detailed description of how to configure levels.
+// If no options are provided, all leveled log events created with Debug,
+// Info, Warn or Error helper methods are squelched and non-leveled log
+// events are passed to next unmodified.
+func NewFilter(next log.Logger, options ...Option) log.Logger {
+	l := &logger{
+		next: next,
+	}
+	for _, option := range options {
+		option(l)
+	}
+	return l
+}
+
+type logger struct {
+	next           log.Logger
+	allowed        level
+	squelchNoLevel bool
+	errNotAllowed  error
+	errNoLevel     error
+}
+
+func (l *logger) Log(keyvals ...interface{}) error {
+	var hasLevel, levelAllowed bool
+	for i := 1; i < len(keyvals); i += 2 {
+		if v, ok := keyvals[i].(*levelValue); ok {
+			hasLevel = true
+			levelAllowed = l.allowed&v.level != 0
+			break
+		}
+	}
+	if !hasLevel && l.squelchNoLevel {
+		return l.errNoLevel
+	}
+	if hasLevel && !levelAllowed {
+		return l.errNotAllowed
+	}
+	return l.next.Log(keyvals...)
+}
+
+// Option sets a parameter for the leveled logger.
+type Option func(*logger)
+
+// AllowAll is an alias for AllowDebug.
+func AllowAll() Option {
+	return AllowDebug()
+}
+
+// AllowDebug allows error, warn, info and debug level log events to pass.
+func AllowDebug() Option {
+	return allowed(levelError | levelWarn | levelInfo | levelDebug)
+}
+
+// AllowInfo allows error, warn and info level log events to pass.
+func AllowInfo() Option {
+	return allowed(levelError | levelWarn | levelInfo)
+}
+
+// AllowWarn allows error and warn level log events to pass.
+func AllowWarn() Option {
+	return allowed(levelError | levelWarn)
+}
+
+// AllowError allows only error level log events to pass.
+func AllowError() Option {
+	return allowed(levelError)
+}
+
+// AllowNone allows no leveled log events to pass.
+func AllowNone() Option {
+	return allowed(0)
+}
+
+func allowed(allowed level) Option {
+	return func(l *logger) { l.allowed = allowed }
+}
+
+// ErrNotAllowed sets the error to return from Log when it squelches a log
+// event disallowed by the configured Allow[Level] option. By default,
+// ErrNotAllowed is nil; in this case the log event is squelched with no
+// error.
+func ErrNotAllowed(err error) Option {
+	return func(l *logger) { l.errNotAllowed = err }
+}
+
+// SquelchNoLevel instructs Log to squelch log events with no level, so that
+// they don't proceed through to the wrapped logger. If SquelchNoLevel is set
+// to true and a log event is squelched in this way, the error value
+// configured with ErrNoLevel is returned to the caller.
+func SquelchNoLevel(squelch bool) Option {
+	return func(l *logger) { l.squelchNoLevel = squelch }
+}
+
+// ErrNoLevel sets the error to return from Log when it squelches a log event
+// with no level. By default, ErrNoLevel is nil; in this case the log event is
+// squelched with no error.
+func ErrNoLevel(err error) Option {
+	return func(l *logger) { l.errNoLevel = err }
+}
+
+// NewInjector wraps next and returns a logger that adds a Key/level pair to
+// the beginning of log events that don't already contain a level. In effect,
+// this gives a default level to logs without a level.
+func NewInjector(next log.Logger, level Value) log.Logger {
+	return &injector{
+		next:  next,
+		level: level,
+	}
+}
+
+type injector struct {
+	next  log.Logger
+	level interface{}
+}
+
+func (l *injector) Log(keyvals ...interface{}) error {
+	for i := 1; i < len(keyvals); i += 2 {
+		if _, ok := keyvals[i].(*levelValue); ok {
+			return l.next.Log(keyvals...)
+		}
+	}
+	kvs := make([]interface{}, len(keyvals)+2)
+	kvs[0], kvs[1] = key, l.level
+	copy(kvs[2:], keyvals)
+	return l.next.Log(kvs...)
+}
+
+// Value is the interface that each of the canonical level values implement.
+// It contains unexported methods that prevent types from other packages from
+// implementing it, guaranteeing that NewFilter can distinguish the levels
+// defined in this package from all other values.
+type Value interface {
+	String() string
+	levelVal()
+}
+
+// Key returns the unique key added to log events by the loggers in this
+// package.
+func Key() interface{} { return key }
+
+// ErrorValue returns the unique value added to log events by Error.
+func ErrorValue() Value { return errorValue }
+
+// WarnValue returns the unique value added to log events by Warn.
+func WarnValue() Value { return warnValue }
+
+// InfoValue returns the unique value added to log events by Info.
+func InfoValue() Value { return infoValue }
+
+// DebugValue returns the unique value added to log events by Debug.
+func DebugValue() Value { return debugValue }
+
+var (
+	// key is of type interface{} so that it allocates once during package
+	// initialization and avoids allocating every time the value is added to a
+	// []interface{} later.
+	key interface{} = "level"
+
+	errorValue = &levelValue{level: levelError, name: "error"}
+	warnValue  = &levelValue{level: levelWarn, name: "warn"}
+	infoValue  = &levelValue{level: levelInfo, name: "info"}
+	debugValue = &levelValue{level: levelDebug, name: "debug"}
+)
+
+type level byte
+
+const (
+	levelDebug level = 1 << iota
+	levelInfo
+	levelWarn
+	levelError
+)
+
+type levelValue struct {
+	name string
+	level
+}
+
+func (v *levelValue) String() string { return v.name }
+func (v *levelValue) levelVal()      {}
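The filter is a bitmask check: each Allow option sets the permitted levels, and Log scans the keyvals for the unexported *levelValue sentinel. The squelch options make drops observable to the caller. A small sketch exercising that behavior (the sentinel error is illustrative):

    package main

    import (
        "errors"
        "fmt"
        "os"

        "github.com/go-kit/kit/log"
        "github.com/go-kit/kit/log/level"
    )

    func main() {
        base := log.NewLogfmtLogger(os.Stdout)

        // Allow only warn and error; squelch events that carry no level at all,
        // returning a sentinel error so callers can detect the drop.
        errNoLevel := errors.New("no level")
        logger := level.NewFilter(base, level.AllowWarn(),
            level.SquelchNoLevel(true), level.ErrNoLevel(errNoLevel))

        level.Info(logger).Log("msg", "dropped silently") // squelched: info < warn
        level.Error(logger).Log("msg", "emitted")         // passes the bitmask check
        err := logger.Log("msg", "no level key")          // squelched with errNoLevel
        fmt.Println(err == errNoLevel)                    // true
    }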
vendor/github.com/prometheus/common/promlog/flag/flag.go (new file, generated and vendored, 33 lines)
@@ -0,0 +1,33 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flag
+
+import (
+	"github.com/prometheus/common/promlog"
+	kingpin "gopkg.in/alecthomas/kingpin.v2"
+)
+
+// LevelFlagName is the canonical flag name to configure the allowed log level
+// within Prometheus projects.
+const LevelFlagName = "log.level"
+
+// LevelFlagHelp is the help description for the log.level flag.
+const LevelFlagHelp = "Only log messages with the given severity or above. One of: [debug, info, warn, error]"
+
+// AddFlags adds the flags used by this package to the Kingpin application.
+// To use the default Kingpin application, call AddFlags(kingpin.CommandLine, ..).
+func AddFlags(a *kingpin.Application, logLevel *promlog.AllowedLevel) {
+	a.Flag(LevelFlagName, LevelFlagHelp).
+		Default("info").SetValue(logLevel)
+}
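Intended wiring: register the flag on a kingpin application, parse, then build the logger with promlog.New from the sibling package below. A minimal sketch; the application name and the log message are illustrative:

    package main

    import (
        "os"

        "github.com/go-kit/kit/log/level"
        "github.com/prometheus/common/promlog"
        promlogflag "github.com/prometheus/common/promlog/flag"
        kingpin "gopkg.in/alecthomas/kingpin.v2"
    )

    func main() {
        a := kingpin.New("example", "Demo of promlog flag wiring.")

        var lvl promlog.AllowedLevel
        promlogflag.AddFlags(a, &lvl) // registers --log.level, default "info"

        if _, err := a.Parse(os.Args[1:]); err != nil {
            a.Usage(os.Args[1:])
            os.Exit(2)
        }

        logger := promlog.New(lvl)
        level.Info(logger).Log("msg", "logger configured", "level", lvl.String())
    }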
vendor/github.com/prometheus/common/promlog/log.go (new file, generated and vendored, 63 lines)
@@ -0,0 +1,63 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promlog defines standardised ways to initialize Go kit loggers
+// across Prometheus components.
+// It should typically only ever be imported by main packages.
+package promlog
+
+import (
+	"os"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+)
+
+// AllowedLevel is a settable identifier for the minimum level a log entry
+// must have.
+type AllowedLevel struct {
+	s string
+	o level.Option
+}
+
+func (l *AllowedLevel) String() string {
+	return l.s
+}
+
+// Set updates the value of the allowed level.
+func (l *AllowedLevel) Set(s string) error {
+	switch s {
+	case "debug":
+		l.o = level.AllowDebug()
+	case "info":
+		l.o = level.AllowInfo()
+	case "warn":
+		l.o = level.AllowWarn()
+	case "error":
+		l.o = level.AllowError()
+	default:
+		return errors.Errorf("unrecognized log level %q", s)
+	}
+	l.s = s
+	return nil
+}
+
+// New returns a new leveled go-kit logger in the logfmt format. Each logged line will be annotated
+// with a timestamp. The output always goes to stderr.
+func New(al AllowedLevel) log.Logger {
+	l := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+	l = level.NewFilter(l, al.o)
+	l = log.With(l, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+	return l
+}
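AllowedLevel's String and Set methods are exactly the interface kingpin's SetValue above requires, so the type doubles as a flag value; Set can also be called directly. A small sketch of direct use, with the rough output shape indicated in a comment (the exact timestamp and caller values naturally vary):

    package main

    import (
        "github.com/go-kit/kit/log/level"
        "github.com/prometheus/common/promlog"
    )

    func main() {
        var lvl promlog.AllowedLevel
        if err := lvl.Set("debug"); err != nil { // same path the flag machinery takes
            panic(err)
        }

        logger := promlog.New(lvl)
        // Emits, roughly: ts=2017-09-08T16:18:22Z caller=main.go:16 level=debug msg=hello
        level.Debug(logger).Log("msg", "hello")
    }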
vendor/vendor.json (vendored, 12 lines changed)
@@ -422,6 +422,12 @@
 			"revision": "04dd4f741c6e76cc170a4d7913f4c625952e6f58",
 			"revisionTime": "2017-03-20T09:05:36Z"
 		},
+		{
+			"checksumSHA1": "t7aTpDH0h4BZcGU0KkUr14QQG2w=",
+			"path": "github.com/go-kit/kit/log/level",
+			"revision": "6964666de57c88f7d93da127e900d201b632f561",
+			"revisionTime": "2017-05-17T16:52:12Z"
+		},
 		{
 			"checksumSHA1": "KxX/Drph+byPXBFIXaCZaCOAnrU=",
 			"path": "github.com/go-logfmt/logfmt",
@@ -840,6 +846,12 @@
 			"revision": "0d0c3d572886e0f2323ed376557f9eb99b97d25b",
 			"revisionTime": "2017-06-16T14:41:04Z"
 		},
+		{
+			"checksumSHA1": "Yseprf8kAFr/s7wztkQnrFuFN+8=",
+			"path": "github.com/prometheus/common/promlog",
+			"revision": "2f17f4a9d485bf34b4bfaccc273805040e4f86c8",
+			"revisionTime": "2017-09-08T16:18:22Z"
+		},
 		{
 			"checksumSHA1": "9aDxDuzZt1l7FQJ9qpn2kPcF7NU=",
 			"path": "github.com/prometheus/common/route",
@@ -17,11 +17,11 @@ import (
 	"net/http"
 	"sort"

+	"github.com/go-kit/kit/log/level"
 	"github.com/gogo/protobuf/proto"
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
 	"github.com/prometheus/common/expfmt"
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"

 	"github.com/prometheus/prometheus/pkg/labels"
@@ -160,7 +160,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
 		if protMetricFam != nil {
 			if err := enc.Encode(protMetricFam); err != nil {
 				federationErrors.Inc()
-				log.With("err", err).Error("federation failed")
+				level.Error(h.logger).Log("msg", "federation failed", "err", err)
 				return
 			}
 		}
@@ -180,7 +180,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
 			}
 		}
 		if !nameSeen {
-			log.With("metric", s.Metric).Warn("Ignoring nameless metric during federation.")
+			level.Warn(h.logger).Log("msg", "Ignoring nameless metric during federation", "metric", s.Metric)
 			continue
 		}
 		// Attach global labels if they do not exist yet.
@@ -203,7 +203,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
 	if protMetricFam != nil {
 		if err := enc.Encode(protMetricFam); err != nil {
 			federationErrors.Inc()
-			log.With("err", err).Error("federation failed")
+			level.Error(h.logger).Log("msg", "federation failed", "err", err)
 		}
 	}
 }
web/web.go (36 lines changed)
@@ -19,6 +19,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	stdlog "log"
 	"net"
 	"net/http"
 	"net/http/pprof"
@@ -38,10 +39,11 @@ import (
 	template_text "text/template"

 	"github.com/cockroachdb/cmux"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/opentracing-contrib/go-stdlib/nethttp"
 	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/route"
 	"github.com/prometheus/prometheus/storage"
@@ -67,6 +69,8 @@ var localhostRepresentations = []string{"127.0.0.1", "localhost"}

 // Handler serves various HTTP endpoints of the Prometheus server
 type Handler struct {
+	logger log.Logger
+
 	targetManager *retrieval.TargetManager
 	ruleManager   *rules.Manager
 	queryEngine   *promql.Engine
@@ -141,15 +145,19 @@ type Options struct {
 }

 // New initializes a new web Handler.
-func New(o *Options) *Handler {
+func New(logger log.Logger, o *Options) *Handler {
 	router := route.New()
 	cwd, err := os.Getwd()

 	if err != nil {
 		cwd = "<error retrieving current working directory>"
 	}
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}

 	h := &Handler{
+		logger:   logger,
 		router:   router,
 		quitCh:   make(chan struct{}),
 		reloadCh: make(chan chan error),
@@ -205,7 +213,7 @@ func New(o *Options) *Handler {
 	router.Get("/targets", readyf(instrf("targets", h.targets)))
 	router.Get("/version", readyf(instrf("version", h.version)))

-	router.Get("/heap", readyf(instrf("heap", dumpHeap)))
+	router.Get("/heap", readyf(instrf("heap", h.dumpHeap)))

 	router.Get("/metrics", prometheus.Handler().ServeHTTP)

@@ -215,7 +223,7 @@ func New(o *Options) *Handler {

 	router.Get("/consoles/*filepath", readyf(instrf("consoles", h.consoles)))

-	router.Get("/static/*filepath", readyf(instrf("static", serveStaticAsset)))
+	router.Get("/static/*filepath", readyf(instrf("static", h.serveStaticAsset)))

 	if o.UserAssetsPath != "" {
 		router.Get("/user/*filepath", readyf(instrf("user", route.FileServe(o.UserAssetsPath))))
@@ -296,20 +304,20 @@ func serveDebug(w http.ResponseWriter, req *http.Request) {
 	}
 }

-func serveStaticAsset(w http.ResponseWriter, req *http.Request) {
+func (h *Handler) serveStaticAsset(w http.ResponseWriter, req *http.Request) {
 	fp := route.Param(req.Context(), "filepath")
 	fp = filepath.Join("web/ui/static", fp)

 	info, err := ui.AssetInfo(fp)
 	if err != nil {
-		log.With("file", fp).Warn("Could not get file info: ", err)
+		level.Warn(h.logger).Log("msg", "Could not get file info", "err", err, "file", fp)
 		w.WriteHeader(http.StatusNotFound)
 		return
 	}
 	file, err := ui.Asset(fp)
 	if err != nil {
 		if err != io.EOF {
-			log.With("file", fp).Warn("Could not get file: ", err)
+			level.Warn(h.logger).Log("msg", "Could not get file", "err", err, "file", fp)
 		}
 		w.WriteHeader(http.StatusNotFound)
 		return
@@ -356,7 +364,7 @@ func (h *Handler) Reload() <-chan chan error {

 // Run serves the HTTP endpoints.
 func (h *Handler) Run(ctx context.Context) error {
-	log.Infof("Listening on %s", h.options.ListenAddress)
+	level.Info(h.logger).Log("msg", "Start listening for connections", "address", h.options.ListenAddress)

 	l, err := net.Listen("tcp", h.options.ListenAddress)
 	if err != nil {
@@ -413,20 +421,22 @@ func (h *Handler) Run(ctx context.Context) error {
 		}),
 	))

+	errlog := stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0)
+
 	httpSrv := &http.Server{
 		Handler:     nethttp.Middleware(opentracing.GlobalTracer(), mux, operationName),
-		ErrorLog:    log.NewErrorLogger(),
+		ErrorLog:    errlog,
 		ReadTimeout: h.options.ReadTimeout,
 	}

 	go func() {
 		if err := httpSrv.Serve(httpl); err != nil {
-			log.With("err", err).Warnf("error serving HTTP")
+			level.Warn(h.logger).Log("msg", "error serving HTTP", "err", err)
 		}
 	}()
 	go func() {
 		if err := grpcSrv.Serve(grpcl); err != nil {
-			log.With("err", err).Warnf("error serving HTTP")
+			level.Warn(h.logger).Log("msg", "error serving gRPC", "err", err)
 		}
 	}()

@@ -701,11 +711,11 @@ func (h *Handler) executeTemplate(w http.ResponseWriter, name string, data inter
 	io.WriteString(w, result)
 }

-func dumpHeap(w http.ResponseWriter, r *http.Request) {
+func (h *Handler) dumpHeap(w http.ResponseWriter, r *http.Request) {
 	target := fmt.Sprintf("/tmp/%d.heap", time.Now().Unix())
 	f, err := os.Create(target)
 	if err != nil {
-		log.Error("Could not dump heap: ", err)
+		level.Error(h.logger).Log("msg", "Could not dump heap", "err", err)
 	}
 	fmt.Fprintf(w, "Writing to %s...", target)
 	defer f.Close()
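Two bridges appear in this file: a nil logger is replaced with log.NewNopLogger(), and net/http's *log.Logger requirement for ErrorLog is met by wrapping the leveled go-kit logger in log.NewStdlibAdapter, so http's internal error messages come out as level=error. A standalone sketch of the ErrorLog bridge; the listen address is illustrative:

    package main

    import (
        stdlog "log"
        "net/http"
        "os"

        "github.com/go-kit/kit/log"
        "github.com/go-kit/kit/log/level"
    )

    func main() {
        logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

        // Route the standard library's log output through the leveled go-kit logger.
        errlog := stdlog.New(log.NewStdlibAdapter(level.Error(logger)), "", 0)

        srv := &http.Server{
            Addr:     ":9090", // illustrative address
            ErrorLog: errlog,
        }
        if err := srv.ListenAndServe(); err != nil {
            level.Warn(logger).Log("msg", "error serving HTTP", "err", err)
        }
    }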
@@ -88,7 +88,7 @@ func TestReadyAndHealthy(t *testing.T) {

 	opts.Flags = map[string]string{}

-	webHandler := New(opts)
+	webHandler := New(nil, opts)
 	go webHandler.Run(context.Background())

 	// Give some time for the web goroutine to run since we need the server