Mirror of https://github.com/prometheus/prometheus.git (synced 2025-02-02 08:31:11 -08:00)

JSONFileLogger slog handler, add scrape.FailureLogger interface
Improves upon #15434, better resolves #15433.

This commit introduces a few changes to ensure safer handling of the JSONFileLogger:

- The JSONFileLogger struct now implements the slog.Handler interface, so it can be used directly to create slog Loggers. This pattern aligns more closely with upstream slog usage (which is generally built around handlers) and makes it clear that callers are creating a whole new logger when they interact with it (instead of silently mutating internal config, as it did previously).
- Updates the `promql.QueryLogger` interface to be the union of the `io.Closer` and `slog.Handler` interfaces. This allows plugging in slog-compatible loggers other than the JSONFileLogger, if desired (i.e., for downstream projects).
- Introduces the new `scrape.FailureLogger` interface; like `promql.QueryLogger`, it is the union of `io.Closer` and `slog.Handler`, for the same reasons.
- Updates tests where needed: the `FakeQueryLogger` in promql's engine_test now implements `slog.Handler`, the JSONFileLogger test suite is improved, etc.

Signed-off-by: TJ Hoplock <t.hoplock@gmail.com>
Parent: dfbfe8b5e8
Commit: e0104a6b7e
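In practice, the handler-based pattern described above looks roughly like the sketch below. This is a minimal illustration, not code from the commit; the log file path is made up, and it assumes the `github.com/prometheus/prometheus/util/logging` package is imported directly.

package main

import (
    "log/slog"

    "github.com/prometheus/prometheus/util/logging"
)

func main() {
    // JSONFileLogger is now an slog.Handler, so callers wrap it in a real
    // *slog.Logger instead of calling Log()/With() on the logger itself.
    fileHandler, err := logging.NewJSONFileLogger("/tmp/query.log") // illustrative path
    if err != nil {
        panic(err)
    }
    defer fileHandler.Close() // the io.Closer half of the new interfaces

    logger := slog.New(fileHandler)
    logger.Info("promql query logged", "query", "up")
}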
promql/engine.go

@@ -47,6 +47,7 @@ import (
     "github.com/prometheus/prometheus/storage"
     "github.com/prometheus/prometheus/tsdb/chunkenc"
     "github.com/prometheus/prometheus/util/annotations"
+    "github.com/prometheus/prometheus/util/logging"
     "github.com/prometheus/prometheus/util/stats"
     "github.com/prometheus/prometheus/util/zeropool"
 )
@@ -123,12 +124,13 @@ type QueryEngine interface {
     NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error)
 }
 
+var _ QueryLogger = (*logging.JSONFileLogger)(nil)
+
 // QueryLogger is an interface that can be used to log all the queries logged
 // by the engine.
 type QueryLogger interface {
-    Log(context.Context, slog.Level, string, ...any)
-    With(args ...any)
-    Close() error
+    slog.Handler
+    io.Closer
 }
 
 // A Query is derived from an a raw query string and can be run against an engine
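Because QueryLogger is now just slog.Handler plus io.Closer, a downstream project can supply any handler that also knows how to close its output. The type below is a hypothetical example using only the standard library, not part of this commit:

package main

import (
    "io"
    "log/slog"
    "os"
)

// closableHandler pairs an arbitrary slog.Handler with whatever closes its
// underlying sink; the embedded methods together satisfy promql.QueryLogger.
type closableHandler struct {
    slog.Handler
    io.Closer
}

func main() {
    f, err := os.CreateTemp("", "query-log")
    if err != nil {
        panic(err)
    }

    ql := &closableHandler{
        Handler: slog.NewJSONHandler(f, nil), // standard library JSON handler
        Closer:  f,                           // closing the logger closes the file
    }

    slog.New(ql).Info("promql query logged", "query", "up")
    _ = ql.Close()
}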
@@ -626,6 +628,9 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota
     defer func() {
         ng.queryLoggerLock.RLock()
         if l := ng.queryLogger; l != nil {
+            logger := slog.New(l)
+            f := make([]any, 0, 16) // Probably enough up front to not need to reallocate on append.
+
             params := make(map[string]interface{}, 4)
             params["query"] = q.q
             if eq, ok := q.Statement().(*parser.EvalStmt); ok {
@@ -634,7 +639,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota
                 // The step provided by the user is in seconds.
                 params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond))
             }
-            f := []interface{}{"params", params}
+            f = append(f, "params", params)
             if err != nil {
                 f = append(f, "error", err)
             }
@@ -647,7 +652,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota
                     f = append(f, k, v)
                 }
             }
-            l.Log(context.Background(), slog.LevelInfo, "promql query logged", f...)
+            logger.Info("promql query logged", f...)
             // TODO: @tjhop -- do we still need this metric/error log if logger doesn't return errors?
             // ng.metrics.queryLogFailures.Inc()
             // ng.logger.Error("can't log query", "err", err)
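The `f` slice built in exec is just alternating keys and values handed to a standard slog call. A standalone sketch of that pattern (the params values and the extra field are illustrative, not taken from the engine):

package main

import (
    "log/slog"
    "os"
)

func main() {
    logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))

    // Alternating key/value pairs, appended the same way exec() fills `f`
    // before handing it to logger.Info.
    params := map[string]interface{}{"query": "up", "step": int64(15)}
    f := make([]any, 0, 16)
    f = append(f, "params", params)
    f = append(f, "spanID", "abc123") // illustrative extra field

    logger.Info("promql query logged", f...)
}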
promql/engine_test.go

@@ -17,6 +17,7 @@ import (
     "context"
     "errors"
     "fmt"
+    "io"
     "log/slog"
     "math"
     "sort"
@@ -2147,9 +2148,13 @@ func TestSubquerySelector(t *testing.T) {
     }
 }
 
+var _ io.Closer = (*FakeQueryLogger)(nil)
+
+var _ slog.Handler = (*FakeQueryLogger)(nil)
+
 type FakeQueryLogger struct {
     closed bool
-    logs   []interface{}
+    logs   []any
+    attrs  []any
 }
@@ -2161,26 +2166,47 @@ func NewFakeQueryLogger() *FakeQueryLogger {
     }
 }
 
 // It implements the promql.QueryLogger interface.
 func (f *FakeQueryLogger) Close() error {
     f.closed = true
     return nil
 }
 
 // It implements the promql.QueryLogger interface.
-func (f *FakeQueryLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) {
+func (f *FakeQueryLogger) Enabled(ctx context.Context, level slog.Level) bool {
+    return true
+}
+
+func (f *FakeQueryLogger) Handle(ctx context.Context, r slog.Record) error {
     // Test usage only really cares about existence of keyvals passed in
     // via args, just append in the log message before handling the
     // provided args and any embedded kvs added via `.With()` on f.attrs.
-    log := append([]any{msg}, args...)
-    log = append(log, f.attrs...)
+    log := append([]any{r.Message}, f.attrs...)
     f.attrs = f.attrs[:0]
+
+    r.Attrs(func(a slog.Attr) bool {
+        log = append(log, a.Key, a.Value.Any())
+        return true
+    })
+
     f.logs = append(f.logs, log...)
+    return nil
 }
 
 // It implements the promql.QueryLogger interface.
-func (f *FakeQueryLogger) With(args ...any) {
-    f.attrs = append(f.attrs, args...)
+func (f *FakeQueryLogger) WithAttrs(attrs []slog.Attr) slog.Handler {
+    attrPairs := []any{}
+    for _, a := range attrs {
+        attrPairs = append(attrPairs, a.Key, a.Value.Any())
+    }
+
+    return &FakeQueryLogger{
+        closed: f.closed,
+        logs:   f.logs,
+        attrs:  append(f.attrs, attrPairs...),
+    }
+}
+
+func (f *FakeQueryLogger) WithGroup(name string) slog.Handler {
+    // We don't need to support groups for this fake handler in testing.
+    return f
+}
+
 func TestQueryLogger_basic(t *testing.T) {
scrape/manager.go

@@ -107,7 +107,7 @@ type Manager struct {
     scrapeConfigs          map[string]*config.ScrapeConfig
     scrapePools            map[string]*scrapePool
     newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error)
-    scrapeFailureLoggers   map[string]*logging.JSONFileLogger
+    scrapeFailureLoggers   map[string]FailureLogger
     targetSets             map[string][]*targetgroup.Group
     buffers                *pool.Pool
@@ -249,7 +249,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error {
     }
 
     c := make(map[string]*config.ScrapeConfig)
-    scrapeFailureLoggers := map[string]*logging.JSONFileLogger{
+    scrapeFailureLoggers := map[string]FailureLogger{
         "": nil, // Emptying the file name sets the scrape logger to nil.
     }
     for _, scfg := range scfgs {
@@ -257,7 +257,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error {
         if _, ok := scrapeFailureLoggers[scfg.ScrapeFailureLogFile]; !ok {
             // We promise to reopen the file on each reload.
             var (
-                logger *logging.JSONFileLogger
+                logger FailureLogger
                 err    error
             )
             if m.newScrapeFailureLogger != nil {
scrape/scrape.go

@@ -61,6 +61,15 @@ var AlignScrapeTimestamps = true
 
 var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels.MetricName)
 
+var _ FailureLogger = (*logging.JSONFileLogger)(nil)
+
+// FailureLogger is an interface that can be used to log all failed
+// scrapes.
+type FailureLogger interface {
+    slog.Handler
+    io.Closer
+}
+
 // scrapePool manages scrapes for sets of targets.
 type scrapePool struct {
     appendable storage.Appendable
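FailureLogger has the same shape as promql.QueryLogger, so any slog.Handler that can also close (or that does not need to close) its output qualifies. A minimal sketch of a custom implementation, hypothetical and not part of this commit:

package main

import (
    "log/slog"
    "os"
)

// stderrFailureLogger is a hypothetical FailureLogger: a text handler whose
// Close is a no-op because stderr is never closed.
type stderrFailureLogger struct {
    slog.Handler
}

func (stderrFailureLogger) Close() error { return nil }

// WithAttrs keeps the concrete type, so the Handler().(FailureLogger) type
// assertion in scrapePool.SetScrapeFailureLogger would still hold after With().
func (l stderrFailureLogger) WithAttrs(attrs []slog.Attr) slog.Handler {
    return stderrFailureLogger{Handler: l.Handler.WithAttrs(attrs)}
}

func main() {
    fl := stderrFailureLogger{Handler: slog.NewTextHandler(os.Stderr, nil)}
    // Same shape as the scrape loop's usage: wrap the handler and log the error.
    slog.New(fl).With("job_name", "node").Error("scrape failed: connection refused")
    _ = fl.Close()
}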
@@ -90,7 +99,7 @@ type scrapePool struct {
 
     metrics *scrapeMetrics
 
-    scrapeFailureLogger    *logging.JSONFileLogger
+    scrapeFailureLogger    FailureLogger
     scrapeFailureLoggerMtx sync.RWMutex
 }
@@ -223,11 +232,11 @@ func (sp *scrapePool) DroppedTargetsCount() int {
     return sp.droppedTargetsCount
 }
 
-func (sp *scrapePool) SetScrapeFailureLogger(l *logging.JSONFileLogger) {
+func (sp *scrapePool) SetScrapeFailureLogger(l FailureLogger) {
     sp.scrapeFailureLoggerMtx.Lock()
     defer sp.scrapeFailureLoggerMtx.Unlock()
     if l != nil {
-        l.With("job_name", sp.config.JobName)
+        l = slog.New(l).With("job_name", sp.config.JobName).Handler().(FailureLogger)
     }
     sp.scrapeFailureLogger = l
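Note that the slog.New(l).With(...).Handler().(FailureLogger) assertion above only holds if the handler's WithAttrs returns the same concrete type: in the util/logging/file.go hunk further down, JSONFileLogger.WithAttrs returns another *JSONFileLogger, so the value that comes back out of Handler() still satisfies both slog.Handler and io.Closer. A custom FailureLogger would need to preserve its own type in WithAttrs the same way.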
@@ -238,7 +247,7 @@ func (sp *scrapePool) SetScrapeFailureLogger(l *logging.JSONFileLogger) {
     }
 }
 
-func (sp *scrapePool) getScrapeFailureLogger() *logging.JSONFileLogger {
+func (sp *scrapePool) getScrapeFailureLogger() FailureLogger {
     sp.scrapeFailureLoggerMtx.RLock()
     defer sp.scrapeFailureLoggerMtx.RUnlock()
     return sp.scrapeFailureLogger
@@ -860,7 +869,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w
 type loop interface {
     run(errc chan<- error)
     setForcedError(err error)
-    setScrapeFailureLogger(*logging.JSONFileLogger)
+    setScrapeFailureLogger(FailureLogger)
     stop()
     getCache() *scrapeCache
     disableEndOfRunStalenessMarkers()
@@ -876,7 +885,7 @@ type cacheEntry struct {
 type scrapeLoop struct {
     scraper                scraper
     l                      *slog.Logger
-    scrapeFailureLogger    *logging.JSONFileLogger
+    scrapeFailureLogger    FailureLogger
     scrapeFailureLoggerMtx sync.RWMutex
     cache                  *scrapeCache
     lastScrapeSize         int
@@ -1267,11 +1276,11 @@ func newScrapeLoop(ctx context.Context,
     return sl
 }
 
-func (sl *scrapeLoop) setScrapeFailureLogger(l *logging.JSONFileLogger) {
+func (sl *scrapeLoop) setScrapeFailureLogger(l FailureLogger) {
     sl.scrapeFailureLoggerMtx.Lock()
     defer sl.scrapeFailureLoggerMtx.Unlock()
     if ts, ok := sl.scraper.(fmt.Stringer); ok && l != nil {
-        l.With("target", ts.String())
+        l = slog.New(l).With("target", ts.String()).Handler().(FailureLogger)
     }
     sl.scrapeFailureLogger = l
 }
@@ -1421,7 +1430,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
         sl.l.Debug("Scrape failed", "err", scrapeErr)
         sl.scrapeFailureLoggerMtx.RLock()
         if sl.scrapeFailureLogger != nil {
-            sl.scrapeFailureLogger.Log(context.Background(), slog.LevelError, scrapeErr.Error())
+            slog.New(sl.scrapeFailureLogger).Error(scrapeErr.Error())
         }
         sl.scrapeFailureLoggerMtx.RUnlock()
         if errc != nil {
scrape/scrape_test.go

@@ -55,7 +55,6 @@ import (
     "github.com/prometheus/prometheus/model/value"
     "github.com/prometheus/prometheus/storage"
     "github.com/prometheus/prometheus/tsdb/chunkenc"
-    "github.com/prometheus/prometheus/util/logging"
     "github.com/prometheus/prometheus/util/pool"
     "github.com/prometheus/prometheus/util/teststorage"
     "github.com/prometheus/prometheus/util/testutil"
@@ -252,7 +251,7 @@ type testLoop struct {
     timeout   time.Duration
 }
 
-func (l *testLoop) setScrapeFailureLogger(*logging.JSONFileLogger) {
+func (l *testLoop) setScrapeFailureLogger(FailureLogger) {
 }
 
 func (l *testLoop) run(errc chan<- error) {
util/logging/file.go

@@ -16,16 +16,22 @@ package logging
 import (
     "context"
     "fmt"
+    "io"
     "log/slog"
     "os"
 
     "github.com/prometheus/common/promslog"
 )
 
-// JSONFileLogger represents a logger that writes JSON to a file. It implements the promql.QueryLogger interface.
+var _ slog.Handler = (*JSONFileLogger)(nil)
+
+var _ io.Closer = (*JSONFileLogger)(nil)
+
+// JSONFileLogger represents a logger that writes JSON to a file. It implements
+// the slog.Handler interface, as well as the io.Closer interface.
 type JSONFileLogger struct {
-    logger *slog.Logger
-    file   *os.File
+    handler slog.Handler
+    file    *os.File
 }
 
 // NewJSONFileLogger returns a new JSONFileLogger.
@@ -42,24 +48,47 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) {
     jsonFmt := &promslog.AllowedFormat{}
     _ = jsonFmt.Set("json")
     return &JSONFileLogger{
-        logger: promslog.New(&promslog.Config{Format: jsonFmt, Writer: f}),
-        file:   f,
+        handler: promslog.New(&promslog.Config{Format: jsonFmt, Writer: f}).Handler(),
+        file:    f,
     }, nil
 }
 
-// Close closes the underlying file. It implements the promql.QueryLogger interface.
+// Close closes the underlying file. It implements the io.Closer interface.
 func (l *JSONFileLogger) Close() error {
     return l.file.Close()
 }
 
-// With calls the `With()` method on the underlying `log/slog.Logger` with the
-// provided msg and args. It implements the promql.QueryLogger interface.
-func (l *JSONFileLogger) With(args ...any) {
-    l.logger = l.logger.With(args...)
+// Enabled returns true if and only if the internal slog.Handler is enabled. It
+// implements the slog.Handler interface.
+func (l *JSONFileLogger) Enabled(ctx context.Context, level slog.Level) bool {
+    return l.handler.Enabled(ctx, level)
 }
 
-// Log calls the `Log()` method on the underlying `log/slog.Logger` with the
-// provided msg and args. It implements the promql.QueryLogger interface.
-func (l *JSONFileLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) {
-    l.logger.Log(ctx, level, msg, args...)
+// Handle takes record created by an slog.Logger and forwards it to the
+// internal slog.Handler for dispatching the log call to the backing file. It
+// implements the slog.Handler interface.
+func (l *JSONFileLogger) Handle(ctx context.Context, r slog.Record) error {
+    return l.handler.Handle(ctx, r.Clone())
 }
 
+// WithAttrs returns a new *JSONFileLogger with a new internal handler that has
+// the provided attrs attached as attributes on all further log calls. It
+// implements the slog.Handler interface.
+func (l *JSONFileLogger) WithAttrs(attrs []slog.Attr) slog.Handler {
+    if len(attrs) == 0 {
+        return l
+    }
+
+    return &JSONFileLogger{file: l.file, handler: l.handler.WithAttrs(attrs)}
+}
+
+// WithGroup returns a new *JSONFileLogger with a new internal handler that has
+// the provided group name attached, to group all other attributes added to the
+// logger. It implements the slog.Handler interface.
+func (l *JSONFileLogger) WithGroup(name string) slog.Handler {
+    if name == "" {
+        return l
+    }
+
+    return &JSONFileLogger{file: l.file, handler: l.handler.WithGroup(name)}
+}
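A small usage sketch of the reworked handler (not from the commit; the file path and attribute values are illustrative): WithAttrs and WithGroup hand back a new *JSONFileLogger that shares the same backing file, so derived loggers keep writing to the original log file.

package main

import (
    "log/slog"

    "github.com/prometheus/prometheus/util/logging"
)

func main() {
    h, err := logging.NewJSONFileLogger("/tmp/scrape-failures.log") // illustrative path
    if err != nil {
        panic(err)
    }
    defer h.Close()

    base := slog.New(h)
    perJob := base.With("job_name", "node") // calls h.WithAttrs under the hood

    base.Error("scrape failed")                     // written without job_name
    perJob.Error("scrape failed", "target", "host") // written with job_name="node"
}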
util/logging/file_test.go

@@ -14,7 +14,6 @@
 package logging
 
 import (
-    "context"
     "log/slog"
     "os"
     "strings"
@@ -24,6 +23,19 @@ import (
     "github.com/stretchr/testify/require"
 )
 
+func getLogLines(t *testing.T, name string) []string {
+    content, err := os.ReadFile(name)
+    require.NoError(t, err)
+
+    lines := strings.Split(string(content), "\n")
+    for i := len(lines) - 1; i >= 0; i-- {
+        if lines[i] == "" {
+            lines = append(lines[:i], lines[i+1:]...)
+        }
+    }
+    return lines
+}
+
 func TestJSONFileLogger_basic(t *testing.T) {
     f, err := os.CreateTemp("", "logging")
     require.NoError(t, err)
@@ -32,24 +44,23 @@ func TestJSONFileLogger_basic(t *testing.T) {
         require.NoError(t, os.Remove(f.Name()))
     }()
 
-    l, err := NewJSONFileLogger(f.Name())
+    logHandler, err := NewJSONFileLogger(f.Name())
     require.NoError(t, err)
-    require.NotNil(t, l, "logger can't be nil")
+    require.NotNil(t, logHandler, "logger handler can't be nil")
 
-    l.Log(context.Background(), slog.LevelInfo, "test", "hello", "world")
-    require.NoError(t, err)
-    r := make([]byte, 1024)
-    _, err = f.Read(r)
-    require.NoError(t, err)
+    logger := slog.New(logHandler)
+    logger.Info("test", "hello", "world")
 
-    result, err := regexp.Match(`^{"time":"[^"]+","level":"INFO","source":"file.go:\d+","msg":"test","hello":"world"}\n`, r)
+    r := getLogLines(t, f.Name())
+    require.Len(t, r, 1, "expected 1 log line")
+    result, err := regexp.Match(`^{"time":"[^"]+","level":"INFO","source":"\w+.go:\d+","msg":"test","hello":"world"}`, []byte(r[0]))
     require.NoError(t, err)
     require.True(t, result, "unexpected content: %s", r)
 
-    err = l.Close()
+    err = logHandler.Close()
     require.NoError(t, err)
 
-    err = l.file.Close()
+    err = logHandler.file.Close()
     require.Error(t, err)
     require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed")
 }
@@ -62,31 +73,31 @@ func TestJSONFileLogger_parallel(t *testing.T) {
         require.NoError(t, os.Remove(f.Name()))
     }()
 
-    l, err := NewJSONFileLogger(f.Name())
+    logHandler, err := NewJSONFileLogger(f.Name())
     require.NoError(t, err)
-    require.NotNil(t, l, "logger can't be nil")
+    require.NotNil(t, logHandler, "logger handler can't be nil")
 
-    l.Log(context.Background(), slog.LevelInfo, "test", "hello", "world")
+    logger := slog.New(logHandler)
+    logger.Info("test", "hello", "world")
+
+    logHandler2, err := NewJSONFileLogger(f.Name())
+    require.NoError(t, err)
+    require.NotNil(t, logHandler2, "logger handler can't be nil")
+
+    logger2 := slog.New(logHandler2)
+    logger2.Info("test", "hello", "world")
+
+    err = logHandler.Close()
     require.NoError(t, err)
 
-    l2, err := NewJSONFileLogger(f.Name())
-    require.NoError(t, err)
-    require.NotNil(t, l, "logger can't be nil")
-
-    l2.Log(context.Background(), slog.LevelInfo, "test", "hello", "world")
-    require.NoError(t, err)
-
-    err = l.Close()
-    require.NoError(t, err)
-
-    err = l.file.Close()
+    err = logHandler.file.Close()
     require.Error(t, err)
     require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed")
 
-    err = l2.Close()
+    err = logHandler2.Close()
     require.NoError(t, err)
 
-    err = l2.file.Close()
+    err = logHandler2.file.Close()
     require.Error(t, err)
     require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed")
 }