switch to go-kit/log (#1575)

Signed-off-by: yeya24 <yb532204897@gmail.com>
Ben Ye 2019-12-31 11:19:37 -05:00 committed by Ben Kochie
parent a80b7d0bc5
commit 2477c5c67d
158 changed files with 3434 additions and 4636 deletions

View file

@ -22,11 +22,13 @@ import (
"os"
"strings"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
type arpCollector struct {
entries *prometheus.Desc
logger log.Logger
}
func init() {
@ -34,13 +36,14 @@ func init() {
}
// NewARPCollector returns a new Collector exposing ARP stats.
func NewARPCollector() (Collector, error) {
func NewARPCollector(logger log.Logger) (Collector, error) {
return &arpCollector{
entries: prometheus.NewDesc(
prometheus.BuildFQName(namespace, "arp", "entries"),
"ARP entries by device",
[]string{"device"}, nil,
),
logger: logger,
}, nil
}
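The same mechanical change repeats across the collectors below: each collector struct gains a logger log.Logger field and each constructor now accepts the logger handed down by the registry. A minimal sketch of the resulting shape, assuming the collector package's Collector interface, namespace constant, registerCollector and defaultEnabled helpers shown elsewhere in this commit (exampleCollector, its metric and NewExampleCollector are illustrative names, not part of the change):

package collector

import (
	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

// exampleCollector is a hypothetical collector showing the post-change shape.
type exampleCollector struct {
	desc   *prometheus.Desc
	logger log.Logger
}

func init() {
	// Factories now take a log.Logger, matching the new registerCollector signature.
	registerCollector("example", defaultEnabled, NewExampleCollector)
}

// NewExampleCollector stores the per-collector logger alongside the metric descriptors.
func NewExampleCollector(logger log.Logger) (Collector, error) {
	return &exampleCollector{
		desc: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "example", "info"),
			"Illustrative metric description.",
			nil, nil,
		),
		logger: logger,
	}, nil
}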

View file

@ -18,6 +18,7 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/bcache"
)
@ -28,19 +29,21 @@ func init() {
// A bcacheCollector is a Collector which gathers metrics from Linux bcache.
type bcacheCollector struct {
fs bcache.FS
fs bcache.FS
logger log.Logger
}
// NewBcacheCollector returns a newly allocated bcacheCollector.
// It exposes a number of Linux bcache statistics.
func NewBcacheCollector() (Collector, error) {
func NewBcacheCollector(logger log.Logger) (Collector, error) {
fs, err := bcache.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)
}
return &bcacheCollector{
fs: fs,
fs: fs,
logger: logger,
}, nil
}

View file

@ -22,12 +22,14 @@ import (
"path/filepath"
"strings"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
type bondingCollector struct {
slaves, active typedDesc
logger log.Logger
}
func init() {
@ -36,7 +38,7 @@ func init() {
// NewBondingCollector returns a newly allocated bondingCollector.
// It exposes the number of configured and active slaves of Linux bonding interfaces.
func NewBondingCollector() (Collector, error) {
func NewBondingCollector(logger log.Logger) (Collector, error) {
return &bondingCollector{
slaves: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(namespace, "bonding", "slaves"),
@ -48,6 +50,7 @@ func NewBondingCollector() (Collector, error) {
"Number of active slaves per bonding interface.",
[]string{"master"}, nil,
), prometheus.GaugeValue},
logger: logger,
}, nil
}
@ -57,7 +60,7 @@ func (c *bondingCollector) Update(ch chan<- prometheus.Metric) error {
bondingStats, err := readBondingStats(statusfile)
if err != nil {
if os.IsNotExist(err) {
log.Debugf("Not collecting bonding, file does not exist: %s", statusfile)
level.Debug(c.logger).Log("msg", "Not collecting bonding, file does not exist", "file", statusfile)
return nil
}
return err
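Printf-style calls such as log.Debugf become leveled, structured key/value calls on the injected logger. How that logger is constructed is not part of this excerpt; a generic go-kit/log setup sketch follows (the stderr writer, level filter and context fields are assumptions, not node_exporter's actual flag handling):

package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	// logfmt output to stderr; SyncWriter makes concurrent collector goroutines safe.
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
	// Drop records below the allowed level; AllowDebug is illustrative.
	logger = level.NewFilter(logger, level.AllowDebug())
	// Common contextual fields added to every record.
	logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

	// Structured replacement for log.Debugf("Not collecting bonding, file does not exist: %s", statusfile).
	level.Debug(logger).Log("msg", "Not collecting bonding, file does not exist", "file", "/proc/net/bonding")
}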

View file

@ -17,17 +17,21 @@
package collector
import (
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
type bootTimeCollector struct{ boottime bsdSysctl }
type bootTimeCollector struct {
boottime bsdSysctl
logger log.Logger
}
func init() {
registerCollector("boottime", defaultEnabled, newBootTimeCollector)
}
// newBootTimeCollector returns a new Collector exposing system boot time on BSD systems.
func newBootTimeCollector() (Collector, error) {
func newBootTimeCollector(logger log.Logger) (Collector, error) {
return &bootTimeCollector{
boottime: bsdSysctl{
name: "boot_time_seconds",
@ -35,6 +39,7 @@ func newBootTimeCollector() (Collector, error) {
mib: "kern.boottime",
dataType: bsdSysctlTypeStructTimeval,
},
logger: logger,
}, nil
}

View file

@ -17,19 +17,21 @@
package collector
import (
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/siebenmann/go-kstat"
)
type bootTimeCollector struct {
boottime typedDesc
logger log.Logger
}
func init() {
registerCollector("boottime", defaultEnabled, newBootTimeCollector)
}
func newBootTimeCollector() (Collector, error) {
func newBootTimeCollector(logger log.Logger) (Collector, error) {
return &bootTimeCollector{
boottime: typedDesc{
prometheus.NewDesc(
@ -37,6 +39,7 @@ func newBootTimeCollector() (Collector, error) {
"Unix time of last boot, including microseconds.",
nil, nil,
), prometheus.GaugeValue},
logger: logger,
}, nil
}

View file

@ -20,8 +20,9 @@ import (
"fmt"
"strconv"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/procfs"
)
@ -30,8 +31,9 @@ const (
)
type buddyinfoCollector struct {
fs procfs.FS
desc *prometheus.Desc
fs procfs.FS
desc *prometheus.Desc
logger log.Logger
}
func init() {
@ -39,7 +41,7 @@ func init() {
}
// NewBuddyinfoCollector returns a new Collector exposing buddyinfo stats.
func NewBuddyinfoCollector() (Collector, error) {
func NewBuddyinfoCollector(logger log.Logger) (Collector, error) {
desc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, buddyInfoSubsystem, "blocks"),
"Count of free blocks according to size.",
@ -49,7 +51,7 @@ func NewBuddyinfoCollector() (Collector, error) {
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
}
return &buddyinfoCollector{fs, desc}, nil
return &buddyinfoCollector{fs, desc, logger}, nil
}
// Update calls (*buddyinfoCollector).getBuddyInfo to get the platform specific
@ -60,7 +62,7 @@ func (c *buddyinfoCollector) Update(ch chan<- prometheus.Metric) error {
return fmt.Errorf("couldn't get buddyinfo: %s", err)
}
log.Debugf("Set node_buddy: %#v", buddyInfo)
level.Debug(c.logger).Log("msg", "Set node_buddy", "buddyInfo", buddyInfo)
for _, entry := range buddyInfo {
for size, value := range entry.Sizes {
ch <- prometheus.MustNewConstMetric(

View file

@ -19,8 +19,9 @@ import (
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
)
@ -48,11 +49,11 @@ const (
)
var (
factories = make(map[string]func() (Collector, error))
factories = make(map[string]func(logger log.Logger) (Collector, error))
collectorState = make(map[string]*bool)
)
func registerCollector(collector string, isDefaultEnabled bool, factory func() (Collector, error)) {
func registerCollector(collector string, isDefaultEnabled bool, factory func(logger log.Logger) (Collector, error)) {
var helpDefaultState string
if isDefaultEnabled {
helpDefaultState = "enabled"
@ -73,10 +74,11 @@ func registerCollector(collector string, isDefaultEnabled bool, factory func() (
// NodeCollector implements the prometheus.Collector interface.
type NodeCollector struct {
Collectors map[string]Collector
logger log.Logger
}
// NewNodeCollector creates a new NodeCollector.
func NewNodeCollector(filters ...string) (*NodeCollector, error) {
func NewNodeCollector(logger log.Logger, filters ...string) (*NodeCollector, error) {
f := make(map[string]bool)
for _, filter := range filters {
enabled, exist := collectorState[filter]
@ -91,7 +93,7 @@ func NewNodeCollector(filters ...string) (*NodeCollector, error) {
collectors := make(map[string]Collector)
for key, enabled := range collectorState {
if *enabled {
collector, err := factories[key]()
collector, err := factories[key](log.With(logger, "collector", key))
if err != nil {
return nil, err
}
@ -100,7 +102,7 @@ func NewNodeCollector(filters ...string) (*NodeCollector, error) {
}
}
}
return &NodeCollector{Collectors: collectors}, nil
return &NodeCollector{Collectors: collectors, logger: logger}, nil
}
// Describe implements the prometheus.Collector interface.
@ -115,24 +117,24 @@ func (n NodeCollector) Collect(ch chan<- prometheus.Metric) {
wg.Add(len(n.Collectors))
for name, c := range n.Collectors {
go func(name string, c Collector) {
execute(name, c, ch)
execute(name, c, ch, n.logger)
wg.Done()
}(name, c)
}
wg.Wait()
}
func execute(name string, c Collector, ch chan<- prometheus.Metric) {
func execute(name string, c Collector, ch chan<- prometheus.Metric, logger log.Logger) {
begin := time.Now()
err := c.Update(ch)
duration := time.Since(begin)
var success float64
if err != nil {
log.Errorf("ERROR: %s collector failed after %fs: %s", name, duration.Seconds(), err)
level.Error(logger).Log("msg", "collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err)
success = 0
} else {
log.Debugf("OK: %s collector succeeded after %fs.", name, duration.Seconds())
level.Debug(logger).Log("msg", "collector succeeded", "name", name, "duration_seconds", duration.Seconds())
success = 1
}
ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name)
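NewNodeCollector derives a child logger per collector with log.With(logger, "collector", key), so every record from a collector carries its name, and execute logs success or failure through that child logger. A small self-contained sketch of what this produces (the error value, metric name and output line are illustrative):

package main

import (
	"errors"
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	// Per-collector child logger, as NewNodeCollector creates for each enabled collector.
	cpuLogger := log.With(logger, "collector", "cpu")

	// The shape execute() now uses on failure.
	err := errors.New("example failure")
	level.Error(cpuLogger).Log("msg", "collector failed", "name", "cpu", "duration_seconds", 0.042, "err", err)
	// Emits a logfmt line along the lines of:
	//   level=error collector=cpu msg="collector failed" name=cpu duration_seconds=0.042 err="example failure"
}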

View file

@ -16,12 +16,14 @@
package collector
import (
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
type conntrackCollector struct {
current *prometheus.Desc
limit *prometheus.Desc
logger log.Logger
}
func init() {
@ -29,7 +31,7 @@ func init() {
}
// NewConntrackCollector returns a new Collector exposing conntrack stats.
func NewConntrackCollector() (Collector, error) {
func NewConntrackCollector(logger log.Logger) (Collector, error) {
return &conntrackCollector{
current: prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "nf_conntrack_entries"),
@ -41,6 +43,7 @@ func NewConntrackCollector() (Collector, error) {
"Maximum size of connection tracking table.",
nil, nil,
),
logger: logger,
}, nil
}

View file

@ -25,6 +25,7 @@ import (
"strconv"
"unsafe"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -49,7 +50,8 @@ import "C"
const ClocksPerSec = float64(C.CLK_TCK)
type statCollector struct {
cpu *prometheus.Desc
cpu *prometheus.Desc
logger log.Logger
}
func init() {
@ -57,9 +59,10 @@ func init() {
}
// NewCPUCollector returns a new Collector exposing CPU stats.
func NewCPUCollector() (Collector, error) {
func NewCPUCollector(logger log.Logger) (Collector, error) {
return &statCollector{
cpu: nodeCPUSecondsDesc,
cpu: nodeCPUSecondsDesc,
logger: logger,
}, nil
}

View file

@ -20,6 +20,7 @@ import (
"fmt"
"unsafe"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -75,7 +76,8 @@ import "C"
const maxCPUTimesLen = C.MAXCPU * C.CPUSTATES
type statCollector struct {
cpu *prometheus.Desc
cpu *prometheus.Desc
logger log.Logger
}
func init() {
@ -83,9 +85,10 @@ func init() {
}
// NewStatCollector returns a new Collector exposing CPU stats.
func NewStatCollector() (Collector, error) {
func NewStatCollector(logger log.Logger) (Collector, error) {
return &statCollector{
cpu: nodeCPUSecondsDesc,
cpu: nodeCPUSecondsDesc,
logger: logger,
}, nil
}

View file

@ -21,8 +21,9 @@ import (
"strconv"
"unsafe"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"golang.org/x/sys/unix"
)
@ -81,8 +82,9 @@ func getCPUTimes() ([]cputime, error) {
}
type statCollector struct {
cpu typedDesc
temp typedDesc
cpu typedDesc
temp typedDesc
logger log.Logger
}
func init() {
@ -90,7 +92,7 @@ func init() {
}
// NewStatCollector returns a new Collector exposing CPU stats.
func NewStatCollector() (Collector, error) {
func NewStatCollector(logger log.Logger) (Collector, error) {
return &statCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
temp: typedDesc{prometheus.NewDesc(
@ -98,6 +100,7 @@ func NewStatCollector() (Collector, error) {
"CPU temperature",
[]string{"cpu"}, nil,
), prometheus.GaugeValue},
logger: logger,
}, nil
}
@ -130,11 +133,11 @@ func (c *statCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil {
if err == unix.ENOENT {
// No temperature information for this CPU
log.Debugf("no temperature information for CPU %d", cpu)
level.Debug(c.logger).Log("msg", "no temperature information for CPU", "cpu", cpu)
} else {
// Unexpected error
ch <- c.temp.mustNewConstMetric(math.NaN(), lcpu)
log.Errorf("failed to query CPU temperature for CPU %d: %s", cpu, err)
level.Error(c.logger).Log("msg", "failed to query CPU temperature for CPU", "cpu", cpu, "err", err)
}
continue
}

View file

@ -20,10 +20,11 @@ import (
"path/filepath"
"strconv"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/procfs"
kingpin "gopkg.in/alecthomas/kingpin.v2"
"gopkg.in/alecthomas/kingpin.v2"
)
type cpuCollector struct {
@ -33,6 +34,7 @@ type cpuCollector struct {
cpuGuest *prometheus.Desc
cpuCoreThrottle *prometheus.Desc
cpuPackageThrottle *prometheus.Desc
logger log.Logger
}
var (
@ -44,7 +46,7 @@ func init() {
}
// NewCPUCollector returns a new Collector exposing kernel/system statistics.
func NewCPUCollector() (Collector, error) {
func NewCPUCollector(logger log.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
@ -72,6 +74,7 @@ func NewCPUCollector() (Collector, error) {
"Number of times this cpu package has been throttled.",
[]string{"package"}, nil,
),
logger: logger,
}, nil
}
@ -134,12 +137,12 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error
// topology/physical_package_id
if physicalPackageID, err = readUintFromFile(filepath.Join(cpu, "topology", "physical_package_id")); err != nil {
log.Debugf("CPU %v is missing physical_package_id", cpu)
level.Debug(c.logger).Log("msg", "CPU is missing physical_package_id", "cpu", cpu)
continue
}
// topology/core_id
if coreID, err = readUintFromFile(filepath.Join(cpu, "topology", "core_id")); err != nil {
log.Debugf("CPU %v is missing core_id", cpu)
level.Debug(c.logger).Log("msg", "CPU is missing core_id", "cpu", cpu)
continue
}
@ -157,7 +160,7 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error
if coreThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "core_throttle_count")); err == nil {
packageCoreThrottles[physicalPackageID][coreID] = coreThrottleCount
} else {
log.Debugf("CPU %v is missing core_throttle_count", cpu)
level.Debug(c.logger).Log("msg", "CPU is missing core_throttle_count", "cpu", cpu)
}
}
@ -167,7 +170,7 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error
if packageThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "package_throttle_count")); err == nil {
packageThrottles[physicalPackageID] = packageThrottleCount
} else {
log.Debugf("CPU %v is missing package_throttle_count", cpu)
level.Debug(c.logger).Log("msg", "CPU is missing package_throttle_count", "cpu", cpu)
}
}
}

View file

@ -19,6 +19,7 @@ import (
"strconv"
"unsafe"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
)
@ -30,16 +31,18 @@ import (
import "C"
type cpuCollector struct {
cpu typedDesc
cpu typedDesc
logger log.Logger
}
func init() {
registerCollector("cpu", defaultEnabled, NewCPUCollector)
}
func NewCPUCollector() (Collector, error) {
func NewCPUCollector(logger log.Logger) (Collector, error) {
return &cpuCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
logger: logger,
}, nil
}

View file

@ -19,6 +19,7 @@ package collector
import (
"strconv"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
kstat "github.com/siebenmann/go-kstat"
)
@ -27,16 +28,18 @@ import (
import "C"
type cpuCollector struct {
cpu typedDesc
cpu typedDesc
logger log.Logger
}
func init() {
registerCollector("cpu", defaultEnabled, NewCpuCollector)
}
func NewCpuCollector() (Collector, error) {
func NewCpuCollector(logger log.Logger) (Collector, error) {
return &cpuCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
logger: logger,
}, nil
}

View file

@ -18,6 +18,7 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
)
@ -30,6 +31,7 @@ type cpuFreqCollector struct {
scalingFreq *prometheus.Desc
scalingFreqMin *prometheus.Desc
scalingFreqMax *prometheus.Desc
logger log.Logger
}
func init() {
@ -37,7 +39,7 @@ func init() {
}
// NewCPUFreqCollector returns a new Collector exposing kernel/system statistics.
func NewCPUFreqCollector() (Collector, error) {
func NewCPUFreqCollector(logger log.Logger) (Collector, error) {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)
@ -75,6 +77,7 @@ func NewCPUFreqCollector() (Collector, error) {
"Maximum scaled cpu thread frequency in hertz.",
[]string{"cpu"}, nil,
),
logger: logger,
}, nil
}

View file

@ -20,6 +20,7 @@ import (
"fmt"
"strconv"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
kstat "github.com/siebenmann/go-kstat"
)
@ -30,13 +31,14 @@ import "C"
type cpuFreqCollector struct {
cpuFreq *prometheus.Desc
cpuFreqMax *prometheus.Desc
logger log.Logger
}
func init() {
registerCollector("cpufreq", defaultEnabled, NewCpuFreqCollector)
}
func NewFreqCpuCollector() (Collector, error) {
func NewFreqCpuCollector(logger log.Logger) (Collector, error) {
return &cpuFreqCollector{
cpuFreq: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"),
@ -48,6 +50,7 @@ func NewFreqCpuCollector() (Collector, error) {
"Maximum cpu thread frequency in hertz.",
[]string{"cpu"}, nil,
),
logger: logger,
}, nil
}

View file

@ -19,6 +19,7 @@ import (
"errors"
"fmt"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -96,6 +97,7 @@ type devstatCollector struct {
bytesDesc *prometheus.Desc
transfersDesc *prometheus.Desc
blocksDesc *prometheus.Desc
logger log.Logger
}
func init() {
@ -103,7 +105,7 @@ func init() {
}
// NewDevstatCollector returns a new Collector exposing Device stats.
func NewDevstatCollector() (Collector, error) {
func NewDevstatCollector(logger log.Logger) (Collector, error) {
return &devstatCollector{
bytesDesc: prometheus.NewDesc(
prometheus.BuildFQName(namespace, devstatSubsystem, "bytes_total"),
@ -120,6 +122,7 @@ func NewDevstatCollector() (Collector, error) {
"The total number of bytes given in terms of the devices blocksize.",
[]string{"device"}, nil,
),
logger: logger,
}, nil
}

View file

@ -21,6 +21,7 @@ import (
"sync"
"unsafe"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -41,6 +42,7 @@ type devstatCollector struct {
duration typedDesc
busyTime typedDesc
blocks typedDesc
logger log.Logger
}
func init() {
@ -48,7 +50,7 @@ func init() {
}
// NewDevstatCollector returns a new Collector exposing Device stats.
func NewDevstatCollector() (Collector, error) {
func NewDevstatCollector(logger log.Logger) (Collector, error) {
return &devstatCollector{
devinfo: &C.struct_devinfo{},
bytes: typedDesc{prometheus.NewDesc(
@ -76,6 +78,7 @@ func NewDevstatCollector() (Collector, error) {
"The total number of blocks transferred.",
[]string{"device"}, nil,
), prometheus.CounterValue},
logger: logger,
}, nil
}

View file

@ -18,6 +18,7 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/lufia/iostat"
"github.com/prometheus/client_golang/prometheus"
)
@ -28,7 +29,8 @@ type typedDescFunc struct {
}
type diskstatsCollector struct {
descs []typedDescFunc
descs []typedDescFunc
logger log.Logger
}
func init() {
@ -36,7 +38,7 @@ func init() {
}
// NewDiskstatsCollector returns a new Collector exposing disk device stats.
func NewDiskstatsCollector() (Collector, error) {
func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
var diskLabelNames = []string{"device"}
return &diskstatsCollector{
@ -124,6 +126,7 @@ func NewDiskstatsCollector() (Collector, error) {
},
},
},
logger: logger,
}, nil
}

View file

@ -24,8 +24,9 @@ import (
"strconv"
"strings"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
)
@ -54,6 +55,7 @@ func (d *typedFactorDesc) mustNewConstMetric(value float64, labels ...string) pr
type diskstatsCollector struct {
ignoredDevicesPattern *regexp.Regexp
descs []typedFactorDesc
logger log.Logger
}
func init() {
@ -62,7 +64,7 @@ func init() {
// NewDiskstatsCollector returns a new Collector exposing disk device stats.
// Docs from https://www.kernel.org/doc/Documentation/iostats.txt
func NewDiskstatsCollector() (Collector, error) {
func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
var diskLabelNames = []string{"device"}
return &diskstatsCollector{
@ -178,6 +180,7 @@ func NewDiskstatsCollector() (Collector, error) {
factor: .001,
},
},
logger: logger,
}, nil
}
@ -189,7 +192,7 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
for dev, stats := range diskStats {
if c.ignoredDevicesPattern.MatchString(dev) {
log.Debugf("Ignoring device: %s", dev)
level.Debug(c.logger).Log("msg", "Ignoring device", "device", dev)
continue
}

View file

@ -18,6 +18,7 @@ package collector
import (
"unsafe"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
)
@ -34,6 +35,7 @@ type diskstatsCollector struct {
wxfer typedDesc
wbytes typedDesc
time typedDesc
logger log.Logger
}
func init() {
@ -41,13 +43,14 @@ func init() {
}
// NewDiskstatsCollector returns a new Collector exposing disk device stats.
func NewDiskstatsCollector() (Collector, error) {
func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
return &diskstatsCollector{
rxfer: typedDesc{readsCompletedDesc, prometheus.CounterValue},
rbytes: typedDesc{readBytesDesc, prometheus.CounterValue},
wxfer: typedDesc{writesCompletedDesc, prometheus.CounterValue},
wbytes: typedDesc{writtenBytesDesc, prometheus.CounterValue},
time: typedDesc{ioTimeSecondsDesc, prometheus.CounterValue},
logger: logger,
}, nil
}

View file

@ -20,8 +20,9 @@ import (
"strconv"
"strings"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
// Numerical metric provided by /proc/drbd.
@ -74,13 +75,14 @@ type drbdCollector struct {
numerical map[string]drbdNumericalMetric
stringPair map[string]drbdStringPairMetric
connected *prometheus.Desc
logger log.Logger
}
func init() {
registerCollector("drbd", defaultDisabled, newDRBDCollector)
}
func newDRBDCollector() (Collector, error) {
func newDRBDCollector(logger log.Logger) (Collector, error) {
return &drbdCollector{
numerical: map[string]drbdNumericalMetric{
"ns": newDRBDNumericalMetric(
@ -176,6 +178,7 @@ func newDRBDCollector() (Collector, error) {
[]string{"device"},
nil,
),
logger: logger,
}, nil
}
@ -184,7 +187,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error {
file, err := os.Open(statsFile)
if err != nil {
if os.IsNotExist(err) {
log.Debugf("drbd: %s does not exist, skipping: %s", statsFile, err)
level.Debug(c.logger).Log("msg", "stats file does not exist, skipping", "file", statsFile, "err", err)
return nil
}
@ -201,7 +204,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error {
kv := strings.Split(field, ":")
if len(kv) != 2 {
log.Debugf("drbd: skipping invalid key:value pair %q", field)
level.Debug(c.logger).Log("msg", "skipping invalid key:value pair", "field", field)
continue
}
@ -267,7 +270,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error {
continue
}
log.Debugf("drbd: unhandled key-value pair: [%s: %q]", kv[0], kv[1])
level.Debug(c.logger).Log("msg", "unhandled key-value pair", "key", kv[0], "value", kv[1])
}
return scanner.Err()

View file

@ -20,6 +20,7 @@ import (
"path/filepath"
"regexp"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -37,6 +38,7 @@ type edacCollector struct {
ueCount *prometheus.Desc
csRowCECount *prometheus.Desc
csRowUECount *prometheus.Desc
logger log.Logger
}
func init() {
@ -44,7 +46,7 @@ func init() {
}
// NewEdacCollector returns a new Collector exposing edac stats.
func NewEdacCollector() (Collector, error) {
func NewEdacCollector(logger log.Logger) (Collector, error) {
return &edacCollector{
ceCount: prometheus.NewDesc(
prometheus.BuildFQName(namespace, edacSubsystem, "correctable_errors_total"),
@ -66,6 +68,7 @@ func NewEdacCollector() (Collector, error) {
"Total uncorrectable memory errors for this csrow.",
[]string{"controller", "csrow"}, nil,
),
logger: logger,
}, nil
}

View file

@ -18,11 +18,13 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
type entropyCollector struct {
entropyAvail *prometheus.Desc
logger log.Logger
}
func init() {
@ -30,13 +32,14 @@ func init() {
}
// NewEntropyCollector returns a new Collector exposing entropy stats.
func NewEntropyCollector() (Collector, error) {
func NewEntropyCollector(logger log.Logger) (Collector, error) {
return &entropyCollector{
entropyAvail: prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "entropy_available_bits"),
"Bits of available entropy.",
nil, nil,
),
logger: logger,
}, nil
}

View file

@ -17,11 +17,13 @@
package collector
import (
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
type execCollector struct {
sysctls []bsdSysctl
logger log.Logger
}
func init() {
@ -29,7 +31,7 @@ func init() {
}
// NewExecCollector returns a new Collector exposing system execution statistics.
func NewExecCollector() (Collector, error) {
func NewExecCollector(logger log.Logger) (Collector, error) {
// From sys/vm/vm_meter.c:
// All are of type CTLTYPE_UINT.
//
@ -73,6 +75,7 @@ func NewExecCollector() (Collector, error) {
mib: "vm.stats.vm.v_forks",
},
},
logger: logger,
}, nil
}

View file

@ -22,6 +22,7 @@ import (
"os"
"strconv"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -29,15 +30,17 @@ const (
fileFDStatSubsystem = "filefd"
)
type fileFDStatCollector struct{}
type fileFDStatCollector struct {
logger log.Logger
}
func init() {
registerCollector(fileFDStatSubsystem, defaultEnabled, NewFileFDStatCollector)
}
// NewFileFDStatCollector returns a new Collector exposing file-nr stats.
func NewFileFDStatCollector() (Collector, error) {
return &fileFDStatCollector{}, nil
func NewFileFDStatCollector(logger log.Logger) (Collector, error) {
return &fileFDStatCollector{logger}, nil
}
func (c *fileFDStatCollector) Update(ch chan<- prometheus.Metric) error {

View file

@ -20,7 +20,7 @@ import (
"errors"
"unsafe"
"github.com/prometheus/common/log"
"github.com/go-kit/kit/log/level"
)
/*
@ -50,14 +50,14 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
for i := 0; i < int(count); i++ {
mountpoint := C.GoString(&mnt[i].f_mntonname[0])
if c.ignoredMountPointsPattern.MatchString(mountpoint) {
log.Debugf("Ignoring mount point: %s", mountpoint)
level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := C.GoString(&mnt[i].f_mntfromname[0])
fstype := C.GoString(&mnt[i].f_fstypename[0])
if c.ignoredFSTypesPattern.MatchString(fstype) {
log.Debugf("Ignoring fs type: %s", fstype)
level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype)
continue
}

View file

@ -19,6 +19,7 @@ package collector
import (
"regexp"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
)
@ -48,6 +49,7 @@ type filesystemCollector struct {
sizeDesc, freeDesc, availDesc *prometheus.Desc
filesDesc, filesFreeDesc *prometheus.Desc
roDesc, deviceErrorDesc *prometheus.Desc
logger log.Logger
}
type filesystemLabels struct {
@ -66,7 +68,7 @@ func init() {
}
// NewFilesystemCollector returns a new Collector exposing filesystems stats.
func NewFilesystemCollector() (Collector, error) {
func NewFilesystemCollector(logger log.Logger) (Collector, error) {
subsystem := "filesystem"
mountPointPattern := regexp.MustCompile(*ignoredMountPoints)
filesystemsTypesPattern := regexp.MustCompile(*ignoredFSTypes)
@ -123,6 +125,7 @@ func NewFilesystemCollector() (Collector, error) {
filesFreeDesc: filesFreeDesc,
roDesc: roDesc,
deviceErrorDesc: deviceErrorDesc,
logger: logger,
}, nil
}

View file

@ -19,7 +19,7 @@ import (
"bytes"
"unsafe"
"github.com/prometheus/common/log"
"github.com/go-kit/kit/log/level"
"golang.org/x/sys/unix"
)
@ -54,14 +54,14 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
for _, fs := range buf {
mountpoint := gostring(fs.Mntonname[:])
if c.ignoredMountPointsPattern.MatchString(mountpoint) {
log.Debugf("Ignoring mount point: %s", mountpoint)
level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := gostring(fs.Mntfromname[:])
fstype := gostring(fs.Fstypename[:])
if c.ignoredFSTypesPattern.MatchString(fstype) {
log.Debugf("Ignoring fs type: %s", fstype)
level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype)
continue
}

View file

@ -24,9 +24,10 @@ import (
"sync"
"time"
"github.com/prometheus/common/log"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"golang.org/x/sys/unix"
kingpin "gopkg.in/alecthomas/kingpin.v2"
"gopkg.in/alecthomas/kingpin.v2"
)
const (
@ -42,18 +43,18 @@ var stuckMountsMtx = &sync.Mutex{}
// GetStats returns filesystem stats.
func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
mps, err := mountPointDetails()
mps, err := mountPointDetails(c.logger)
if err != nil {
return nil, err
}
stats := []filesystemStats{}
for _, labels := range mps {
if c.ignoredMountPointsPattern.MatchString(labels.mountPoint) {
log.Debugf("Ignoring mount point: %s", labels.mountPoint)
level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", labels.mountPoint)
continue
}
if c.ignoredFSTypesPattern.MatchString(labels.fsType) {
log.Debugf("Ignoring fs type: %s", labels.fsType)
level.Debug(c.logger).Log("msg", "Ignoring fs", "type", labels.fsType)
continue
}
stuckMountsMtx.Lock()
@ -62,7 +63,7 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
labels: labels,
deviceError: 1,
})
log.Debugf("Mount point %q is in an unresponsive state", labels.mountPoint)
level.Debug(c.logger).Log("msg", "Mount point is in an unresponsive state", "mountpoint", labels.mountPoint)
stuckMountsMtx.Unlock()
continue
}
@ -71,7 +72,7 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
// The success channel is used to tell the "watcher" that the stat
// finished successfully. The channel is closed on success.
success := make(chan struct{})
go stuckMountWatcher(labels.mountPoint, success)
go stuckMountWatcher(labels.mountPoint, success, c.logger)
buf := new(unix.Statfs_t)
err = unix.Statfs(rootfsFilePath(labels.mountPoint), buf)
@ -79,7 +80,7 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
close(success)
// If the mount has been marked as stuck, unmark it and log its recovery.
if _, ok := stuckMounts[labels.mountPoint]; ok {
log.Debugf("Mount point %q has recovered, monitoring will resume", labels.mountPoint)
level.Debug(c.logger).Log("msg", "Mount point has recovered, monitoring will resume", "mountpoint", labels.mountPoint)
delete(stuckMounts, labels.mountPoint)
}
stuckMountsMtx.Unlock()
@ -89,7 +90,8 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
labels: labels,
deviceError: 1,
})
log.Debugf("Error on statfs() system call for %q: %s", rootfsFilePath(labels.mountPoint), err)
level.Debug(c.logger).Log("msg", "Error on statfs() system call", "rootfs", rootfsFilePath(labels.mountPoint), "err", err)
continue
}
@ -117,7 +119,7 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
// stuckMountWatcher listens on the given success channel and if the channel closes
// then the watcher does nothing. If instead the timeout is reached, the
// mount point that is being watched is marked as stuck.
func stuckMountWatcher(mountPoint string, success chan struct{}) {
func stuckMountWatcher(mountPoint string, success chan struct{}, logger log.Logger) {
select {
case <-success:
// Success
@ -128,18 +130,18 @@ func stuckMountWatcher(mountPoint string, success chan struct{}) {
case <-success:
// Success came in just after the timeout was reached, don't label the mount as stuck
default:
log.Debugf("Mount point %q timed out, it is being labeled as stuck and will not be monitored", mountPoint)
level.Debug(logger).Log("msg", "Mount point timed out, it is being labeled as stuck and will not be monitored", "mountpoint", mountPoint)
stuckMounts[mountPoint] = struct{}{}
}
stuckMountsMtx.Unlock()
}
}
func mountPointDetails() ([]filesystemLabels, error) {
func mountPointDetails(logger log.Logger) ([]filesystemLabels, error) {
file, err := os.Open(procFilePath("1/mounts"))
if os.IsNotExist(err) {
// Fallback to `/proc/mounts` if `/proc/1/mounts` is missing due hidepid.
log.Debugf("Got %q reading root mounts, falling back to system mounts", err)
level.Debug(logger).Log("msg", "Reading root mounts failed, falling back to system mounts", "err", err)
file, err = os.Open(procFilePath("mounts"))
}
if err != nil {
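Free functions and goroutines that previously used the package-global logger, such as stuckMountWatcher and mountPointDetails above, now receive an explicit log.Logger argument. A minimal sketch of that pattern (watchMountSketch and the 5-second timeout are illustrative; the real code uses its own timeout):

package collector

import (
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

// watchMountSketch is a hypothetical helper showing the explicit-logger pattern.
func watchMountSketch(mountPoint string, success chan struct{}, logger log.Logger) {
	select {
	case <-success:
		// The stat call finished in time; nothing to record.
	case <-time.After(5 * time.Second):
		level.Debug(logger).Log("msg", "Mount point timed out, labeling it as stuck", "mountpoint", mountPoint)
	}
}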

View file

@ -16,6 +16,7 @@
package collector
import (
"github.com/go-kit/kit/log"
"strings"
"testing"
@ -80,7 +81,7 @@ func TestMountPointDetails(t *testing.T) {
"/var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore] bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk": "",
}
filesystems, err := mountPointDetails()
filesystems, err := mountPointDetails(log.NewNopLogger())
if err != nil {
t.Log(err)
}
@ -101,7 +102,7 @@ func TestMountsFallback(t *testing.T) {
"/": "",
}
filesystems, err := mountPointDetails()
filesystems, err := mountPointDetails(log.NewNopLogger())
if err != nil {
t.Log(err)
}
@ -129,7 +130,7 @@ func TestPathRootfs(t *testing.T) {
"/sys/fs/cgroup": "",
}
filesystems, err := mountPointDetails()
filesystems, err := mountPointDetails(log.NewNopLogger())
if err != nil {
t.Log(err)
}
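Tests that call constructors or helpers with the new signatures pass log.NewNopLogger(), which discards all records. A hypothetical test in the same style (NewExampleCollector is the constructor sketched near the top of this commit, not part of the actual change):

package collector

import (
	"testing"

	"github.com/go-kit/kit/log"
)

func TestExampleCollectorSketch(t *testing.T) {
	// A no-op logger satisfies the new constructor signature without emitting output.
	if _, err := NewExampleCollector(log.NewNopLogger()); err != nil {
		t.Fatal(err)
	}
}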

View file

@ -24,8 +24,9 @@ import (
"strconv"
"strings"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"golang.org/x/sys/unix"
)
@ -45,12 +46,14 @@ func init() {
registerCollector("hwmon", defaultEnabled, NewHwMonCollector)
}
type hwMonCollector struct{}
type hwMonCollector struct {
logger log.Logger
}
// NewHwMonCollector returns a new Collector exposing /sys/class/hwmon stats
// (similar to lm-sensors).
func NewHwMonCollector() (Collector, error) {
return &hwMonCollector{}, nil
func NewHwMonCollector(logger log.Logger) (Collector, error) {
return &hwMonCollector{logger}, nil
}
func cleanMetricName(name string) string {
@ -422,7 +425,7 @@ func (c *hwMonCollector) Update(ch chan<- prometheus.Metric) error {
hwmonFiles, err := ioutil.ReadDir(hwmonPathName)
if err != nil {
if os.IsNotExist(err) {
log.Debug("hwmon collector metrics are not available for this system")
level.Debug(c.logger).Log("msg", "hwmon collector metrics are not available for this system")
return nil
}

View file

@ -20,6 +20,7 @@ import (
"fmt"
"strconv"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
)
@ -27,6 +28,7 @@ import (
type infinibandCollector struct {
fs sysfs.FS
metricDescs map[string]*prometheus.Desc
logger log.Logger
}
func init() {
@ -34,7 +36,7 @@ func init() {
}
// NewInfiniBandCollector returns a new Collector exposing InfiniBand stats.
func NewInfiniBandCollector() (Collector, error) {
func NewInfiniBandCollector(logger log.Logger) (Collector, error) {
var i infinibandCollector
var err error
@ -42,6 +44,7 @@ func NewInfiniBandCollector() (Collector, error) {
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)
}
i.logger = logger
// Detailed description for all metrics.
descriptions := map[string]string{

View file

@ -16,10 +16,14 @@
package collector
import "github.com/prometheus/client_golang/prometheus"
import (
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
type interruptsCollector struct {
desc typedDesc
desc typedDesc
logger log.Logger
}
func init() {
@ -27,12 +31,13 @@ func init() {
}
// NewInterruptsCollector returns a new Collector exposing interrupts stats.
func NewInterruptsCollector() (Collector, error) {
func NewInterruptsCollector(logger log.Logger) (Collector, error) {
return &interruptsCollector{
desc: typedDesc{prometheus.NewDesc(
namespace+"_interrupts_total",
"Interrupt details.",
interruptLabelNames, nil,
), prometheus.CounterValue},
logger: logger,
}, nil
}

View file

@ -20,8 +20,9 @@ import (
"os"
"strconv"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/procfs"
)
@ -30,6 +31,7 @@ type ipvsCollector struct {
fs procfs.FS
backendConnectionsActive, backendConnectionsInact, backendWeight typedDesc
connections, incomingPackets, outgoingPackets, incomingBytes, outgoingBytes typedDesc
logger log.Logger
}
func init() {
@ -38,11 +40,11 @@ func init() {
// NewIPVSCollector sets up a new collector for IPVS metrics. It accepts the
// "procfs" config parameter to override the default proc location (/proc).
func NewIPVSCollector() (Collector, error) {
return newIPVSCollector()
func NewIPVSCollector(logger log.Logger) (Collector, error) {
return newIPVSCollector(logger)
}
func newIPVSCollector() (*ipvsCollector, error) {
func newIPVSCollector(logger log.Logger) (*ipvsCollector, error) {
var (
ipvsBackendLabelNames = []string{
"local_address",
@ -57,6 +59,7 @@ func newIPVSCollector() (*ipvsCollector, error) {
subsystem = "ipvs"
)
c.logger = logger
c.fs, err = procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
@ -111,7 +114,7 @@ func (c *ipvsCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil {
// Cannot access ipvs metrics, report no error.
if os.IsNotExist(err) {
log.Debug("ipvs collector metrics are not available for this system")
level.Debug(c.logger).Log("msg", "ipvs collector metrics are not available for this system")
return nil
}
return fmt.Errorf("could not get IPVS stats: %s", err)

View file

@ -15,6 +15,7 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"io/ioutil"
"net/http"
"net/http/httptest"
@ -30,7 +31,7 @@ func TestIPVSCollector(t *testing.T) {
if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "fixtures/proc"}); err != nil {
t.Fatal(err)
}
collector, err := newIPVSCollector()
collector, err := newIPVSCollector(log.NewNopLogger())
if err != nil {
t.Fatal(err)
}
@ -79,7 +80,7 @@ func TestIPVSCollectorResponse(t *testing.T) {
if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "fixtures/proc"}); err != nil {
t.Fatal(err)
}
collector, err := NewIPVSCollector()
collector, err := NewIPVSCollector(log.NewNopLogger())
if err != nil {
t.Fatal(err)
}

View file

@ -19,6 +19,7 @@ import (
"fmt"
"path/filepath"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -29,6 +30,7 @@ var (
type ksmdCollector struct {
metricDescs map[string]*prometheus.Desc
logger log.Logger
}
func init() {
@ -47,7 +49,7 @@ func getCanonicalMetricName(filename string) string {
}
// NewKsmdCollector returns a new Collector exposing kernel/system statistics.
func NewKsmdCollector() (Collector, error) {
func NewKsmdCollector(logger log.Logger) (Collector, error) {
subsystem := "ksmd"
descs := make(map[string]*prometheus.Desc)
@ -56,7 +58,7 @@ func NewKsmdCollector() (Collector, error) {
prometheus.BuildFQName(namespace, subsystem, getCanonicalMetricName(n)),
fmt.Sprintf("ksmd '%s' file.", n), nil, nil)
}
return &ksmdCollector{descs}, nil
return &ksmdCollector{descs, logger}, nil
}
// Update implements Collector and exposes kernel and system statistics.

View file

@ -19,12 +19,14 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
type loadavgCollector struct {
metric []typedDesc
logger log.Logger
}
func init() {
@ -32,13 +34,14 @@ func init() {
}
// NewLoadavgCollector returns a new Collector exposing load average stats.
func NewLoadavgCollector() (Collector, error) {
func NewLoadavgCollector(logger log.Logger) (Collector, error) {
return &loadavgCollector{
metric: []typedDesc{
{prometheus.NewDesc(namespace+"_load1", "1m load average.", nil, nil), prometheus.GaugeValue},
{prometheus.NewDesc(namespace+"_load5", "5m load average.", nil, nil), prometheus.GaugeValue},
{prometheus.NewDesc(namespace+"_load15", "15m load average.", nil, nil), prometheus.GaugeValue},
},
logger: logger,
}, nil
}
@ -48,7 +51,7 @@ func (c *loadavgCollector) Update(ch chan<- prometheus.Metric) error {
return fmt.Errorf("couldn't get load: %s", err)
}
for i, load := range loads {
log.Debugf("return load %d: %f", i, load)
level.Debug(c.logger).Log("msg", "return load", "index", i, "load", load)
ch <- c.metric[i].mustNewConstMetric(load)
}
return err

View file

@ -20,6 +20,7 @@ import (
"os"
"strconv"
"github.com/go-kit/kit/log"
"github.com/godbus/dbus"
"github.com/prometheus/client_golang/prometheus"
)
@ -43,7 +44,9 @@ var (
)
)
type logindCollector struct{}
type logindCollector struct {
logger log.Logger
}
type logindDbus struct {
conn *dbus.Conn
@ -82,8 +85,8 @@ func init() {
}
// NewLogindCollector returns a new Collector exposing logind statistics.
func NewLogindCollector() (Collector, error) {
return &logindCollector{}, nil
func NewLogindCollector(logger log.Logger) (Collector, error) {
return &logindCollector{logger}, nil
}
func (lc *logindCollector) Update(ch chan<- prometheus.Metric) error {

View file

@ -19,20 +19,23 @@ import (
"fmt"
"os"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/procfs"
)
type mdadmCollector struct{}
type mdadmCollector struct {
logger log.Logger
}
func init() {
registerCollector("mdadm", defaultEnabled, NewMdadmCollector)
}
// NewMdadmCollector returns a new Collector exposing raid statistics.
func NewMdadmCollector() (Collector, error) {
return &mdadmCollector{}, nil
func NewMdadmCollector(logger log.Logger) (Collector, error) {
return &mdadmCollector{logger}, nil
}
var (
@ -101,7 +104,7 @@ func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil {
if os.IsNotExist(err) {
log.Debugf("Not collecting mdstat, file does not exist: %s", *procPath)
level.Debug(c.logger).Log("msg", "Not collecting mdstat, file does not exist", "file", *procPath)
return nil
}
@ -109,7 +112,7 @@ func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error {
}
for _, mdStat := range mdStats {
log.Debugf("collecting metrics for device %s", mdStat.Name)
level.Debug(c.logger).Log("msg", "collecting metrics for device", "device", mdStat.Name)
stateVals := make(map[string]float64)
stateVals[mdStat.ActivityState] = 1

View file

@ -20,23 +20,26 @@ import (
"fmt"
"strings"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
const (
memInfoSubsystem = "memory"
)
type meminfoCollector struct{}
type meminfoCollector struct {
logger log.Logger
}
func init() {
registerCollector("meminfo", defaultEnabled, NewMeminfoCollector)
}
// NewMeminfoCollector returns a new Collector exposing memory stats.
func NewMeminfoCollector() (Collector, error) {
return &meminfoCollector{}, nil
func NewMeminfoCollector(logger log.Logger) (Collector, error) {
return &meminfoCollector{logger}, nil
}
// Update calls (*meminfoCollector).getMemInfo to get the platform specific
@ -47,7 +50,7 @@ func (c *meminfoCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil {
return fmt.Errorf("couldn't get meminfo: %s", err)
}
log.Debugf("Set node_mem: %#v", memInfo)
level.Debug(c.logger).Log("msg", "Set node_mem", "memInfo", memInfo)
for k, v := range memInfo {
if strings.HasSuffix(k, "_total") {
metricType = prometheus.CounterValue

View file

@ -25,6 +25,7 @@ import (
"strconv"
"strings"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -43,6 +44,7 @@ type meminfoMetric struct {
type meminfoNumaCollector struct {
metricDescs map[string]*prometheus.Desc
logger log.Logger
}
func init() {
@ -50,9 +52,10 @@ func init() {
}
// NewMeminfoNumaCollector returns a new Collector exposing memory stats.
func NewMeminfoNumaCollector() (Collector, error) {
func NewMeminfoNumaCollector(logger log.Logger) (Collector, error) {
return &meminfoNumaCollector{
metricDescs: map[string]*prometheus.Desc{},
logger: logger,
}, nil
}

View file

@ -19,6 +19,7 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
)
@ -31,6 +32,7 @@ type memoryCollector struct {
pageSize uint64
sysctls []bsdSysctl
kvm kvm
logger log.Logger
}
func init() {
@ -38,7 +40,7 @@ func init() {
}
// NewMemoryCollector returns a new Collector exposing memory stats.
func NewMemoryCollector() (Collector, error) {
func NewMemoryCollector(logger log.Logger) (Collector, error) {
tmp32, err := unix.SysctlUint32("vm.stats.vm.v_page_size")
if err != nil {
return nil, fmt.Errorf("sysctl(vm.stats.vm.v_page_size) failed: %s", err)
@ -57,6 +59,7 @@ func NewMemoryCollector() (Collector, error) {
}
return &memoryCollector{
logger: logger,
pageSize: uint64(tmp32),
sysctls: []bsdSysctl{
// Descriptions via: https://wiki.freebsd.org/Memory

View file

@ -16,8 +16,9 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/procfs"
)
@ -91,6 +92,8 @@ type mountStatsCollector struct {
NFSEventPNFSWriteTotal *prometheus.Desc
proc procfs.Proc
logger log.Logger
}
// used to uniquely identify an NFS mount to prevent duplicates
@ -105,7 +108,7 @@ func init() {
}
// NewMountStatsCollector returns a new Collector exposing NFS statistics.
func NewMountStatsCollector() (Collector, error) {
func NewMountStatsCollector(logger log.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
@ -498,7 +501,8 @@ func NewMountStatsCollector() (Collector, error) {
nil,
),
proc: proc,
proc: proc,
logger: logger,
}, nil
}
@ -534,7 +538,7 @@ func (c *mountStatsCollector) Update(ch chan<- prometheus.Metric) error {
deviceIdentifier := nfsDeviceIdentifier{m.Device, stats.Transport.Protocol, mountAddress}
i := deviceList[deviceIdentifier]
if i {
log.Debugf("Skipping duplicate device entry %q", deviceIdentifier)
level.Debug(c.logger).Log("msg", "Skipping duplicate device entry", "device", deviceIdentifier)
continue
}

View file

@ -20,9 +20,10 @@ import (
"fmt"
"regexp"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
kingpin "gopkg.in/alecthomas/kingpin.v2"
"gopkg.in/alecthomas/kingpin.v2"
)
var (
@ -34,6 +35,7 @@ type netClassCollector struct {
subsystem string
ignoredDevicesPattern *regexp.Regexp
metricDescs map[string]*prometheus.Desc
logger log.Logger
}
func init() {
@ -41,7 +43,7 @@ func init() {
}
// NewNetClassCollector returns a new Collector exposing network class stats.
func NewNetClassCollector() (Collector, error) {
func NewNetClassCollector(logger log.Logger) (Collector, error) {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)
@ -52,6 +54,7 @@ func NewNetClassCollector() (Collector, error) {
subsystem: "network",
ignoredDevicesPattern: pattern,
metricDescs: map[string]*prometheus.Desc{},
logger: logger,
}, nil
}

View file

@ -21,7 +21,8 @@ import (
"regexp"
"strconv"
"github.com/prometheus/common/log"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
)
/*
@ -34,7 +35,7 @@ import (
*/
import "C"
func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp) (map[string]map[string]string, error) {
func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Logger) (map[string]map[string]string, error) {
netDev := map[string]map[string]string{}
var ifap, ifa *C.struct_ifaddrs
@ -47,11 +48,11 @@ func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp) (map[string]ma
if ifa.ifa_addr.sa_family == C.AF_LINK {
dev := C.GoString(ifa.ifa_name)
if ignore != nil && ignore.MatchString(dev) {
log.Debugf("Ignoring device: %s", dev)
level.Debug(logger).Log("msg", "Ignoring device", "device", dev)
continue
}
if accept != nil && !accept.MatchString(dev) {
log.Debugf("Ignoring device: %s", dev)
level.Debug(logger).Log("msg", "Ignoring device", "device", dev)
continue
}

View file

@ -22,6 +22,7 @@ import (
"regexp"
"strconv"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
)
@ -36,6 +37,7 @@ type netDevCollector struct {
ignoredDevicesPattern *regexp.Regexp
acceptDevicesPattern *regexp.Regexp
metricDescs map[string]*prometheus.Desc
logger log.Logger
}
func init() {
@ -43,7 +45,7 @@ func init() {
}
// NewNetDevCollector returns a new Collector exposing network device stats.
func NewNetDevCollector() (Collector, error) {
func NewNetDevCollector(logger log.Logger) (Collector, error) {
if *netdevIgnoredDevices != "" && *netdevAcceptDevices != "" {
return nil, errors.New("device-blacklist & accept-devices are mutually exclusive")
}
@ -63,11 +65,12 @@ func NewNetDevCollector() (Collector, error) {
ignoredDevicesPattern: ignorePattern,
acceptDevicesPattern: acceptPattern,
metricDescs: map[string]*prometheus.Desc{},
logger: logger,
}, nil
}
func (c *netDevCollector) Update(ch chan<- prometheus.Metric) error {
netDev, err := getNetDevStats(c.ignoredDevicesPattern, c.acceptDevicesPattern)
netDev, err := getNetDevStats(c.ignoredDevicesPattern, c.acceptDevicesPattern, c.logger)
if err != nil {
return fmt.Errorf("couldn't get netstats: %s", err)
}

View file

@ -23,11 +23,12 @@ import (
"regexp"
"strconv"
"github.com/prometheus/common/log"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"golang.org/x/sys/unix"
)
func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp) (map[string]map[string]string, error) {
func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Logger) (map[string]map[string]string, error) {
netDev := map[string]map[string]string{}
ifs, err := net.Interfaces()
@ -38,16 +39,16 @@ func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp) (map[string]ma
for _, iface := range ifs {
ifaceData, err := getIfaceData(iface.Index)
if err != nil {
log.Debugf("failed to load data for interface %q: %v", iface.Name, err)
level.Debug(logger).Log("msg", "failed to load data for interface", "device", iface.Name, "err", err)
continue
}
if ignore != nil && ignore.MatchString(iface.Name) {
log.Debugf("Ignoring device: %s", iface.Name)
level.Debug(logger).Log("msg", "Ignoring device", "device", iface.Name)
continue
}
if accept != nil && !accept.MatchString(iface.Name) {
log.Debugf("Ignoring device: %s", iface.Name)
level.Debug(logger).Log("msg", "Ignoring device", "device", iface.Name)
continue
}

View file

@ -23,7 +23,8 @@ import (
"regexp"
"strings"
"github.com/prometheus/common/log"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
)
var (
@ -31,17 +32,17 @@ var (
procNetDevFieldSep = regexp.MustCompile(` +`)
)
func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp) (map[string]map[string]string, error) {
func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Logger) (map[string]map[string]string, error) {
file, err := os.Open(procFilePath("net/dev"))
if err != nil {
return nil, err
}
defer file.Close()
return parseNetDevStats(file, ignore, accept)
return parseNetDevStats(file, ignore, accept, logger)
}
func parseNetDevStats(r io.Reader, ignore *regexp.Regexp, accept *regexp.Regexp) (map[string]map[string]string, error) {
func parseNetDevStats(r io.Reader, ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Logger) (map[string]map[string]string, error) {
scanner := bufio.NewScanner(r)
scanner.Scan() // skip first header
scanner.Scan()
@ -65,11 +66,11 @@ func parseNetDevStats(r io.Reader, ignore *regexp.Regexp, accept *regexp.Regexp)
dev := parts[1]
if ignore != nil && ignore.MatchString(dev) {
log.Debugf("Ignoring device: %s", dev)
level.Debug(logger).Log("msg", "Ignoring device", "device", dev)
continue
}
if accept != nil && !accept.MatchString(dev) {
log.Debugf("Ignoring device: %s", dev)
level.Debug(logger).Log("msg", "Ignoring device", "device", dev)
continue
}

View file

@ -14,6 +14,7 @@
package collector
import (
"github.com/go-kit/kit/log"
"os"
"regexp"
"testing"
@ -26,7 +27,7 @@ func TestNetDevStatsIgnore(t *testing.T) {
}
defer file.Close()
netStats, err := parseNetDevStats(file, regexp.MustCompile("^veth"), nil)
netStats, err := parseNetDevStats(file, regexp.MustCompile("^veth"), nil, log.NewNopLogger())
if err != nil {
t.Fatal(err)
}
@ -67,7 +68,7 @@ func TestNetDevStatsAccept(t *testing.T) {
}
defer file.Close()
netStats, err := parseNetDevStats(file, nil, regexp.MustCompile("^💩0$"))
netStats, err := parseNetDevStats(file, nil, regexp.MustCompile("^💩0$"), log.NewNopLogger())
if err != nil {
t.Fatal(err)
}

View file

@ -20,7 +20,8 @@ import (
"regexp"
"strconv"
"github.com/prometheus/common/log"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
)
/*
@ -31,7 +32,7 @@ import (
*/
import "C"
func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp) (map[string]map[string]string, error) {
func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp, logger log.Logger) (map[string]map[string]string, error) {
netDev := map[string]map[string]string{}
var ifap, ifa *C.struct_ifaddrs
@ -44,11 +45,11 @@ func getNetDevStats(ignore *regexp.Regexp, accept *regexp.Regexp) (map[string]ma
if ifa.ifa_addr.sa_family == C.AF_LINK {
dev := C.GoString(ifa.ifa_name)
if ignore != nil && ignore.MatchString(dev) {
log.Debugf("Ignoring device: %s", dev)
level.Debug(logger).Log("msg", "Ignoring device", "device", dev)
continue
}
if accept != nil && !accept.MatchString(dev) {
log.Debugf("Ignoring device: %s", dev)
level.Debug(logger).Log("msg", "Ignoring device", "device", dev)
continue
}

View file

@ -24,6 +24,7 @@ import (
"strconv"
"strings"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
)
@ -38,6 +39,7 @@ var (
type netStatCollector struct {
fieldPattern *regexp.Regexp
logger log.Logger
}
func init() {
@ -46,10 +48,11 @@ func init() {
// NewNetStatCollector takes and returns
// a new Collector exposing network stats.
func NewNetStatCollector() (Collector, error) {
func NewNetStatCollector(logger log.Logger) (Collector, error) {
pattern := regexp.MustCompile(*netStatFields)
return &netStatCollector{
fieldPattern: pattern,
logger: logger,
}, nil
}

View file

@ -18,8 +18,9 @@ import (
"os"
"reflect"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/procfs/nfs"
)
@ -35,6 +36,7 @@ type nfsCollector struct {
nfsRPCRetransmissionsDesc *prometheus.Desc
nfsRPCAuthenticationRefreshesDesc *prometheus.Desc
nfsProceduresDesc *prometheus.Desc
logger log.Logger
}
func init() {
@ -42,7 +44,7 @@ func init() {
}
// NewNfsCollector returns a new Collector exposing NFS statistics.
func NewNfsCollector() (Collector, error) {
func NewNfsCollector(logger log.Logger) (Collector, error) {
fs, err := nfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
@ -86,6 +88,7 @@ func NewNfsCollector() (Collector, error) {
[]string{"proto", "method"},
nil,
),
logger: logger,
}, nil
}
@ -93,7 +96,7 @@ func (c *nfsCollector) Update(ch chan<- prometheus.Metric) error {
stats, err := c.fs.ClientRPCStats()
if err != nil {
if os.IsNotExist(err) {
log.Debugf("Not collecting NFS metrics: %s", err)
level.Debug(c.logger).Log("msg", "Not collecting NFS metrics", "err", err)
return nil
}
return fmt.Errorf("failed to retrieve nfs stats: %w", err)

View file

@ -17,8 +17,9 @@ import (
"fmt"
"os"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/procfs/nfs"
)
@ -27,6 +28,7 @@ import (
type nfsdCollector struct {
fs nfs.FS
requestsDesc *prometheus.Desc
logger log.Logger
}
func init() {
@ -38,7 +40,7 @@ const (
)
// NewNFSdCollector returns a new Collector exposing /proc/net/rpc/nfsd statistics.
func NewNFSdCollector() (Collector, error) {
func NewNFSdCollector(logger log.Logger) (Collector, error) {
fs, err := nfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
@ -51,6 +53,7 @@ func NewNFSdCollector() (Collector, error) {
"Total number NFSd Requests by method and protocol.",
[]string{"proto", "method"}, nil,
),
logger: logger,
}, nil
}
@ -59,7 +62,7 @@ func (c *nfsdCollector) Update(ch chan<- prometheus.Metric) error {
stats, err := c.fs.ServerRPCStats()
if err != nil {
if os.IsNotExist(err) {
log.Debugf("Not collecting NFSd metrics: %s", err)
level.Debug(c.logger).Log("msg", "Not collecting NFSd metrics", "err", err)
return nil
}
return fmt.Errorf("failed to retrieve nfsd stats: %w", err)

View file

@ -22,6 +22,7 @@ import (
"time"
"github.com/beevik/ntp"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
)
@ -47,6 +48,7 @@ var (
type ntpCollector struct {
stratum, leap, rtt, offset, reftime, rootDelay, rootDispersion, sanity typedDesc
logger log.Logger
}
func init() {
@ -57,7 +59,7 @@ func init() {
// Default definition of "local" is:
// - collector.ntp.server address is a loopback address (or collector.ntp.server-is-mine flag is turned on)
// - the server is reachable with outgoing IP_TTL = 1
func NewNtpCollector() (Collector, error) {
func NewNtpCollector(logger log.Logger) (Collector, error) {
ipaddr := net.ParseIP(*ntpServer)
if !*ntpServerIsLocal && (ipaddr == nil || !ipaddr.IsLoopback()) {
return nil, fmt.Errorf("only IP address of local NTP server is valid for --collector.ntp.server")
@ -112,6 +114,7 @@ func NewNtpCollector() (Collector, error) {
"NTPD sanity according to RFC5905 heuristics and configured limits.",
nil, nil,
), prometheus.GaugeValue},
logger: logger,
}, nil
}

View file

@ -17,7 +17,8 @@ import (
"fmt"
"runtime"
perf "github.com/hodgesds/perf-utils"
"github.com/go-kit/kit/log"
"github.com/hodgesds/perf-utils"
"github.com/prometheus/client_golang/prometheus"
)
@ -29,7 +30,7 @@ func init() {
registerCollector(perfSubsystem, defaultDisabled, NewPerfCollector)
}
// perfCollector is a Collecter that uses the perf subsystem to collect
// perfCollector is a Collector that uses the perf subsystem to collect
// metrics. It uses perf_event_open and ioctls for profiling. Due to the fact
// that the perf subsystem is highly dependent on kernel configuration and
// settings, not all profiler values may be exposed on the target system at any
@ -39,34 +40,36 @@ type perfCollector struct {
perfSwProfilers map[int]perf.SoftwareProfiler
perfCacheProfilers map[int]perf.CacheProfiler
desc map[string]*prometheus.Desc
logger log.Logger
}
// NewPerfCollector returns a new perf based collector, it creates a profiler
// per CPU.
func NewPerfCollector() (Collector, error) {
collector := &perfCollector{
func NewPerfCollector(logger log.Logger) (Collector, error) {
c := &perfCollector{
perfHwProfilers: map[int]perf.HardwareProfiler{},
perfSwProfilers: map[int]perf.SoftwareProfiler{},
perfCacheProfilers: map[int]perf.CacheProfiler{},
logger: logger,
}
ncpus := runtime.NumCPU()
for i := 0; i < ncpus; i++ {
// Use -1 to profile all processes on the CPU, see:
// man perf_event_open
collector.perfHwProfilers[i] = perf.NewHardwareProfiler(-1, i)
if err := collector.perfHwProfilers[i].Start(); err != nil {
return collector, err
c.perfHwProfilers[i] = perf.NewHardwareProfiler(-1, i)
if err := c.perfHwProfilers[i].Start(); err != nil {
return c, err
}
collector.perfSwProfilers[i] = perf.NewSoftwareProfiler(-1, i)
if err := collector.perfSwProfilers[i].Start(); err != nil {
return collector, err
c.perfSwProfilers[i] = perf.NewSoftwareProfiler(-1, i)
if err := c.perfSwProfilers[i].Start(); err != nil {
return c, err
}
collector.perfCacheProfilers[i] = perf.NewCacheProfiler(-1, i)
if err := collector.perfCacheProfilers[i].Start(); err != nil {
return collector, err
c.perfCacheProfilers[i] = perf.NewCacheProfiler(-1, i)
if err := c.perfCacheProfilers[i].Start(); err != nil {
return c, err
}
}
collector.desc = map[string]*prometheus.Desc{
c.desc = map[string]*prometheus.Desc{
"cpucycles_total": prometheus.NewDesc(
prometheus.BuildFQName(
namespace,
@ -309,7 +312,7 @@ func NewPerfCollector() (Collector, error) {
),
}
return collector, nil
return c, nil
}
// Update implements the Collector interface and will collect metrics per CPU.

View file

@ -16,6 +16,7 @@
package collector
import (
"github.com/go-kit/kit/log"
"io/ioutil"
"strconv"
"strings"
@ -37,7 +38,7 @@ func TestPerfCollector(t *testing.T) {
if paranoid >= 1 {
t.Skip("Skipping perf tests, set perf_event_paranoid to 0")
}
collector, err := NewPerfCollector()
collector, err := NewPerfCollector(log.NewNopLogger())
if err != nil {
t.Fatal(err)
}

View file

@ -20,6 +20,7 @@ import (
"fmt"
"regexp"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
"gopkg.in/alecthomas/kingpin.v2"
@ -33,18 +34,20 @@ type powerSupplyClassCollector struct {
subsystem string
ignoredPattern *regexp.Regexp
metricDescs map[string]*prometheus.Desc
logger log.Logger
}
func init() {
registerCollector("powersupplyclass", defaultEnabled, NewPowerSupplyClassCollector)
}
func NewPowerSupplyClassCollector() (Collector, error) {
func NewPowerSupplyClassCollector(logger log.Logger) (Collector, error) {
pattern := regexp.MustCompile(*powerSupplyClassIgnoredPowerSupplies)
return &powerSupplyClassCollector{
subsystem: "power_supply",
ignoredPattern: pattern,
metricDescs: map[string]*prometheus.Desc{},
logger: logger,
}, nil
}

View file

@ -18,8 +18,9 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/procfs"
)
@ -35,6 +36,8 @@ type pressureStatsCollector struct {
memFull *prometheus.Desc
fs procfs.FS
logger log.Logger
}
func init() {
@ -42,7 +45,7 @@ func init() {
}
// NewPressureStatsCollector returns a Collector exposing pressure stall information
func NewPressureStatsCollector() (Collector, error) {
func NewPressureStatsCollector(logger log.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
@ -74,17 +77,18 @@ func NewPressureStatsCollector() (Collector, error) {
"Total time in seconds no process could make progress due to memory congestion",
nil, nil,
),
fs: fs,
fs: fs,
logger: logger,
}, nil
}
// Update calls procfs.NewPSIStatsForResource for the different resources and updates the values
func (c *pressureStatsCollector) Update(ch chan<- prometheus.Metric) error {
for _, res := range psiResources {
log.Debugf("collecting statistics for resource: %s", res)
level.Debug(c.logger).Log("msg", "collecting statistics for resource", "resource", res)
vals, err := c.fs.PSIStatsForResource(res)
if err != nil {
log.Debug("pressure information is unavailable, you need a Linux kernel >= 4.20 and/or CONFIG_PSI enabled for your kernel")
level.Debug(c.logger).Log("msg", "pressure information is unavailable, you need a Linux kernel >= 4.20 and/or CONFIG_PSI enabled for your kernel")
return nil
}
switch res {
@ -97,7 +101,7 @@ func (c *pressureStatsCollector) Update(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(c.mem, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0)
ch <- prometheus.MustNewConstMetric(c.memFull, prometheus.CounterValue, float64(vals.Full.Total)/1000.0/1000.0)
default:
log.Debugf("did not account for resource: %s", res)
level.Debug(c.logger).Log("msg", "did not account for resource", "resource", res)
}
}

View file

@ -19,8 +19,9 @@ import (
"fmt"
"os"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/procfs"
)
@ -31,6 +32,7 @@ type processCollector struct {
procsState *prometheus.Desc
pidUsed *prometheus.Desc
pidMax *prometheus.Desc
logger log.Logger
}
func init() {
@ -38,7 +40,7 @@ func init() {
}
// NewProcessStatCollector returns a new Collector exposing process data read from the proc filesystem.
func NewProcessStatCollector() (Collector, error) {
func NewProcessStatCollector(logger log.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
@ -67,6 +69,7 @@ func NewProcessStatCollector() (Collector, error) {
pidMax: prometheus.NewDesc(prometheus.BuildFQName(namespace, subsystem, "max_processes"),
"Number of max PIDs limit", nil, nil,
),
logger: logger,
}, nil
}
func (c *processCollector) Update(ch chan<- prometheus.Metric) error {
@ -108,11 +111,11 @@ func (c *processCollector) getAllocatedThreads() (int, map[string]int32, int, er
stat, err := pid.Stat()
// PIDs can vanish between getting the list and getting stats.
if os.IsNotExist(err) {
log.Debugf("file not found when retrieving stats for pid %v: %q", pid, err)
level.Debug(c.logger).Log("msg", "file not found when retrieving stats for pid", "pid", pid, "err", err)
continue
}
if err != nil {
log.Debugf("error reading stat for pid %v: %q", pid, err)
level.Debug(c.logger).Log("msg", "error reading stat for pid", "pid", pid, "err", err)
return 0, nil, 0, err
}
pids++

View file

@ -16,6 +16,7 @@
package collector
import (
"github.com/go-kit/kit/log"
"testing"
"github.com/prometheus/procfs"
@ -31,7 +32,7 @@ func TestReadProcessStatus(t *testing.T) {
if err != nil {
t.Errorf("failed to open procfs: %v", err)
}
c := processCollector{fs: fs}
c := processCollector{fs: fs, logger: log.NewNopLogger()}
pids, states, threads, err := c.getAllocatedThreads()
if err != nil {
t.Fatalf("Cannot retrieve data from procfs getAllocatedThreads function: %v ", err)

View file

@ -21,6 +21,7 @@ import (
"path/filepath"
"github.com/ema/qdisc"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
)
@ -31,6 +32,7 @@ type qdiscStatCollector struct {
drops typedDesc
requeues typedDesc
overlimits typedDesc
logger log.Logger
}
var (
@ -42,7 +44,7 @@ func init() {
}
// NewQdiscStatCollector returns a new Collector exposing queuing discipline statistics.
func NewQdiscStatCollector() (Collector, error) {
func NewQdiscStatCollector(logger log.Logger) (Collector, error) {
return &qdiscStatCollector{
bytes: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(namespace, "qdisc", "bytes_total"),
@ -69,6 +71,7 @@ func NewQdiscStatCollector() (Collector, error) {
"Number of overlimit packets.",
[]string{"device", "kind"}, nil,
), prometheus.CounterValue},
logger: logger,
}, nil
}

View file

@ -16,8 +16,9 @@
package collector
import (
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/soundcloud/go-runit/runit"
"gopkg.in/alecthomas/kingpin.v2"
)
@ -25,7 +26,11 @@ import (
var runitServiceDir = kingpin.Flag("collector.runit.servicedir", "Path to runit service directory.").Default("/etc/service").String()
type runitCollector struct {
state, stateDesired, stateNormal, stateTimestamp typedDesc
state typedDesc
stateDesired typedDesc
stateNormal typedDesc
stateTimestamp typedDesc
logger log.Logger
}
func init() {
@ -33,7 +38,7 @@ func init() {
}
// NewRunitCollector returns a new Collector exposing runit statistics.
func NewRunitCollector() (Collector, error) {
func NewRunitCollector(logger log.Logger) (Collector, error) {
var (
subsystem = "service"
constLabels = prometheus.Labels{"supervisor": "runit"}
@ -61,6 +66,7 @@ func NewRunitCollector() (Collector, error) {
"Unix timestamp of the last runit service state change.",
labelNames, constLabels,
), prometheus.GaugeValue},
logger: logger,
}, nil
}
@ -73,11 +79,11 @@ func (c *runitCollector) Update(ch chan<- prometheus.Metric) error {
for _, service := range services {
status, err := service.Status()
if err != nil {
log.Debugf("Couldn't get status for %s: %s, skipping...", service.Name, err)
level.Debug(c.logger).Log("msg", "Couldn't get status", "service", service.Name, "err", err)
continue
}
log.Debugf("%s is %d on pid %d for %d seconds", service.Name, status.State, status.Pid, status.Duration)
level.Debug(c.logger).Log("msg", "duration", "service", service.Name, "status", status.State, "pid", status.Pid, "duration_seconds", status.Duration)
ch <- c.state.mustNewConstMetric(float64(status.State), service.Name)
ch <- c.stateDesired.mustNewConstMetric(float64(status.Want), service.Name)
ch <- c.stateTimestamp.mustNewConstMetric(float64(status.Timestamp.Unix()), service.Name)

View file

@ -16,6 +16,7 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
)
@ -46,17 +47,18 @@ var (
)
// NewSchedstatCollector returns a new Collector exposing task scheduler statistics
func NewSchedstatCollector() (Collector, error) {
func NewSchedstatCollector(logger log.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
}
return &schedstatCollector{fs: fs}, nil
return &schedstatCollector{fs, logger}, nil
}
type schedstatCollector struct {
fs procfs.FS
fs procfs.FS
logger log.Logger
}
func init() {

View file

@ -19,8 +19,9 @@ import (
"fmt"
"os"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/procfs"
)
@ -31,15 +32,17 @@ const (
// Used for calculating the total memory bytes on TCP and UDP.
var pageSize = os.Getpagesize()
type sockStatCollector struct{}
type sockStatCollector struct {
logger log.Logger
}
func init() {
registerCollector(sockStatSubsystem, defaultEnabled, NewSockStatCollector)
}
// NewSockStatCollector returns a new Collector exposing socket stats.
func NewSockStatCollector() (Collector, error) {
return &sockStatCollector{}, nil
func NewSockStatCollector(logger log.Logger) (Collector, error) {
return &sockStatCollector{logger}, nil
}
func (c *sockStatCollector) Update(ch chan<- prometheus.Metric) error {
@ -53,7 +56,7 @@ func (c *sockStatCollector) Update(ch chan<- prometheus.Metric) error {
switch {
case err == nil:
case os.IsNotExist(err):
log.Debug("IPv4 sockstat statistics not found, skipping")
level.Debug(c.logger).Log("msg", "IPv4 sockstat statistics not found, skipping")
default:
return fmt.Errorf("failed to get IPv4 sockstat data: %w", err)
}
@ -62,7 +65,7 @@ func (c *sockStatCollector) Update(ch chan<- prometheus.Metric) error {
switch {
case err == nil:
case os.IsNotExist(err):
log.Debug("IPv6 sockstat statistics not found, skipping")
level.Debug(c.logger).Log("msg", "IPv6 sockstat statistics not found, skipping")
default:
return fmt.Errorf("failed to get IPv6 sockstat data: %w", err)
}

View file

@ -18,9 +18,9 @@ package collector
import (
"fmt"
"github.com/prometheus/procfs"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
)
type statCollector struct {
@ -31,6 +31,7 @@ type statCollector struct {
btime *prometheus.Desc
procsRunning *prometheus.Desc
procsBlocked *prometheus.Desc
logger log.Logger
}
func init() {
@ -38,7 +39,7 @@ func init() {
}
// NewStatCollector returns a new Collector exposing kernel/system statistics.
func NewStatCollector() (Collector, error) {
func NewStatCollector(logger log.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
@ -75,6 +76,7 @@ func NewStatCollector() (Collector, error) {
"Number of processes blocked waiting for I/O to complete.",
nil, nil,
),
logger: logger,
}, nil
}

View file

@ -18,9 +18,10 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/mattn/go-xmlrpc"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
)
@ -33,6 +34,7 @@ type supervisordCollector struct {
stateDesc *prometheus.Desc
exitStatusDesc *prometheus.Desc
startTimeDesc *prometheus.Desc
logger log.Logger
}
func init() {
@ -40,7 +42,7 @@ func init() {
}
// NewSupervisordCollector returns a new Collector exposing supervisord statistics.
func NewSupervisordCollector() (Collector, error) {
func NewSupervisordCollector(logger log.Logger) (Collector, error) {
var (
subsystem = "supervisord"
labelNames = []string{"name", "group"}
@ -70,6 +72,7 @@ func NewSupervisordCollector() (Collector, error) {
labelNames,
nil,
),
logger: logger,
}, nil
}
@ -147,7 +150,7 @@ func (c *supervisordCollector) Update(ch chan<- prometheus.Metric) error {
} else {
ch <- prometheus.MustNewConstMetric(c.upDesc, prometheus.GaugeValue, 0, labels...)
}
log.Debugf("%s:%s is %s on pid %d", info.Group, info.Name, info.StateName, info.PID)
level.Debug(c.logger).Log("msg", "process info", "group", info.Group, "name", info.Name, "state", info.StateName, "pid", info.PID)
}
return nil

View file

@ -25,8 +25,9 @@ import (
"time"
"github.com/coreos/go-systemd/dbus"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
kingpin "gopkg.in/alecthomas/kingpin.v2"
)
@ -62,6 +63,7 @@ type systemdCollector struct {
systemdVersion int
unitWhitelistPattern *regexp.Regexp
unitBlacklistPattern *regexp.Regexp
logger log.Logger
}
var unitStatesName = []string{"active", "activating", "deactivating", "inactive", "failed"}
@ -71,7 +73,7 @@ func init() {
}
// NewSystemdCollector returns a new Collector exposing systemd statistics.
func NewSystemdCollector() (Collector, error) {
func NewSystemdCollector(logger log.Logger) (Collector, error) {
const subsystem = "systemd"
unitDesc := prometheus.NewDesc(
@ -119,10 +121,10 @@ func NewSystemdCollector() (Collector, error) {
unitWhitelistPattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitWhitelist))
unitBlacklistPattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitBlacklist))
systemdVersion := getSystemdVersion()
systemdVersion := getSystemdVersion(logger)
if systemdVersion < minSystemdVersionSystemState {
log.Warnf("Detected systemd version %v is lower than minimum %v", systemdVersion, minSystemdVersionSystemState)
log.Warn("Some systemd state and timer metrics will not be available")
level.Warn(logger).Log("msg", "Detected systemd version is lower than minimum", "current", systemdVersion, "minimum", minSystemdVersionSystemState)
level.Warn(logger).Log("msg", "Some systemd state and timer metrics will not be available")
}
return &systemdCollector{
@ -141,6 +143,7 @@ func NewSystemdCollector() (Collector, error) {
systemdVersion: systemdVersion,
unitWhitelistPattern: unitWhitelistPattern,
unitBlacklistPattern: unitBlacklistPattern,
logger: logger,
}, nil
}
@ -158,16 +161,16 @@ func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil {
return fmt.Errorf("couldn't get units: %s", err)
}
log.Debugf("systemd getAllUnits took %f", time.Since(begin).Seconds())
level.Debug(c.logger).Log("msg", "getAllUnits took", "duration_seconds", time.Since(begin).Seconds())
begin = time.Now()
summary := summarizeUnits(allUnits)
c.collectSummaryMetrics(ch, summary)
log.Debugf("systemd collectSummaryMetrics took %f", time.Since(begin).Seconds())
level.Debug(c.logger).Log("msg", "collectSummaryMetrics took", "duration_seconds", time.Since(begin).Seconds())
begin = time.Now()
units := filterUnits(allUnits, c.unitWhitelistPattern, c.unitBlacklistPattern)
log.Debugf("systemd filterUnits took %f", time.Since(begin).Seconds())
units := filterUnits(allUnits, c.unitWhitelistPattern, c.unitBlacklistPattern, c.logger)
level.Debug(c.logger).Log("msg", "filterUnits took", "duration_seconds", time.Since(begin).Seconds())
var wg sync.WaitGroup
defer wg.Wait()
@ -177,7 +180,7 @@ func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error {
defer wg.Done()
begin = time.Now()
c.collectUnitStatusMetrics(conn, ch, units)
log.Debugf("systemd collectUnitStatusMetrics took %f", time.Since(begin).Seconds())
level.Debug(c.logger).Log("msg", "collectUnitStatusMetrics took", "duration_seconds", time.Since(begin).Seconds())
}()
if *enableStartTimeMetrics {
@ -186,7 +189,7 @@ func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error {
defer wg.Done()
begin = time.Now()
c.collectUnitStartTimeMetrics(conn, ch, units)
log.Debugf("systemd collectUnitStartTimeMetrics took %f", time.Since(begin).Seconds())
level.Debug(c.logger).Log("msg", "collectUnitStartTimeMetrics took", "duration_seconds", time.Since(begin).Seconds())
}()
}
@ -196,7 +199,7 @@ func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error {
defer wg.Done()
begin = time.Now()
c.collectUnitTasksMetrics(conn, ch, units)
log.Debugf("systemd collectUnitTasksMetrics took %f", time.Since(begin).Seconds())
level.Debug(c.logger).Log("msg", "collectUnitTasksMetrics took", "duration_seconds", time.Since(begin).Seconds())
}()
}
@ -206,7 +209,7 @@ func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error {
defer wg.Done()
begin = time.Now()
c.collectTimers(conn, ch, units)
log.Debugf("systemd collectTimers took %f", time.Since(begin).Seconds())
level.Debug(c.logger).Log("msg", "collectTimers took", "duration_seconds", time.Since(begin).Seconds())
}()
}
@ -215,13 +218,13 @@ func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error {
defer wg.Done()
begin = time.Now()
c.collectSockets(conn, ch, units)
log.Debugf("systemd collectSockets took %f", time.Since(begin).Seconds())
level.Debug(c.logger).Log("msg", "collectSockets took", "duration_seconds", time.Since(begin).Seconds())
}()
if c.systemdVersion >= minSystemdVersionSystemState {
begin = time.Now()
err = c.collectSystemState(conn, ch)
log.Debugf("systemd collectSystemState took %f", time.Since(begin).Seconds())
level.Debug(c.logger).Log("msg", "collectSystemState took", "duration_seconds", time.Since(begin).Seconds())
}
ch <- prometheus.MustNewConstMetric(
@ -236,14 +239,14 @@ func (c *systemdCollector) collectUnitStatusMetrics(conn *dbus.Conn, ch chan<- p
if strings.HasSuffix(unit.Name, ".service") {
serviceTypeProperty, err := conn.GetUnitTypeProperty(unit.Name, "Service", "Type")
if err != nil {
log.Debugf("couldn't get unit '%s' Type: %s", unit.Name, err)
level.Debug(c.logger).Log("msg", "couldn't get unit type", "unit", unit.Name, "err", err)
} else {
serviceType = serviceTypeProperty.Value.Value().(string)
}
} else if strings.HasSuffix(unit.Name, ".mount") {
serviceTypeProperty, err := conn.GetUnitTypeProperty(unit.Name, "Mount", "Type")
if err != nil {
log.Debugf("couldn't get unit '%s' Type: %s", unit.Name, err)
level.Debug(c.logger).Log("msg", "couldn't get unit type", "unit", unit.Name, "err", err)
} else {
serviceType = serviceTypeProperty.Value.Value().(string)
}
@ -261,7 +264,7 @@ func (c *systemdCollector) collectUnitStatusMetrics(conn *dbus.Conn, ch chan<- p
// NRestarts wasn't added until systemd 235.
restartsCount, err := conn.GetUnitTypeProperty(unit.Name, "Service", "NRestarts")
if err != nil {
log.Debugf("couldn't get unit '%s' NRestarts: %s", unit.Name, err)
level.Debug(c.logger).Log("msg", "couldn't get unit NRestarts", "unit", unit.Name, "err", err)
} else {
ch <- prometheus.MustNewConstMetric(
c.nRestartsDesc, prometheus.CounterValue,
@ -279,7 +282,7 @@ func (c *systemdCollector) collectSockets(conn *dbus.Conn, ch chan<- prometheus.
acceptedConnectionCount, err := conn.GetUnitTypeProperty(unit.Name, "Socket", "NAccepted")
if err != nil {
log.Debugf("couldn't get unit '%s' NAccepted: %s", unit.Name, err)
level.Debug(c.logger).Log("msg", "couldn't get unit NAccepted", "unit", unit.Name, "err", err)
continue
}
ch <- prometheus.MustNewConstMetric(
@ -288,7 +291,7 @@ func (c *systemdCollector) collectSockets(conn *dbus.Conn, ch chan<- prometheus.
currentConnectionCount, err := conn.GetUnitTypeProperty(unit.Name, "Socket", "NConnections")
if err != nil {
log.Debugf("couldn't get unit '%s' NConnections: %s", unit.Name, err)
level.Debug(c.logger).Log("msg", "couldn't get unit NConnections", "unit", unit.Name, "err", err)
continue
}
ch <- prometheus.MustNewConstMetric(
@ -316,7 +319,7 @@ func (c *systemdCollector) collectUnitStartTimeMetrics(conn *dbus.Conn, ch chan<
} else {
timestampValue, err := conn.GetUnitProperty(unit.Name, "ActiveEnterTimestamp")
if err != nil {
log.Debugf("couldn't get unit '%s' StartTimeUsec: %s", unit.Name, err)
level.Debug(c.logger).Log("msg", "couldn't get unit StartTimeUsec", "unit", unit.Name, "err", err)
continue
}
startTimeUsec = timestampValue.Value.Value().(uint64)
@ -334,7 +337,7 @@ func (c *systemdCollector) collectUnitTasksMetrics(conn *dbus.Conn, ch chan<- pr
if strings.HasSuffix(unit.Name, ".service") {
tasksCurrentCount, err := conn.GetUnitTypeProperty(unit.Name, "Service", "TasksCurrent")
if err != nil {
log.Debugf("couldn't get unit '%s' TasksCurrent: %s", unit.Name, err)
level.Debug(c.logger).Log("msg", "couldn't get unit TasksCurrent", "unit", unit.Name, "err", err)
} else {
val = tasksCurrentCount.Value.Value().(uint64)
// Don't set tasksCurrent if dbus reports MaxUint64.
@ -346,7 +349,7 @@ func (c *systemdCollector) collectUnitTasksMetrics(conn *dbus.Conn, ch chan<- pr
}
tasksMaxCount, err := conn.GetUnitTypeProperty(unit.Name, "Service", "TasksMax")
if err != nil {
log.Debugf("couldn't get unit '%s' TasksMax: %s", unit.Name, err)
level.Debug(c.logger).Log("msg", "couldn't get unit TasksMax", "unit", unit.Name, "err", err)
} else {
val = tasksMaxCount.Value.Value().(uint64)
// Don't set tasksMax if dbus reports MaxUint64.
@ -368,7 +371,7 @@ func (c *systemdCollector) collectTimers(conn *dbus.Conn, ch chan<- prometheus.M
lastTriggerValue, err := conn.GetUnitTypeProperty(unit.Name, "Timer", "LastTriggerUSec")
if err != nil {
log.Debugf("couldn't get unit '%s' LastTriggerUSec: %s", unit.Name, err)
level.Debug(c.logger).Log("msg", "couldn't get unit LastTriggerUSec", "unit", unit.Name, "err", err)
continue
}
@ -440,36 +443,36 @@ func summarizeUnits(units []unit) map[string]float64 {
return summarized
}
func filterUnits(units []unit, whitelistPattern, blacklistPattern *regexp.Regexp) []unit {
func filterUnits(units []unit, whitelistPattern, blacklistPattern *regexp.Regexp, logger log.Logger) []unit {
filtered := make([]unit, 0, len(units))
for _, unit := range units {
if whitelistPattern.MatchString(unit.Name) && !blacklistPattern.MatchString(unit.Name) && unit.LoadState == "loaded" {
log.Debugf("Adding unit: %s", unit.Name)
level.Debug(logger).Log("msg", "Adding unit", "unit", unit.Name)
filtered = append(filtered, unit)
} else {
log.Debugf("Ignoring unit: %s", unit.Name)
level.Debug(logger).Log("msg", "Ignoring unit", "unit", unit.Name)
}
}
return filtered
}
func getSystemdVersion() int {
func getSystemdVersion(logger log.Logger) int {
conn, err := newSystemdDbusConn()
if err != nil {
log.Warnf("Unable to get systemd dbus connection, defaulting systemd version to 0: %s", err)
level.Warn(logger).Log("msg", "Unable to get systemd dbus connection, defaulting systemd version to 0", "err", err)
return 0
}
defer conn.Close()
version, err := conn.GetManagerProperty("Version")
if err != nil {
log.Warn("Unable to get systemd version property, defaulting to 0")
level.Warn(logger).Log("msg", "Unable to get systemd version property, defaulting to 0")
return 0
}
version = strings.Replace(version, "\"", "", 2)
v, err := strconv.Atoi(version)
if err != nil {
log.Warnf("Got invalid systemd version: %v", version)
level.Warn(logger).Log("msg", "Got invalid systemd version", "version", version)
return 0
}
return v

View file

@ -14,6 +14,7 @@
package collector
import (
"github.com/go-kit/kit/log"
"regexp"
"testing"
@ -90,7 +91,7 @@ func TestSystemdIgnoreFilter(t *testing.T) {
fixtures := getUnitListFixtures()
whitelistPattern := regexp.MustCompile("^foo$")
blacklistPattern := regexp.MustCompile("^bar$")
filtered := filterUnits(fixtures[0], whitelistPattern, blacklistPattern)
filtered := filterUnits(fixtures[0], whitelistPattern, blacklistPattern, log.NewNopLogger())
for _, unit := range filtered {
if blacklistPattern.MatchString(unit.Name) || !whitelistPattern.MatchString(unit.Name) {
t.Error(unit.Name, "should not be in the filtered list")
@ -98,13 +99,14 @@ func TestSystemdIgnoreFilter(t *testing.T) {
}
}
func TestSystemdIgnoreFilterDefaultKeepsAll(t *testing.T) {
c, err := NewSystemdCollector()
logger := log.NewNopLogger()
c, err := NewSystemdCollector(logger)
if err != nil {
t.Fatal(err)
}
fixtures := getUnitListFixtures()
collector := c.(*systemdCollector)
filtered := filterUnits(fixtures[0], collector.unitWhitelistPattern, collector.unitBlacklistPattern)
filtered := filterUnits(fixtures[0], collector.unitWhitelistPattern, collector.unitBlacklistPattern, logger)
// Adjust fixtures by 3 "not-found" units.
if len(filtered) != len(fixtures[0])-3 {
t.Error("Default filters removed units")

View file

@ -23,6 +23,7 @@ import (
"strconv"
"strings"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -54,7 +55,8 @@ const (
)
type tcpStatCollector struct {
desc typedDesc
desc typedDesc
logger log.Logger
}
func init() {
@ -62,13 +64,14 @@ func init() {
}
// NewTCPStatCollector returns a new Collector exposing network stats.
func NewTCPStatCollector() (Collector, error) {
func NewTCPStatCollector(logger log.Logger) (Collector, error) {
return &tcpStatCollector{
desc: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(namespace, "tcp", "connection_states"),
"Number of connection states.",
[]string{"state"}, nil,
), prometheus.GaugeValue},
logger: logger,
}, nil
}

View file

@ -24,10 +24,11 @@ import (
"strings"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/log"
kingpin "gopkg.in/alecthomas/kingpin.v2"
)
@ -44,7 +45,8 @@ var (
type textFileCollector struct {
path string
// Only set for testing to get predictable output.
mtime *float64
mtime *float64
logger log.Logger
}
func init() {
@ -53,14 +55,15 @@ func init() {
// NewTextFileCollector returns a new Collector exposing metrics read from files
// in the given textfile directory.
func NewTextFileCollector() (Collector, error) {
func NewTextFileCollector(logger log.Logger) (Collector, error) {
c := &textFileCollector{
path: *textFileDirectory,
path: *textFileDirectory,
logger: logger,
}
return c, nil
}
func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric) {
func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric, logger log.Logger) {
var valType prometheus.ValueType
var val float64
@ -76,7 +79,7 @@ func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Me
for _, metric := range metricFamily.Metric {
if metric.TimestampMs != nil {
log.Warnf("Ignoring unsupported custom timestamp on textfile collector metric %v", metric)
level.Warn(logger).Log("msg", "Ignoring unsupported custom timestamp on textfile collector metric", "metric", metric)
}
labels := metric.GetLabel()
@ -191,7 +194,7 @@ func (c *textFileCollector) Update(ch chan<- prometheus.Metric) error {
files, err := ioutil.ReadDir(c.path)
if err != nil && c.path != "" {
errored = true
log.Errorf("failed to read textfile collector directory %q: %v", c.path, err)
level.Error(c.logger).Log("msg", "failed to read textfile collector directory", "path", c.path, "err", err)
}
mtimes := make(map[string]time.Time, len(files))
@ -203,7 +206,7 @@ func (c *textFileCollector) Update(ch chan<- prometheus.Metric) error {
mtime, err := c.processFile(f.Name(), ch)
if err != nil {
errored = true
log.Errorf("failed to collect textfile data from %q: %v", f.Name(), err)
level.Error(c.logger).Log("msg", "failed to collect textfile data", "file", f.Name(), "err", err)
continue
}
@ -257,7 +260,7 @@ func (c *textFileCollector) processFile(name string, ch chan<- prometheus.Metric
}
for _, mf := range families {
convertMetricFamily(mf, ch)
convertMetricFamily(mf, ch, c.logger)
}
// Only stat the file once it has been parsed and validated, so that

View file

@ -20,10 +20,12 @@ import (
"net/http/httptest"
"testing"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/log"
kingpin "gopkg.in/alecthomas/kingpin.v2"
"github.com/prometheus/common/promlog"
"github.com/prometheus/common/promlog/flag"
"gopkg.in/alecthomas/kingpin.v2"
)
type collectorAdapter struct {
@ -39,8 +41,7 @@ func (a collectorAdapter) Describe(ch chan<- *prometheus.Desc) {
// Collect implements the prometheus.Collector interface.
func (a collectorAdapter) Collect(ch chan<- prometheus.Metric) {
err := a.Update(ch)
if err != nil {
if err := a.Update(ch); err != nil {
panic(fmt.Sprintf("failed to update collector: %v", err))
}
}
@ -95,15 +96,16 @@ func TestTextfileCollector(t *testing.T) {
for i, test := range tests {
mtime := 1.0
c := &textFileCollector{
path: test.path,
mtime: &mtime,
path: test.path,
mtime: &mtime,
logger: log.NewNopLogger(),
}
// Suppress a log message about `nonexistent_path` not existing; this is
// expected and clutters the test output.
log.AddFlags(kingpin.CommandLine)
_, err := kingpin.CommandLine.Parse([]string{"--log.level", "fatal"})
if err != nil {
promlogConfig := &promlog.Config{}
flag.AddFlags(kingpin.CommandLine, promlogConfig)
if _, err := kingpin.CommandLine.Parse([]string{"--log.level", "debug"}); err != nil {
t.Fatal(err)
}

View file

@ -18,6 +18,7 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
)
@ -30,6 +31,7 @@ type thermalZoneCollector struct {
coolingDeviceCurState *prometheus.Desc
coolingDeviceMaxState *prometheus.Desc
zoneTemp *prometheus.Desc
logger log.Logger
}
func init() {
@ -37,7 +39,7 @@ func init() {
}
// NewThermalZoneCollector returns a new Collector exposing kernel/system statistics.
func NewThermalZoneCollector() (Collector, error) {
func NewThermalZoneCollector(logger log.Logger) (Collector, error) {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)
@ -60,6 +62,7 @@ func NewThermalZoneCollector() (Collector, error) {
"Maximum throttle state of the cooling device",
[]string{"name", "type"}, nil,
),
logger: logger,
}, nil
}

View file

@ -18,12 +18,14 @@ package collector
import (
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
type timeCollector struct {
desc *prometheus.Desc
desc *prometheus.Desc
logger log.Logger
}
func init() {
@ -32,19 +34,20 @@ func init() {
// NewTimeCollector returns a new Collector exposing the current system time in
// seconds since epoch.
func NewTimeCollector() (Collector, error) {
func NewTimeCollector(logger log.Logger) (Collector, error) {
return &timeCollector{
desc: prometheus.NewDesc(
namespace+"_time_seconds",
"System time in seconds since epoch (1970).",
nil, nil,
),
logger: logger,
}, nil
}
func (c *timeCollector) Update(ch chan<- prometheus.Metric) error {
now := float64(time.Now().UnixNano()) / 1e9
log.Debugf("Return time: %f", now)
level.Debug(c.logger).Log("msg", "Return time", "now", now)
ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, now)
return nil
}

View file

@ -19,6 +19,7 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
)
@ -54,6 +55,7 @@ type timexCollector struct {
stbcnt,
tai,
syncStatus typedDesc
logger log.Logger
}
func init() {
@ -61,7 +63,7 @@ func init() {
}
// NewTimexCollector returns a new Collector exposing adjtime(3) stats.
func NewTimexCollector() (Collector, error) {
func NewTimexCollector(logger log.Logger) (Collector, error) {
const subsystem = "timex"
return &timexCollector{
@ -150,6 +152,7 @@ func NewTimexCollector() (Collector, error) {
"Is clock synchronized to a reliable server (1 = yes, 0 = no).",
nil, nil,
), prometheus.GaugeValue},
logger: logger,
}, nil
}

View file

@ -17,6 +17,7 @@
package collector
import (
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -34,7 +35,9 @@ var unameDesc = prometheus.NewDesc(
nil,
)
type unameCollector struct{}
type unameCollector struct {
logger log.Logger
}
type uname struct {
SysName string
Release string
@ -49,8 +52,8 @@ func init() {
}
// NewUnameCollector returns new unameCollector.
func newUnameCollector() (Collector, error) {
return &unameCollector{}, nil
func newUnameCollector(logger log.Logger) (Collector, error) {
return &unameCollector{logger}, nil
}
func (c *unameCollector) Update(ch chan<- prometheus.Metric) error {

View file

@ -23,6 +23,7 @@ import (
"strconv"
"strings"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
)
@ -37,6 +38,7 @@ var (
type vmStatCollector struct {
fieldPattern *regexp.Regexp
logger log.Logger
}
func init() {
@ -44,10 +46,11 @@ func init() {
}
// NewvmStatCollector returns a new Collector exposing vmstat stats.
func NewvmStatCollector() (Collector, error) {
func NewvmStatCollector(logger log.Logger) (Collector, error) {
pattern := regexp.MustCompile(*vmStatFields)
return &vmStatCollector{
fieldPattern: pattern,
logger: logger,
}, nil
}

View file

@ -20,9 +20,10 @@ import (
"os"
"path/filepath"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/mdlayher/wifi"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
)
@ -40,6 +41,8 @@ type wifiCollector struct {
stationTransmitRetriesTotal *prometheus.Desc
stationTransmitFailedTotal *prometheus.Desc
stationBeaconLossTotal *prometheus.Desc
logger log.Logger
}
var (
@ -61,7 +64,7 @@ type wifiStater interface {
}
// NewWifiCollector returns a new Collector exposing Wifi statistics.
func NewWifiCollector() (Collector, error) {
func NewWifiCollector(logger log.Logger) (Collector, error) {
const (
subsystem = "wifi"
)
@ -154,6 +157,7 @@ func NewWifiCollector() (Collector, error) {
labels,
nil,
),
logger: logger,
}, nil
}
@ -162,11 +166,11 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil {
// Cannot access wifi metrics, report no error.
if os.IsNotExist(err) {
log.Debug("wifi collector metrics are not available for this system")
level.Debug(c.logger).Log("msg", "wifi collector metrics are not available for this system")
return nil
}
if os.IsPermission(err) {
log.Debug("wifi collector got permission denied when accessing metrics")
level.Debug(c.logger).Log("msg", "wifi collector got permission denied when accessing metrics")
return nil
}
@ -185,7 +189,7 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error {
continue
}
log.Debugf("probing wifi device %q with type %q", ifi.Name, ifi.Type)
level.Debug(c.logger).Log("msg", "probing wifi device with type", "wifi", ifi.Name, "type", ifi.Type)
ch <- prometheus.MustNewConstMetric(
c.interfaceFrequencyHertz,
@ -203,7 +207,7 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error {
case err == nil:
c.updateBSSStats(ch, ifi.Name, bss)
case os.IsNotExist(err):
log.Debugf("BSS information not found for wifi device %q", ifi.Name)
level.Debug(c.logger).Log("msg", "BSS information not found for wifi device", "name", ifi.Name)
default:
return fmt.Errorf("failed to retrieve BSS for device %s: %v",
ifi.Name, err)
@ -216,7 +220,7 @@ func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error {
c.updateStationStats(ch, ifi.Name, station)
}
case os.IsNotExist(err):
log.Debugf("station information not found for wifi device %q", ifi.Name)
level.Debug(c.logger).Log("msg", "station information not found for wifi device", "name", ifi.Name)
default:
return fmt.Errorf("failed to retrieve station info for device %q: %v",
ifi.Name, err)

View file

@ -16,13 +16,15 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/xfs"
)
// An xfsCollector is a Collector which gathers metrics from XFS filesystems.
type xfsCollector struct {
fs xfs.FS
fs xfs.FS
logger log.Logger
}
func init() {
@ -30,14 +32,15 @@ func init() {
}
// NewXFSCollector returns a new Collector exposing XFS statistics.
func NewXFSCollector() (Collector, error) {
func NewXFSCollector(logger log.Logger) (Collector, error) {
fs, err := xfs.NewFS(*procPath, *sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)
}
return &xfsCollector{
fs: fs,
fs: fs,
logger: logger,
}, nil
}

View file

@ -20,8 +20,9 @@ import (
"errors"
"strings"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
var errZFSNotAvailable = errors.New("ZFS / ZFS statistics are not available")
@ -36,10 +37,11 @@ type zfsCollector struct {
linuxProcpathBase string
linuxZpoolIoPath string
linuxPathMap map[string]string
logger log.Logger
}
// NewZFSCollector returns a new Collector exposing ZFS statistics.
func NewZFSCollector() (Collector, error) {
func NewZFSCollector(logger log.Logger) (Collector, error) {
return &zfsCollector{
linuxProcpathBase: "spl/kstat/zfs",
linuxZpoolIoPath: "/*/io",
@ -56,6 +58,7 @@ func NewZFSCollector() (Collector, error) {
"zfs_zfetch": "zfetchstats",
"zfs_zil": "zil",
},
logger: logger,
}, nil
}
@ -63,7 +66,7 @@ func (c *zfsCollector) Update(ch chan<- prometheus.Metric) error {
for subsystem := range c.linuxPathMap {
if err := c.updateZfsStats(subsystem, ch); err != nil {
if err == errZFSNotAvailable {
log.Debug(err)
level.Debug(c.logger).Log("err", err)
// ZFS /proc files are added as new ZFS features arrive; it is ok to continue
continue
}

View file

@ -16,11 +16,13 @@ package collector
import (
"fmt"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
)
type zfsCollector struct {
sysctls []bsdSysctl
logger log.Logger
}
const (
@ -31,7 +33,7 @@ func init() {
registerCollector("zfs", defaultEnabled, NewZfsCollector)
}
func NewZfsCollector() (Collector, error) {
func NewZfsCollector(logger log.Logger) (Collector, error) {
return &zfsCollector{
sysctls: []bsdSysctl{
{
@ -238,6 +240,7 @@ func NewZfsCollector() (Collector, error) {
valueType: prometheus.CounterValue,
},
},
logger: logger,
}, nil
}

View file

@ -22,8 +22,8 @@ import (
"strconv"
"strings"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
// constants from https://github.com/zfsonlinux/zfs/blob/master/lib/libspl/include/sys/kstat.h
@ -45,7 +45,7 @@ func (c *zfsCollector) openProcFile(path string) (*os.File, error) {
// file not found error can occur if:
// 1. zfs module is not loaded
// 2. zfs version does not have the feature with metrics -- ok to ignore
log.Debugf("Cannot open %q for reading", procFilePath(path))
level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", procFilePath(path))
return nil, errZFSNotAvailable
}
return file, nil
@ -77,7 +77,7 @@ func (c *zfsCollector) updatePoolStats(ch chan<- prometheus.Metric) error {
file, err := os.Open(zpoolPath)
if err != nil {
// this file should exist, but there is a race where an exporting pool can remove the files -- ok to ignore
log.Debugf("Cannot open %q for reading", zpoolPath)
level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", zpoolPath)
return errZFSNotAvailable
}

View file

@ -18,6 +18,7 @@ package collector
import (
"strings"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/siebenmann/go-kstat"
)
@ -52,6 +53,7 @@ type zfsCollector struct {
arcstatsSize *prometheus.Desc
zfetchstatsHits *prometheus.Desc
zfetchstatsMisses *prometheus.Desc
logger log.Logger
}
const (
@ -62,7 +64,7 @@ func init() {
registerCollector("zfs", defaultEnabled, NewZfsCollector)
}
func NewZfsCollector() (Collector, error) {
func NewZfsCollector(logger log.Logger) (Collector, error) {
return &zfsCollector{
abdstatsLinearCount: prometheus.NewDesc(
prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_linear_count_total"),
@ -180,6 +182,7 @@ func NewZfsCollector() (Collector, error) {
prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "zfetchstats_misses_total"),
"ZFS cache fetch misses", nil, nil,
),
logger: logger,
}, nil
}

2
go.mod
View file

@ -4,9 +4,9 @@ require (
github.com/beevik/ntp v0.2.0
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043
github.com/go-kit/kit v0.9.0
github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968
github.com/hodgesds/perf-utils v0.0.7
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/lufia/iostat v0.0.0-20170605150913-9f7362b77ad3
github.com/mattn/go-xmlrpc v0.0.3
github.com/mdlayher/genetlink v0.0.0-20190828143517-e35f2bf499b9 // indirect

6
go.sum
View file

@ -20,9 +20,12 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043 h1:I3hLsM87FSASssIrIOGwJCio31dvLkvpYDKn2+r31ec=
github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 h1:s+PDl6lozQ+dEUtUtQnO7+A2iPG3sK1pI4liU+jxn90=
github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
@ -46,8 +49,7 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/lufia/iostat v0.0.0-20170605150913-9f7362b77ad3 h1:XGhvld9vIpj929Gri5ybjukYZeyZwKkFkqgATqBQiOs=
github.com/lufia/iostat v0.0.0-20170605150913-9f7362b77ad3/go.mod h1:lRgtFVamD7L7GaXOSwBiuXMwU3Aicfn5h66LVs4u2SA=

View file

@ -15,13 +15,17 @@ package main
import (
"fmt"
"github.com/prometheus/common/promlog"
"github.com/prometheus/common/promlog/flag"
"net/http"
_ "net/http/pprof"
"os"
"sort"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/log"
"github.com/prometheus/common/version"
"github.com/prometheus/node_exporter/collector"
"github.com/prometheus/node_exporter/https"
@ -38,13 +42,15 @@ type handler struct {
exporterMetricsRegistry *prometheus.Registry
includeExporterMetrics bool
maxRequests int
logger log.Logger
}
func newHandler(includeExporterMetrics bool, maxRequests int) *handler {
func newHandler(includeExporterMetrics bool, maxRequests int, logger log.Logger) *handler {
h := &handler{
exporterMetricsRegistry: prometheus.NewRegistry(),
includeExporterMetrics: includeExporterMetrics,
maxRequests: maxRequests,
logger: logger,
}
if h.includeExporterMetrics {
h.exporterMetricsRegistry.MustRegister(
@ -53,7 +59,7 @@ func newHandler(includeExporterMetrics bool, maxRequests int) *handler {
)
}
if innerHandler, err := h.innerHandler(); err != nil {
log.Fatalf("Couldn't create metrics handler: %s", err)
panic(fmt.Sprintf("Couldn't create metrics handler: %s", err))
} else {
h.unfilteredHandler = innerHandler
}
@ -63,7 +69,7 @@ func newHandler(includeExporterMetrics bool, maxRequests int) *handler {
// ServeHTTP implements http.Handler.
func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
filters := r.URL.Query()["collect[]"]
log.Debugln("collect query:", filters)
level.Debug(h.logger).Log("msg", "collect query:", "filters", filters)
if len(filters) == 0 {
// No filters, use the prepared unfiltered handler.
@ -73,7 +79,7 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// To serve filtered metrics, we create a filtering handler on the fly.
filteredHandler, err := h.innerHandler(filters...)
if err != nil {
log.Warnln("Couldn't create filtered metrics handler:", err)
level.Warn(h.logger).Log("msg", "Couldn't create filtered metrics handler:", "err", err)
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(fmt.Sprintf("Couldn't create filtered metrics handler: %s", err)))
return
@ -81,13 +87,13 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
filteredHandler.ServeHTTP(w, r)
}
// innerHandler is used to create buth the one unfiltered http.Handler to be
// innerHandler is used to create both the one unfiltered http.Handler to be
// wrapped by the outer handler and also the filtered handlers created on the
// fly. The former is accomplished by calling innerHandler without any arguments
// (in which case it will log all the collectors enabled via command-line
// flags).
func (h *handler) innerHandler(filters ...string) (http.Handler, error) {
nc, err := collector.NewNodeCollector(filters...)
nc, err := collector.NewNodeCollector(h.logger, filters...)
if err != nil {
return nil, fmt.Errorf("couldn't create collector: %s", err)
}
@ -95,14 +101,14 @@ func (h *handler) innerHandler(filters ...string) (http.Handler, error) {
// Only log the creation of an unfiltered handler, which should happen
// only once upon startup.
if len(filters) == 0 {
log.Infof("Enabled collectors:")
level.Info(h.logger).Log("msg", "Enabled collectors")
collectors := []string{}
for n := range nc.Collectors {
collectors = append(collectors, n)
}
sort.Strings(collectors)
for _, n := range collectors {
log.Infof(" - %s", n)
for _, c := range collectors {
level.Info(h.logger).Log("collector", c)
}
}
@ -114,7 +120,6 @@ func (h *handler) innerHandler(filters ...string) (http.Handler, error) {
handler := promhttp.HandlerFor(
prometheus.Gatherers{h.exporterMetricsRegistry, r},
promhttp.HandlerOpts{
ErrorLog: log.NewErrorLogger(),
ErrorHandling: promhttp.ContinueOnError,
MaxRequestsInFlight: h.maxRequests,
Registry: h.exporterMetricsRegistry,
@ -154,15 +159,17 @@ func main() {
).Default("").String()
)
log.AddFlags(kingpin.CommandLine)
promlogConfig := &promlog.Config{}
flag.AddFlags(kingpin.CommandLine, promlogConfig)
kingpin.Version(version.Print("node_exporter"))
kingpin.HelpFlag.Short('h')
kingpin.Parse()
logger := promlog.New(promlogConfig)
log.Infoln("Starting node_exporter", version.Info())
log.Infoln("Build context", version.BuildContext())
level.Info(logger).Log("msg", "Starting node_exporter", "version", version.Info())
level.Info(logger).Log("msg", "Build context", "build_context", version.BuildContext())
http.Handle(*metricsPath, newHandler(!*disableExporterMetrics, *maxRequests))
http.Handle(*metricsPath, newHandler(!*disableExporterMetrics, *maxRequests, logger))
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`<html>
<head><title>Node Exporter</title></head>
@ -173,9 +180,10 @@ func main() {
</html>`))
})
log.Infoln("Listening on", *listenAddress)
level.Info(logger).Log("msg", "Listening on", "address", *listenAddress)
server := &http.Server{Addr: *listenAddress}
if err := https.Listen(server, *configFile); err != nil {
log.Fatal(err)
level.Error(logger).Log("err", err)
os.Exit(1)
}
}

View file

@ -1,2 +1,4 @@
// Used in HTTP handlers; any error is handled by the server itself.
(net/http.ResponseWriter).Write
// Never check for logger errors.
(github.com/go-kit/kit/log.Logger).Log

View file

@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Eskildsen
Copyright (c) 2015 Peter Bourgon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -9,13 +9,14 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

151
vendor/github.com/go-kit/kit/log/README.md generated vendored Normal file
View file

@ -0,0 +1,151 @@
# package log
`package log` provides a minimal interface for structured logging in services.
It may be wrapped to encode conventions, enforce type-safety, provide leveled
logging, and so on. It can be used for both typical application log events,
and log-structured data streams.
## Structured logging
Structured logging is, basically, conceding to the reality that logs are
_data_, and warrant some level of schematic rigor. Using a stricter,
key/value-oriented message format for our logs, containing contextual and
semantic information, makes it much easier to get insight into the
operational activity of the systems we build. Consequently, `package log` is
of the strong belief that "[the benefits of structured logging outweigh the
minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)".
Migrating from unstructured to structured logging is probably a lot easier
than you'd expect.
```go
// Unstructured
log.Printf("HTTP server listening on %s", addr)
// Structured
logger.Log("transport", "HTTP", "addr", addr, "msg", "listening")
```
## Usage
### Typical application logging
```go
w := log.NewSyncWriter(os.Stderr)
logger := log.NewLogfmtLogger(w)
logger.Log("question", "what is the meaning of life?", "answer", 42)
// Output:
// question="what is the meaning of life?" answer=42
```
### Contextual Loggers
```go
func main() {
var logger log.Logger
logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
logger = log.With(logger, "instance_id", 123)
logger.Log("msg", "starting")
NewWorker(log.With(logger, "component", "worker")).Run()
NewSlacker(log.With(logger, "component", "slacker")).Run()
}
// Output:
// instance_id=123 msg=starting
// instance_id=123 component=worker msg=running
// instance_id=123 component=slacker msg=running
```
### Interact with stdlib logger
Redirect stdlib logger to Go kit logger.
```go
import (
"os"
stdlog "log"
kitlog "github.com/go-kit/kit/log"
)
func main() {
logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout))
stdlog.SetOutput(kitlog.NewStdlibAdapter(logger))
stdlog.Print("I sure like pie")
}
// Output:
// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"}
```
Or, if, for legacy reasons, you need to pipe all of your logging through the
stdlib log package, you can redirect Go kit logger to the stdlib logger.
```go
logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{})
logger.Log("legacy", true, "msg", "at least it's something")
// Output:
// 2016/01/01 12:34:56 legacy=true msg="at least it's something"
```
### Timestamps and callers
```go
var logger log.Logger
logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
logger.Log("msg", "hello")
// Output:
// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello
```
## Levels
Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/kit/log/level).
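A minimal sketch of leveled logging (not taken from the level package docs; it assumes the same imports as the snippets above, plus `github.com/go-kit/kit/log/level`):
```go
var logger log.Logger
logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
logger = level.NewFilter(logger, level.AllowInfo()) // squelch debug events

level.Debug(logger).Log("msg", "this event is squelched by the filter")
level.Info(logger).Log("msg", "this event is emitted")
// Output:
// level=info msg="this event is emitted"
```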
## Supported output formats
- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write))
- JSON
## Enhancements
`package log` is centered on the one-method Logger interface.
```go
type Logger interface {
Log(keyvals ...interface{}) error
}
```
This interface, and its supporting code, is the product of much iteration
and evaluation. For more details on the evolution of the Logger interface,
see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1),
a talk by [Chris Hines](https://github.com/ChrisHines).
Also, please see
[#63](https://github.com/go-kit/kit/issues/63),
[#76](https://github.com/go-kit/kit/pull/76),
[#131](https://github.com/go-kit/kit/issues/131),
[#157](https://github.com/go-kit/kit/pull/157),
[#164](https://github.com/go-kit/kit/issues/164), and
[#252](https://github.com/go-kit/kit/pull/252)
to review historical conversations about package log and the Logger interface.
Value-add packages and suggestions,
like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level),
are of course welcome. Good proposals should
- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With),
- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and
- Be friendly to packages that accept only an unadorned log.Logger.
## Benchmarks & comparisons
There are a few Go logging benchmarks and comparisons that include Go kit's package log.
- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log
- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log

116
vendor/github.com/go-kit/kit/log/doc.go generated vendored Normal file
View file

@ -0,0 +1,116 @@
// Package log provides a structured logger.
//
// Structured logging produces logs easily consumed later by humans or
// machines. Humans might be interested in debugging errors, or tracing
// specific requests. Machines might be interested in counting interesting
// events, or aggregating information for off-line processing. In both cases,
// it is important that the log messages are structured and actionable.
// Package log is designed to encourage both of these best practices.
//
// Basic Usage
//
// The fundamental interface is Logger. Loggers create log events from
// key/value data. The Logger interface has a single method, Log, which
// accepts a sequence of alternating key/value pairs, which this package names
// keyvals.
//
// type Logger interface {
// Log(keyvals ...interface{}) error
// }
//
// Here is an example of a function using a Logger to create log events.
//
// func RunTask(task Task, logger log.Logger) string {
// logger.Log("taskID", task.ID, "event", "starting task")
// ...
// logger.Log("taskID", task.ID, "event", "task complete")
// }
//
// The keys in the above example are "taskID" and "event". The values are
// task.ID, "starting task", and "task complete". Every key is followed
// immediately by its value.
//
// Keys are usually plain strings. Values may be any type that has a sensible
// encoding in the chosen log format. With structured logging it is a good
// idea to log simple values without formatting them. This practice allows
// the chosen logger to encode values in the most appropriate way.
//
// Contextual Loggers
//
// A contextual logger stores keyvals that it includes in all log events.
// Building appropriate contextual loggers reduces repetition and aids
// consistency in the resulting log output. With and WithPrefix add context to
// a logger. We can use With to improve the RunTask example.
//
// func RunTask(task Task, logger log.Logger) string {
// logger = log.With(logger, "taskID", task.ID)
// logger.Log("event", "starting task")
// ...
// taskHelper(task.Cmd, logger)
// ...
// logger.Log("event", "task complete")
// }
//
// The improved version emits the same log events as the original for the
// first and last calls to Log. Passing the contextual logger to taskHelper
// enables each log event created by taskHelper to include the task.ID even
// though taskHelper does not have access to that value. Using contextual
// loggers this way simplifies producing log output that enables tracing the
// life cycle of individual tasks. (See the Contextual example for the full
// code of the above snippet.)
//
// Dynamic Contextual Values
//
// A Valuer function stored in a contextual logger generates a new value each
// time an event is logged. The Valuer example demonstrates how this feature
// works.
//
// Valuers provide the basis for consistently logging timestamps and source
// code location. The log package defines several valuers for that purpose.
// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and
// DefaultCaller. A common logger initialization sequence that ensures all log
// entries contain a timestamp and source location looks like this:
//
// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
//
// Concurrent Safety
//
// Applications with multiple goroutines want each log event written to the
// same logger to remain separate from other log events. Package log provides
// two simple solutions for concurrent safe logging.
//
// NewSyncWriter wraps an io.Writer and serializes each call to its Write
// method. Using a SyncWriter has the benefit that the smallest practical
// portion of the logging logic is performed within a mutex, but it requires
// the formatting Logger to make only one call to Write per log event.
//
// NewSyncLogger wraps any Logger and serializes each call to its Log method.
// Using a SyncLogger has the benefit that it guarantees each log event is
// handled atomically within the wrapped logger, but it typically serializes
// both the formatting and output logic. Use a SyncLogger if the formatting
// logger may perform multiple writes per log event.
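//
// As an illustrative sketch (not part of the upstream documentation), the two
// approaches are wired up as follows; pick one per logger:
//
//	w := log.NewSyncWriter(os.Stdout)   // serialize calls to w.Write
//	logger := log.NewLogfmtLogger(w)
//
//	logger = log.NewSyncLogger(logger)  // or serialize calls to logger.Log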
//
// Error Handling
//
// This package relies on the practice of wrapping or decorating loggers with
// other loggers to provide composable pieces of functionality. It also means
// that Logger.Log must return an error because some
// implementations—especially those that output log data to an io.Writer—may
// encounter errors that cannot be handled locally. This in turn means that
// Loggers that wrap other loggers should return errors from the wrapped
// logger up the stack.
//
// Fortunately, the decorator pattern also provides a way to avoid the
// necessity to check for errors every time an application calls Logger.Log.
// An application required to panic whenever its Logger encounters
// an error could initialize its logger as follows.
//
// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
// logger := log.LoggerFunc(func(keyvals ...interface{}) error {
// if err := fmtlogger.Log(keyvals...); err != nil {
// panic(err)
// }
// return nil
// })
package log

89
vendor/github.com/go-kit/kit/log/json_logger.go generated vendored Normal file
View file

@ -0,0 +1,89 @@
package log
import (
"encoding"
"encoding/json"
"fmt"
"io"
"reflect"
)
type jsonLogger struct {
io.Writer
}
// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a
// single JSON object. Each log event produces no more than one call to
// w.Write. The passed Writer must be safe for concurrent use by multiple
// goroutines if the returned Logger will be used concurrently.
func NewJSONLogger(w io.Writer) Logger {
return &jsonLogger{w}
}
func (l *jsonLogger) Log(keyvals ...interface{}) error {
n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd
m := make(map[string]interface{}, n)
for i := 0; i < len(keyvals); i += 2 {
k := keyvals[i]
var v interface{} = ErrMissingValue
if i+1 < len(keyvals) {
v = keyvals[i+1]
}
merge(m, k, v)
}
return json.NewEncoder(l.Writer).Encode(m)
}
func merge(dst map[string]interface{}, k, v interface{}) {
var key string
switch x := k.(type) {
case string:
key = x
case fmt.Stringer:
key = safeString(x)
default:
key = fmt.Sprint(x)
}
// We want json.Marshaler and encoding.TextMarshaler to take priority over
// err.Error() and v.String(). But json.Marshal (called later) does that by
// default, so we force a no-op if it's one of those two cases.
switch x := v.(type) {
case json.Marshaler:
case encoding.TextMarshaler:
case error:
v = safeError(x)
case fmt.Stringer:
v = safeString(x)
}
dst[key] = v
}
func safeString(str fmt.Stringer) (s string) {
defer func() {
if panicVal := recover(); panicVal != nil {
if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
s = "NULL"
} else {
panic(panicVal)
}
}
}()
s = str.String()
return
}
func safeError(err error) (s interface{}) {
defer func() {
if panicVal := recover(); panicVal != nil {
if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
s = nil
} else {
panic(panicVal)
}
}
}()
s = err.Error()
return
}

22
vendor/github.com/go-kit/kit/log/level/doc.go generated vendored Normal file
View file

@ -0,0 +1,22 @@
// Package level implements leveled logging on top of Go kit's log package. To
// use the level package, create a logger as per normal in your func main, and
// wrap it with level.NewFilter.
//
// var logger log.Logger
// logger = log.NewLogfmtLogger(os.Stderr)
// logger = level.NewFilter(logger, level.AllowInfo()) // <--
// logger = log.With(logger, "ts", log.DefaultTimestampUTC)
//
// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error
// helper methods to emit leveled log events.
//
// logger.Log("foo", "bar") // as normal, no level
// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get())
// if value > 100 {
// level.Error(logger).Log("value", value)
// }
//
// NewFilter allows precise control over what happens when a log event is
// emitted without a level key, or if a squelched level is used. Check the
// Option functions for details.
package level

205
vendor/github.com/go-kit/kit/log/level/level.go generated vendored Normal file
View file

@ -0,0 +1,205 @@
package level
import "github.com/go-kit/kit/log"
// Error returns a logger that includes a Key/ErrorValue pair.
func Error(logger log.Logger) log.Logger {
return log.WithPrefix(logger, Key(), ErrorValue())
}
// Warn returns a logger that includes a Key/WarnValue pair.
func Warn(logger log.Logger) log.Logger {
return log.WithPrefix(logger, Key(), WarnValue())
}
// Info returns a logger that includes a Key/InfoValue pair.
func Info(logger log.Logger) log.Logger {
return log.WithPrefix(logger, Key(), InfoValue())
}
// Debug returns a logger that includes a Key/DebugValue pair.
func Debug(logger log.Logger) log.Logger {
return log.WithPrefix(logger, Key(), DebugValue())
}
// NewFilter wraps next and implements level filtering. See the commentary on
// the Option functions for a detailed description of how to configure levels.
// If no options are provided, all leveled log events created with Debug,
// Info, Warn or Error helper methods are squelched and non-leveled log
// events are passed to next unmodified.
func NewFilter(next log.Logger, options ...Option) log.Logger {
l := &logger{
next: next,
}
for _, option := range options {
option(l)
}
return l
}
type logger struct {
next log.Logger
allowed level
squelchNoLevel bool
errNotAllowed error
errNoLevel error
}
func (l *logger) Log(keyvals ...interface{}) error {
var hasLevel, levelAllowed bool
for i := 1; i < len(keyvals); i += 2 {
if v, ok := keyvals[i].(*levelValue); ok {
hasLevel = true
levelAllowed = l.allowed&v.level != 0
break
}
}
if !hasLevel && l.squelchNoLevel {
return l.errNoLevel
}
if hasLevel && !levelAllowed {
return l.errNotAllowed
}
return l.next.Log(keyvals...)
}
// Option sets a parameter for the leveled logger.
type Option func(*logger)
// AllowAll is an alias for AllowDebug.
func AllowAll() Option {
return AllowDebug()
}
// AllowDebug allows error, warn, info and debug level log events to pass.
func AllowDebug() Option {
return allowed(levelError | levelWarn | levelInfo | levelDebug)
}
// AllowInfo allows error, warn and info level log events to pass.
func AllowInfo() Option {
return allowed(levelError | levelWarn | levelInfo)
}
// AllowWarn allows error and warn level log events to pass.
func AllowWarn() Option {
return allowed(levelError | levelWarn)
}
// AllowError allows only error level log events to pass.
func AllowError() Option {
return allowed(levelError)
}
// AllowNone allows no leveled log events to pass.
func AllowNone() Option {
return allowed(0)
}
func allowed(allowed level) Option {
return func(l *logger) { l.allowed = allowed }
}
// ErrNotAllowed sets the error to return from Log when it squelches a log
// event disallowed by the configured Allow[Level] option. By default,
// ErrNotAllowed is nil; in this case the log event is squelched with no
// error.
func ErrNotAllowed(err error) Option {
return func(l *logger) { l.errNotAllowed = err }
}
// SquelchNoLevel instructs Log to squelch log events with no level, so that
// they don't proceed through to the wrapped logger. If SquelchNoLevel is set
// to true and a log event is squelched in this way, the error value
// configured with ErrNoLevel is returned to the caller.
func SquelchNoLevel(squelch bool) Option {
return func(l *logger) { l.squelchNoLevel = squelch }
}
// ErrNoLevel sets the error to return from Log when it squelches a log event
// with no level. By default, ErrNoLevel is nil; in this case the log event is
// squelched with no error.
func ErrNoLevel(err error) Option {
return func(l *logger) { l.errNoLevel = err }
}
// NewInjector wraps next and returns a logger that adds a Key/level pair to
// the beginning of log events that don't already contain a level. In effect,
// this gives a default level to logs without a level.
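//
// A minimal sketch (illustrative, assuming the logfmt logger from package
// log):
//
//	logger := level.NewInjector(log.NewLogfmtLogger(os.Stderr), level.InfoValue())
//	logger.Log("msg", "no explicit level") // emitted as level=info msg="no explicit level"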
func NewInjector(next log.Logger, level Value) log.Logger {
return &injector{
next: next,
level: level,
}
}
type injector struct {
next log.Logger
level interface{}
}
func (l *injector) Log(keyvals ...interface{}) error {
for i := 1; i < len(keyvals); i += 2 {
if _, ok := keyvals[i].(*levelValue); ok {
return l.next.Log(keyvals...)
}
}
kvs := make([]interface{}, len(keyvals)+2)
kvs[0], kvs[1] = key, l.level
copy(kvs[2:], keyvals)
return l.next.Log(kvs...)
}
// Value is the interface that each of the canonical level values implement.
// It contains unexported methods that prevent types from other packages from
// implementing it and guaranteeing that NewFilter can distinguish the levels
// defined in this package from all other values.
type Value interface {
String() string
levelVal()
}
// Key returns the unique key added to log events by the loggers in this
// package.
func Key() interface{} { return key }
// ErrorValue returns the unique value added to log events by Error.
func ErrorValue() Value { return errorValue }
// WarnValue returns the unique value added to log events by Warn.
func WarnValue() Value { return warnValue }
// InfoValue returns the unique value added to log events by Info.
func InfoValue() Value { return infoValue }
// DebugValue returns the unique value added to log events by Debug.
func DebugValue() Value { return debugValue }
var (
// key is of type interface{} so that it allocates once during package
// initialization and avoids allocating every time the value is added to a
// []interface{} later.
key interface{} = "level"
errorValue = &levelValue{level: levelError, name: "error"}
warnValue = &levelValue{level: levelWarn, name: "warn"}
infoValue = &levelValue{level: levelInfo, name: "info"}
debugValue = &levelValue{level: levelDebug, name: "debug"}
)
type level byte
const (
levelDebug level = 1 << iota
levelInfo
levelWarn
levelError
)
type levelValue struct {
name string
level
}
func (v *levelValue) String() string { return v.name }
func (v *levelValue) levelVal() {}

135
vendor/github.com/go-kit/kit/log/log.go generated vendored Normal file
View file

@ -0,0 +1,135 @@
package log
import "errors"
// Logger is the fundamental interface for all log operations. Log creates a
// log event from keyvals, a variadic sequence of alternating keys and values.
// Implementations must be safe for concurrent use by multiple goroutines. In
// particular, any implementation of Logger that appends to keyvals or
// modifies or retains any of its elements must make a copy first.
type Logger interface {
Log(keyvals ...interface{}) error
}
// ErrMissingValue is appended to keyvals slices with odd length to substitute
// the missing value.
var ErrMissingValue = errors.New("(MISSING)")
// With returns a new contextual logger with keyvals prepended to those passed
// to calls to Log. If logger is also a contextual logger created by With or
// WithPrefix, keyvals is appended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func With(logger Logger, keyvals ...interface{}) Logger {
if len(keyvals) == 0 {
return logger
}
l := newContext(logger)
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
return &context{
logger: l.logger,
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
keyvals: kvs[:len(kvs):len(kvs)],
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// WithPrefix returns a new contextual logger with keyvals prepended to those
// passed to calls to Log. If logger is also a contextual logger created by
// With or WithPrefix, keyvals is prepended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
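//
// A small illustrative sketch of how WithPrefix differs from With:
//
//	base := log.With(log.NewLogfmtLogger(os.Stdout), "a", 1)
//	log.With(base, "b", 2).Log("msg", "hi")       // a=1 b=2 msg=hi
//	log.WithPrefix(base, "b", 2).Log("msg", "hi") // b=2 a=1 msg=hi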
func WithPrefix(logger Logger, keyvals ...interface{}) Logger {
if len(keyvals) == 0 {
return logger
}
l := newContext(logger)
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
n := len(l.keyvals) + len(keyvals)
if len(keyvals)%2 != 0 {
n++
}
kvs := make([]interface{}, 0, n)
kvs = append(kvs, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
kvs = append(kvs, l.keyvals...)
return &context{
logger: l.logger,
keyvals: kvs,
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// context is the Logger implementation returned by With and WithPrefix. It
// wraps a Logger and holds keyvals that it includes in all log events. Its
// Log method calls bindValues to generate values for each Valuer in the
// context keyvals.
//
// A context must always have the same number of stack frames between calls to
// its Log method and the eventual binding of Valuers to their value. This
// requirement comes from the functional requirement to allow a context to
// resolve application call site information for a Caller stored in the
// context. To do this we must be able to predict the number of logging
// functions on the stack when bindValues is called.
//
// Two implementation details provide the needed stack depth consistency.
//
// 1. newContext avoids introducing an additional layer when asked to
// wrap another context.
// 2. With and WithPrefix avoid introducing an additional layer by
// returning a newly constructed context with a merged keyvals rather
// than simply wrapping the existing context.
type context struct {
logger Logger
keyvals []interface{}
hasValuer bool
}
func newContext(logger Logger) *context {
if c, ok := logger.(*context); ok {
return c
}
return &context{logger: logger}
}
// Log replaces all value elements (odd indexes) containing a Valuer in the
// stored context with their generated value, appends keyvals, and passes the
// result to the wrapped Logger.
func (l *context) Log(keyvals ...interface{}) error {
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
if l.hasValuer {
// If no keyvals were appended above then we must copy l.keyvals so
// that future log events will reevaluate the stored Valuers.
if len(keyvals) == 0 {
kvs = append([]interface{}{}, l.keyvals...)
}
bindValues(kvs[:len(l.keyvals)])
}
return l.logger.Log(kvs...)
}
// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
// object that calls f.
type LoggerFunc func(...interface{}) error
// Log implements Logger by calling f(keyvals...).
func (f LoggerFunc) Log(keyvals ...interface{}) error {
return f(keyvals...)
}

62
vendor/github.com/go-kit/kit/log/logfmt_logger.go generated vendored Normal file
View file

@ -0,0 +1,62 @@
package log
import (
"bytes"
"io"
"sync"
"github.com/go-logfmt/logfmt"
)
type logfmtEncoder struct {
*logfmt.Encoder
buf bytes.Buffer
}
func (l *logfmtEncoder) Reset() {
l.Encoder.Reset()
l.buf.Reset()
}
var logfmtEncoderPool = sync.Pool{
New: func() interface{} {
var enc logfmtEncoder
enc.Encoder = logfmt.NewEncoder(&enc.buf)
return &enc
},
}
type logfmtLogger struct {
w io.Writer
}
// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in
// logfmt format. Each log event produces no more than one call to w.Write.
// The passed Writer must be safe for concurrent use by multiple goroutines if
// the returned Logger will be used concurrently.
func NewLogfmtLogger(w io.Writer) Logger {
return &logfmtLogger{w}
}
func (l logfmtLogger) Log(keyvals ...interface{}) error {
enc := logfmtEncoderPool.Get().(*logfmtEncoder)
enc.Reset()
defer logfmtEncoderPool.Put(enc)
if err := enc.EncodeKeyvals(keyvals...); err != nil {
return err
}
// Add newline to the end of the buffer
if err := enc.EndRecord(); err != nil {
return err
}
// The Logger interface requires implementations to be safe for concurrent
// use by multiple goroutines. For this implementation that means making
// only one call to l.w.Write() for each call to Log.
if _, err := l.w.Write(enc.buf.Bytes()); err != nil {
return err
}
return nil
}

8
vendor/github.com/go-kit/kit/log/nop_logger.go generated vendored Normal file
View file

@ -0,0 +1,8 @@
package log
type nopLogger struct{}
// NewNopLogger returns a logger that doesn't do anything.
func NewNopLogger() Logger { return nopLogger{} }
func (nopLogger) Log(...interface{}) error { return nil }

116
vendor/github.com/go-kit/kit/log/stdlib.go generated vendored Normal file
View file

@ -0,0 +1,116 @@
package log
import (
"io"
"log"
"regexp"
"strings"
)
// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's
// designed to be passed to a Go kit logger as the writer, for cases where
// it's necessary to redirect all Go kit log output to the stdlib logger.
//
// If you have any choice in the matter, you shouldn't use this. Prefer to
// redirect the stdlib log to the Go kit logger via NewStdlibAdapter.
type StdlibWriter struct{}
// Write implements io.Writer.
func (w StdlibWriter) Write(p []byte) (int, error) {
log.Print(strings.TrimSpace(string(p)))
return len(p), nil
}
// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
// logger's SetOutput. It will extract date/timestamps, filenames, and
// messages, and place them under relevant keys.
type StdlibAdapter struct {
Logger
timestampKey string
fileKey string
messageKey string
}
// StdlibAdapterOption sets a parameter for the StdlibAdapter.
type StdlibAdapterOption func(*StdlibAdapter)
// TimestampKey sets the key for the timestamp field. By default, it's "ts".
func TimestampKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.timestampKey = key }
}
// FileKey sets the key for the file and line field. By default, it's "caller".
func FileKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.fileKey = key }
}
// MessageKey sets the key for the actual log message. By default, it's "msg".
func MessageKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.messageKey = key }
}
// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
// logger. It's designed to be passed to log.SetOutput.
func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
a := StdlibAdapter{
Logger: logger,
timestampKey: "ts",
fileKey: "caller",
messageKey: "msg",
}
for _, option := range options {
option(&a)
}
return a
}
func (a StdlibAdapter) Write(p []byte) (int, error) {
result := subexps(p)
keyvals := []interface{}{}
var timestamp string
if date, ok := result["date"]; ok && date != "" {
timestamp = date
}
if time, ok := result["time"]; ok && time != "" {
if timestamp != "" {
timestamp += " "
}
timestamp += time
}
if timestamp != "" {
keyvals = append(keyvals, a.timestampKey, timestamp)
}
if file, ok := result["file"]; ok && file != "" {
keyvals = append(keyvals, a.fileKey, file)
}
if msg, ok := result["msg"]; ok {
keyvals = append(keyvals, a.messageKey, msg)
}
if err := a.Logger.Log(keyvals...); err != nil {
return 0, err
}
return len(p), nil
}
const (
logRegexpDate = `(?P<date>[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?`
logRegexpTime = `(?P<time>[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?)?[ ]?`
logRegexpFile = `(?P<file>.+?:[0-9]+)?`
logRegexpMsg = `(: )?(?P<msg>.*)`
)
var (
logRegexp = regexp.MustCompile(logRegexpDate + logRegexpTime + logRegexpFile + logRegexpMsg)
)
func subexps(line []byte) map[string]string {
m := logRegexp.FindSubmatch(line)
if len(m) < len(logRegexp.SubexpNames()) {
return map[string]string{}
}
result := map[string]string{}
for i, name := range logRegexp.SubexpNames() {
result[name] = string(m[i])
}
return result
}

116
vendor/github.com/go-kit/kit/log/sync.go generated vendored Normal file
View file

@ -0,0 +1,116 @@
package log
import (
"io"
"sync"
"sync/atomic"
)
// SwapLogger wraps another logger that may be safely replaced while other
// goroutines use the SwapLogger concurrently. The zero value for a SwapLogger
// will discard all log events without error.
//
// SwapLogger serves well as a package global logger that can be changed by
// importers.
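//
// A rough usage sketch (the variable name is illustrative):
//
//	var logger log.SwapLogger                    // zero value discards events
//	logger.Log("msg", "dropped")                 // no output, no error
//	logger.Swap(log.NewLogfmtLogger(os.Stderr))  // importers may install a real logger
//	logger.Log("msg", "now written to stderr")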
type SwapLogger struct {
logger atomic.Value
}
type loggerStruct struct {
Logger
}
// Log implements the Logger interface by forwarding keyvals to the currently
// wrapped logger. It does not log anything if the wrapped logger is nil.
func (l *SwapLogger) Log(keyvals ...interface{}) error {
s, ok := l.logger.Load().(loggerStruct)
if !ok || s.Logger == nil {
return nil
}
return s.Log(keyvals...)
}
// Swap replaces the currently wrapped logger with logger. Swap may be called
// concurrently with calls to Log from other goroutines.
func (l *SwapLogger) Swap(logger Logger) {
l.logger.Store(loggerStruct{logger})
}
// NewSyncWriter returns a new writer that is safe for concurrent use by
// multiple goroutines. Writes to the returned writer are passed on to w. If
// another write is already in progress, the calling goroutine blocks until
// the writer is available.
//
// If w implements the following interface, so does the returned writer.
//
// interface {
// Fd() uintptr
// }
func NewSyncWriter(w io.Writer) io.Writer {
switch w := w.(type) {
case fdWriter:
return &fdSyncWriter{fdWriter: w}
default:
return &syncWriter{Writer: w}
}
}
// syncWriter synchronizes concurrent writes to an io.Writer.
type syncWriter struct {
sync.Mutex
io.Writer
}
// Write writes p to the underlying io.Writer. If another write is already in
// progress, the calling goroutine blocks until the syncWriter is available.
func (w *syncWriter) Write(p []byte) (n int, err error) {
w.Lock()
n, err = w.Writer.Write(p)
w.Unlock()
return n, err
}
// fdWriter is an io.Writer that also has an Fd method. The most common
// example of an fdWriter is an *os.File.
type fdWriter interface {
io.Writer
Fd() uintptr
}
// fdSyncWriter synchronizes concurrent writes to an fdWriter.
type fdSyncWriter struct {
sync.Mutex
fdWriter
}
// Write writes p to the underlying io.Writer. If another write is already in
// progress, the calling goroutine blocks until the fdSyncWriter is available.
func (w *fdSyncWriter) Write(p []byte) (n int, err error) {
w.Lock()
n, err = w.fdWriter.Write(p)
w.Unlock()
return n, err
}
// syncLogger provides concurrent safe logging for another Logger.
type syncLogger struct {
mu sync.Mutex
logger Logger
}
// NewSyncLogger returns a logger that synchronizes concurrent use of the
// wrapped logger. When multiple goroutines use the SyncLogger concurrently
// only one goroutine will be allowed to log to the wrapped logger at a time.
// The other goroutines will block until the logger is available.
func NewSyncLogger(logger Logger) Logger {
return &syncLogger{logger: logger}
}
// Log logs keyvals to the underlying Logger. If another log is already in
// progress, the calling goroutine blocks until the syncLogger is available.
func (l *syncLogger) Log(keyvals ...interface{}) error {
l.mu.Lock()
err := l.logger.Log(keyvals...)
l.mu.Unlock()
return err
}

110
vendor/github.com/go-kit/kit/log/value.go generated vendored Normal file
View file

@ -0,0 +1,110 @@
package log
import (
"runtime"
"strconv"
"strings"
"time"
)
// A Valuer generates a log value. When passed to With or WithPrefix in a
// value element (odd indexes), it represents a dynamic value which is re-
// evaluated with each log event.
type Valuer func() interface{}
// bindValues replaces all value elements (odd indexes) containing a Valuer
// with their generated value.
func bindValues(keyvals []interface{}) {
for i := 1; i < len(keyvals); i += 2 {
if v, ok := keyvals[i].(Valuer); ok {
keyvals[i] = v()
}
}
}
// containsValuer returns true if any of the value elements (odd indexes)
// contain a Valuer.
func containsValuer(keyvals []interface{}) bool {
for i := 1; i < len(keyvals); i += 2 {
if _, ok := keyvals[i].(Valuer); ok {
return true
}
}
return false
}
// Timestamp returns a timestamp Valuer. It invokes the t function to get the
// time; unless you are doing something tricky, pass time.Now.
//
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func Timestamp(t func() time.Time) Valuer {
return func() interface{} { return t() }
}
// TimestampFormat returns a timestamp Valuer with a custom time format. It
// invokes the t function to get the time to format; unless you are doing
// something tricky, pass time.Now. The layout string is passed to
// Time.Format.
//
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func TimestampFormat(t func() time.Time, layout string) Valuer {
return func() interface{} {
return timeFormat{
time: t(),
layout: layout,
}
}
}
// A timeFormat represents an instant in time and a layout used when
// marshaling to a text format.
type timeFormat struct {
time time.Time
layout string
}
func (tf timeFormat) String() string {
return tf.time.Format(tf.layout)
}
// MarshalText implements encoding.TextMarshaler.
func (tf timeFormat) MarshalText() (text []byte, err error) {
// The following code adapted from the standard library time.Time.Format
// method. Using the same undocumented magic constant to extend the size
// of the buffer as seen there.
b := make([]byte, 0, len(tf.layout)+10)
b = tf.time.AppendFormat(b, tf.layout)
return b, nil
}
// Caller returns a Valuer that returns a file and line from a specified depth
// in the callstack. Users will probably want to use DefaultCaller.
func Caller(depth int) Valuer {
return func() interface{} {
_, file, line, _ := runtime.Caller(depth)
idx := strings.LastIndexByte(file, '/')
// using idx+1 below handles both of the following cases:
// idx == -1 because no "/" was found, or
// idx >= 0 and we want to start at the character after the found "/".
return file[idx+1:] + ":" + strconv.Itoa(line)
}
}
var (
// DefaultTimestamp is a Valuer that returns the current wallclock time,
// respecting time zones, when bound.
DefaultTimestamp = TimestampFormat(time.Now, time.RFC3339Nano)
// DefaultTimestampUTC is a Valuer that returns the current time in UTC
// when bound.
DefaultTimestampUTC = TimestampFormat(
func() time.Time { return time.Now().UTC() },
time.RFC3339Nano,
)
// DefaultCaller is a Valuer that returns the file and line where the Log
// method was invoked. It can only be used with log.With.
DefaultCaller = Caller(3)
)

4
vendor/github.com/go-logfmt/logfmt/.gitignore generated vendored Normal file
View file

@ -0,0 +1,4 @@
_testdata/
_testdata2/
logfmt-fuzz.zip
logfmt.test.exe

Some files were not shown because too many files have changed in this diff.