Mirror of https://github.com/prometheus/node_exporter.git (synced 2025-03-05 21:00:12 -08:00)

commit d679000a11

Merge branch 'master' of github.com:prometheus/node_exporter into add-arp-states

Signed-off-by: Emin Umut Gercek <eumutgercek@gmail.com>

.github/workflows/golangci-lint.yml (vendored, 4 changed lines)
@@ -26,7 +26,7 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Go
-        uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
+        uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
         with:
           go-version: 1.23.x
       - name: Install snmp_exporter/generator dependencies

@@ -36,4 +36,4 @@ jobs:
         uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
         with:
           args: --verbose
-          version: v1.61.0
+          version: v1.62.0
@@ -1,17 +1,9 @@
 linters:
   enable:
     - depguard
+    - goimports
     - misspell
     - revive
-  disable:
-    # Disable soon to deprecated[1] linters that lead to false
-    # positives when build tags disable certain files[2]
-    # 1: https://github.com/golangci/golangci-lint/issues/1841
-    # 2: https://github.com/prometheus/node_exporter/issues/1545
-    - deadcode
-    - unused
-    - structcheck
-    - varcheck
 
 issues:
   exclude-rules:
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.61.0
+GOLANGCI_LINT_VERSION ?= v1.62.0
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -99,8 +99,8 @@ cpu | flags | --collector.cpu.info.flags-include | N/A
 diskstats | device | --collector.diskstats.device-include | --collector.diskstats.device-exclude
 ethtool | device | --collector.ethtool.device-include | --collector.ethtool.device-exclude
 ethtool | metrics | --collector.ethtool.metrics-include | N/A
-filesystem | fs-types | N/A | --collector.filesystem.fs-types-exclude
-filesystem | mount-points | N/A | --collector.filesystem.mount-points-exclude
+filesystem | fs-types | --collector.filesystem.fs-types-include | --collector.filesystem.fs-types-exclude
+filesystem | mount-points | --collector.filesystem.mount-points-include | --collector.filesystem.mount-points-exclude
 hwmon | chip | --collector.hwmon.chip-include | --collector.hwmon.chip-exclude
 hwmon | sensor | --collector.hwmon.sensor-include | --collector.hwmon.sensor-exclude
 interrupts | name | --collector.interrupts.name-include | --collector.interrupts.name-exclude
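Note: this README row change documents the new include counterparts to the filesystem collector's exclude flags. As a purely illustrative invocation (the mount-point regexp here is hypothetical, not from this commit), limiting collection to /home and /var would look like:

    ./node_exporter --collector.filesystem.mount-points-include='^/(home|var)($|/)'

Each include flag is mutually exclusive with its exclude twin, which the flag definitions later in this diff enforce at startup.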
@@ -17,13 +17,11 @@
 package collector
 
 import (
-	"errors"
 	"fmt"
 	"log/slog"
-	"net"
 
 	"github.com/alecthomas/kingpin/v2"
-	"github.com/jsimonetti/rtnetlink/v2"
+	"github.com/jsimonetti/rtnetlink/v2/rtnl"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/procfs"
 	"golang.org/x/sys/unix"

@@ -97,53 +95,41 @@ func getTotalArpEntries(deviceEntries []procfs.ARPEntry) map[string]uint32 {
 }
 
 func getArpEntriesRTNL() (map[string]uint32, map[string]map[string]int, error) {
-	conn, err := rtnetlink.Dial(nil)
+	conn, err := rtnl.Dial(nil)
 	if err != nil {
 		return nil, nil, err
 	}
 	defer conn.Close()
 
-	neighbors, err := conn.Neigh.List()
+	// Neighbors will also contain IPv6 neighbors, but since this is purely an ARP collector,
+	// restrict to AF_INET.
+	neighbors, err := conn.Neighbours(nil, unix.AF_INET)
 	if err != nil {
 		return nil, nil, err
 	}
 
-	ifIndexEntries := make(map[uint32]uint32)
-	ifIndexStates := make(map[uint32]map[string]int)
+	// Map of interface name to ARP neighbor count.
+	entries := make(map[string]uint32)
+	// Map of map[InterfaceName]map[StateName]int
+	states := make(map[string]map[string]int)
 
 	for _, n := range neighbors {
-		// Neighbors will also contain IPv6 neighbors, but since this is purely an ARP collector,
-		// restrict to AF_INET. Also skip entries which have state NUD_NOARP to conform to output
-		// of /proc/net/arp.
-		if n.Family == unix.AF_INET && n.State&unix.NUD_NOARP == 0 {
-			ifIndexEntries[n.Index]++
-
-			_, ok := ifIndexStates[n.Index]
-			if !ok {
-				ifIndexStates[n.Index] = make(map[string]int)
-			}
-			ifIndexStates[n.Index][neighborStatesMap[n.State]]++
-		}
-	}
-
-	enumEntries := make(map[string]uint32)
-	enumStates := make(map[string]map[string]int)
-
-	// Convert interface indexes to names.
-	for ifIndex, entryCount := range ifIndexEntries {
-		iface, err := net.InterfaceByIndex(int(ifIndex))
-		if err != nil {
-			if errors.Unwrap(err).Error() == "no such network interface" {
-				continue
-			}
-			return nil, nil, err
+		// Skip entries which have state NUD_NOARP to conform to output of /proc/net/arp.
+		if n.State&unix.NUD_NOARP == unix.NUD_NOARP {
+			continue
 		}
 
-		enumEntries[iface.Name] = entryCount
-		enumStates[iface.Name] = ifIndexStates[ifIndex]
+		entries[n.Interface.Name]++
+
+		_, ok := states[n.Interface.Name]
+		if !ok {
+			states[n.Interface.Name] = make(map[string]int)
+		}
+
+		states[n.Interface.Name][neighborStatesMap[n.State]]++
 	}
 
-	return enumEntries, enumStates, nil
+	return entries, states, nil
 }
 
 func (c *arpCollector) Update(ch chan<- prometheus.Metric) error {
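As a reading aid, here is a minimal standalone sketch of the rtnl-based enumeration the hunk above switches to. It only uses calls and fields that appear in the diff (rtnl.Dial, Conn.Neighbours, n.Interface.Name, n.State); the program itself is illustrative, not part of the commit:

    package main

    import (
        "fmt"

        "github.com/jsimonetti/rtnetlink/v2/rtnl"
        "golang.org/x/sys/unix"
    )

    func main() {
        conn, err := rtnl.Dial(nil)
        if err != nil {
            panic(err)
        }
        defer conn.Close()

        // A nil interface lists neighbours on all interfaces; unix.AF_INET
        // restricts the dump to IPv4, i.e. actual ARP entries.
        neighbors, err := conn.Neighbours(nil, unix.AF_INET)
        if err != nil {
            panic(err)
        }
        for _, n := range neighbors {
            fmt.Printf("%s: state %#x\n", n.Interface.Name, n.State)
        }
    }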
@@ -17,6 +17,7 @@
 package collector
 
 import (
+	"errors"
 	"fmt"
 	"log/slog"
 	"os"

@@ -26,15 +27,17 @@ import (
 	"strconv"
 	"sync"
 
+	"golang.org/x/exp/maps"
+
 	"github.com/alecthomas/kingpin/v2"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/procfs"
 	"github.com/prometheus/procfs/sysfs"
-	"golang.org/x/exp/maps"
 )
 
 type cpuCollector struct {
-	fs                 procfs.FS
+	procfs             procfs.FS
+	sysfs              sysfs.FS
 	cpu                *prometheus.Desc
 	cpuInfo            *prometheus.Desc
 	cpuFrequencyHz     *prometheus.Desc

@@ -45,6 +48,7 @@ type cpuCollector struct {
 	cpuPackageThrottle *prometheus.Desc
 	cpuIsolated        *prometheus.Desc
 	logger             *slog.Logger
+	cpuOnline          *prometheus.Desc
 	cpuStats           map[int64]procfs.CPUStat
 	cpuStatsMutex      sync.Mutex
 	isolatedCpus       []uint16

@@ -70,17 +74,17 @@ func init() {
 
 // NewCPUCollector returns a new Collector exposing kernel/system statistics.
 func NewCPUCollector(logger *slog.Logger) (Collector, error) {
-	fs, err := procfs.NewFS(*procPath)
+	pfs, err := procfs.NewFS(*procPath)
 	if err != nil {
 		return nil, fmt.Errorf("failed to open procfs: %w", err)
 	}
 
-	sysfs, err := sysfs.NewFS(*sysPath)
+	sfs, err := sysfs.NewFS(*sysPath)
 	if err != nil {
 		return nil, fmt.Errorf("failed to open sysfs: %w", err)
 	}
 
-	isolcpus, err := sysfs.IsolatedCPUs()
+	isolcpus, err := sfs.IsolatedCPUs()
 	if err != nil {
 		if !os.IsNotExist(err) {
 			return nil, fmt.Errorf("Unable to get isolated cpus: %w", err)

@@ -89,7 +93,8 @@ func NewCPUCollector(logger *slog.Logger) (Collector, error) {
 	}
 
 	c := &cpuCollector{
-		fs:     fs,
+		procfs: pfs,
+		sysfs:  sfs,
 		cpu:    nodeCPUSecondsDesc,
 		cpuInfo: prometheus.NewDesc(
 			prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "info"),

@@ -131,6 +136,11 @@ func NewCPUCollector(logger *slog.Logger) (Collector, error) {
 			"Whether each core is isolated, information from /sys/devices/system/cpu/isolated.",
 			[]string{"cpu"}, nil,
 		),
+		cpuOnline: prometheus.NewDesc(
+			prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "online"),
+			"CPUs that are online and being scheduled.",
+			[]string{"cpu"}, nil,
+		),
 		logger:       logger,
 		isolatedCpus: isolcpus,
 		cpuStats:     make(map[int64]procfs.CPUStat),

@@ -177,12 +187,21 @@ func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error {
 	if c.isolatedCpus != nil {
 		c.updateIsolated(ch)
 	}
-	return c.updateThermalThrottle(ch)
+	err := c.updateThermalThrottle(ch)
+	if err != nil {
+		return err
+	}
+	err = c.updateOnline(ch)
+	if err != nil {
+		return err
+	}
+
+	return nil
 }
 
 // updateInfo reads /proc/cpuinfo
 func (c *cpuCollector) updateInfo(ch chan<- prometheus.Metric) error {
-	info, err := c.fs.CPUInfo()
+	info, err := c.procfs.CPUInfo()
 	if err != nil {
 		return err
 	}

@@ -333,9 +352,31 @@ func (c *cpuCollector) updateIsolated(ch chan<- prometheus.Metric) {
 	}
 }
 
+// updateOnline reads /sys/devices/system/cpu/cpu*/online through sysfs and exports online status metrics.
+func (c *cpuCollector) updateOnline(ch chan<- prometheus.Metric) error {
+	cpus, err := c.sysfs.CPUs()
+	if err != nil {
+		return err
+	}
+	// No-op if the system does not support CPU online stats.
+	cpu0 := cpus[0]
+	if _, err := cpu0.Online(); err != nil && errors.Is(err, os.ErrNotExist) {
+		return nil
+	}
+	for _, cpu := range cpus {
+		setOnline := float64(0)
+		if online, _ := cpu.Online(); online {
+			setOnline = 1
+		}
+		ch <- prometheus.MustNewConstMetric(c.cpuOnline, prometheus.GaugeValue, setOnline, cpu.Number())
+	}
+
+	return nil
+}
+
 // updateStat reads /proc/stat through procfs and exports CPU-related metrics.
 func (c *cpuCollector) updateStat(ch chan<- prometheus.Metric) error {
-	stats, err := c.fs.Stat()
+	stats, err := c.procfs.Stat()
 	if err != nil {
 		return err
 	}
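For context, a small sketch of how the new online gauge is sourced through procfs' sysfs package, using only the APIs the hunks above rely on (sysfs.NewFS, FS.CPUs, CPU.Online, CPU.Number); error handling is simplified and the program is illustrative only:

    package main

    import (
        "fmt"

        "github.com/prometheus/procfs/sysfs"
    )

    func main() {
        sfs, err := sysfs.NewFS("/sys")
        if err != nil {
            panic(err)
        }
        cpus, err := sfs.CPUs()
        if err != nil {
            panic(err)
        }
        for _, cpu := range cpus {
            // Online reads /sys/devices/system/cpu/cpu<N>/online. The file is
            // typically absent for cpu0 (it cannot be offlined), which is why
            // the collector treats os.ErrNotExist on cpus[0] as "not supported".
            online, err := cpu.Online()
            fmt.Printf("cpu%s online=%v err=%v\n", cpu.Number(), online, err)
        }
    }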
@@ -18,10 +18,11 @@ package collector
 
 import (
 	"fmt"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/procfs/sysfs"
 	"log/slog"
 	"strings"
 
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/procfs/sysfs"
 )
 
 type cpuFreqCollector struct {
@@ -32,12 +32,12 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
 		return nil, err
 	}
 	for _, stat := range fsStat {
-		if c.excludedMountPointsPattern.MatchString(stat.MountPoint) {
+		if c.mountPointFilter.ignored(stat.MountPoint) {
 			c.logger.Debug("Ignoring mount point", "mountpoint", stat.MountPoint)
 			continue
 		}
 		fstype := stat.TypeString()
-		if c.excludedFSTypesPattern.MatchString(fstype) {
+		if c.fsTypeFilter.ignored(fstype) {
 			c.logger.Debug("Ignoring fs type", "type", fstype)
 			continue
 		}

@@ -48,14 +48,14 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
 	stats = []filesystemStats{}
 	for i := 0; i < int(count); i++ {
 		mountpoint := C.GoString(&mnt[i].f_mntonname[0])
-		if c.excludedMountPointsPattern.MatchString(mountpoint) {
+		if c.mountPointFilter.ignored(mountpoint) {
 			c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
 			continue
 		}
 
 		device := C.GoString(&mnt[i].f_mntfromname[0])
 		fstype := C.GoString(&mnt[i].f_fstypename[0])
-		if c.excludedFSTypesPattern.MatchString(fstype) {
+		if c.fsTypeFilter.ignored(fstype) {
 			c.logger.Debug("Ignoring fs type", "type", fstype)
 			continue
 		}
@@ -19,8 +19,8 @@ package collector
 
 import (
 	"errors"
+	"fmt"
 	"log/slog"
-	"regexp"
 
 	"github.com/alecthomas/kingpin/v2"
 	"github.com/prometheus/client_golang/prometheus"

@@ -36,7 +36,7 @@ var (
 	mountPointsExcludeSet bool
 	mountPointsExclude    = kingpin.Flag(
 		"collector.filesystem.mount-points-exclude",
-		"Regexp of mount points to exclude for filesystem collector.",
+		"Regexp of mount points to exclude for filesystem collector. (mutually exclusive to mount-points-include)",
 	).Default(defMountPointsExcluded).PreAction(func(c *kingpin.ParseContext) error {
 		mountPointsExcludeSet = true
 		return nil

@@ -45,11 +45,15 @@ var (
 		"collector.filesystem.ignored-mount-points",
 		"Regexp of mount points to ignore for filesystem collector.",
 	).Hidden().String()
+	mountPointsInclude = kingpin.Flag(
+		"collector.filesystem.mount-points-include",
+		"Regexp of mount points to include for filesystem collector. (mutually exclusive to mount-points-exclude)",
+	).String()
 
 	fsTypesExcludeSet bool
 	fsTypesExclude    = kingpin.Flag(
 		"collector.filesystem.fs-types-exclude",
-		"Regexp of filesystem types to exclude for filesystem collector.",
+		"Regexp of filesystem types to exclude for filesystem collector. (mutually exclusive to fs-types-include)",
 	).Default(defFSTypesExcluded).PreAction(func(c *kingpin.ParseContext) error {
 		fsTypesExcludeSet = true
 		return nil

@@ -58,13 +62,17 @@ var (
 		"collector.filesystem.ignored-fs-types",
 		"Regexp of filesystem types to ignore for filesystem collector.",
 	).Hidden().String()
+	fsTypesInclude = kingpin.Flag(
+		"collector.filesystem.fs-types-include",
+		"Regexp of filesystem types to include for filesystem collector. (mutually exclusive to fs-types-exclude)",
+	).String()
 
 	filesystemLabelNames = []string{"device", "mountpoint", "fstype", "device_error"}
 )
 
 type filesystemCollector struct {
-	excludedMountPointsPattern    *regexp.Regexp
-	excludedFSTypesPattern        *regexp.Regexp
+	mountPointFilter              deviceFilter
+	fsTypeFilter                  deviceFilter
 	sizeDesc, freeDesc, availDesc *prometheus.Desc
 	filesDesc, filesFreeDesc      *prometheus.Desc
 	roDesc, deviceErrorDesc       *prometheus.Desc

@@ -89,29 +97,7 @@ func init() {
 
 // NewFilesystemCollector returns a new Collector exposing filesystems stats.
 func NewFilesystemCollector(logger *slog.Logger) (Collector, error) {
-	if *oldMountPointsExcluded != "" {
-		if !mountPointsExcludeSet {
-			logger.Warn("--collector.filesystem.ignored-mount-points is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.mount-points-exclude")
-			*mountPointsExclude = *oldMountPointsExcluded
-		} else {
-			return nil, errors.New("--collector.filesystem.ignored-mount-points and --collector.filesystem.mount-points-exclude are mutually exclusive")
-		}
-	}
-
-	if *oldFSTypesExcluded != "" {
-		if !fsTypesExcludeSet {
-			logger.Warn("--collector.filesystem.ignored-fs-types is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.fs-types-exclude")
-			*fsTypesExclude = *oldFSTypesExcluded
-		} else {
-			return nil, errors.New("--collector.filesystem.ignored-fs-types and --collector.filesystem.fs-types-exclude are mutually exclusive")
-		}
-	}
-
-	subsystem := "filesystem"
-	logger.Info("Parsed flag --collector.filesystem.mount-points-exclude", "flag", *mountPointsExclude)
-	mountPointPattern := regexp.MustCompile(*mountPointsExclude)
-	logger.Info("Parsed flag --collector.filesystem.fs-types-exclude", "flag", *fsTypesExclude)
-	filesystemsTypesPattern := regexp.MustCompile(*fsTypesExclude)
+	const subsystem = "filesystem"
 
 	sizeDesc := prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, subsystem, "size_bytes"),

@@ -162,9 +148,19 @@ func NewFilesystemCollector(logger *slog.Logger) (Collector, error) {
 		nil,
 	)
 
+	mountPointFilter, err := newMountPointsFilter(logger)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse mount points filter flags: %w", err)
+	}
+
+	fsTypeFilter, err := newFSTypeFilter(logger)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse fs types filter flags: %w", err)
+	}
+
 	return &filesystemCollector{
-		excludedMountPointsPattern: mountPointPattern,
-		excludedFSTypesPattern:     filesystemsTypesPattern,
+		mountPointFilter: mountPointFilter,
+		fsTypeFilter:     fsTypeFilter,
 		sizeDesc:         sizeDesc,
 		freeDesc:         freeDesc,
 		availDesc:        availDesc,

@@ -230,3 +226,61 @@ func (c *filesystemCollector) Update(ch chan<- prometheus.Metric) error {
 	}
 	return nil
 }
+
+func newMountPointsFilter(logger *slog.Logger) (deviceFilter, error) {
+	if *oldMountPointsExcluded != "" {
+		if !mountPointsExcludeSet {
+			logger.Warn("--collector.filesystem.ignored-mount-points is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.mount-points-exclude")
+			*mountPointsExclude = *oldMountPointsExcluded
+		} else {
+			return deviceFilter{}, errors.New("--collector.filesystem.ignored-mount-points and --collector.filesystem.mount-points-exclude are mutually exclusive")
+		}
+	}
+
+	if *mountPointsInclude != "" && !mountPointsExcludeSet {
+		logger.Debug("mount-points-exclude flag not set when mount-points-include flag is set, assuming include is desired")
+		*mountPointsExclude = ""
+	}
+
+	if *mountPointsExclude != "" && *mountPointsInclude != "" {
+		return deviceFilter{}, errors.New("--collector.filesystem.mount-points-exclude and --collector.filesystem.mount-points-include are mutually exclusive")
+	}
+
+	if *mountPointsExclude != "" {
+		logger.Info("Parsed flag --collector.filesystem.mount-points-exclude", "flag", *mountPointsExclude)
+	}
+	if *mountPointsInclude != "" {
+		logger.Info("Parsed flag --collector.filesystem.mount-points-include", "flag", *mountPointsInclude)
+	}
+
+	return newDeviceFilter(*mountPointsExclude, *mountPointsInclude), nil
+}
+
+func newFSTypeFilter(logger *slog.Logger) (deviceFilter, error) {
+	if *oldFSTypesExcluded != "" {
+		if !fsTypesExcludeSet {
+			logger.Warn("--collector.filesystem.ignored-fs-types is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.fs-types-exclude")
+			*fsTypesExclude = *oldFSTypesExcluded
+		} else {
+			return deviceFilter{}, errors.New("--collector.filesystem.ignored-fs-types and --collector.filesystem.fs-types-exclude are mutually exclusive")
+		}
+	}
+
+	if *fsTypesInclude != "" && !fsTypesExcludeSet {
+		logger.Debug("fs-types-exclude flag not set when fs-types-include flag is set, assuming include is desired")
+		*fsTypesExclude = ""
+	}
+
+	if *fsTypesExclude != "" && *fsTypesInclude != "" {
+		return deviceFilter{}, errors.New("--collector.filesystem.fs-types-exclude and --collector.filesystem.fs-types-include are mutually exclusive")
+	}
+
+	if *fsTypesExclude != "" {
+		logger.Info("Parsed flag --collector.filesystem.fs-types-exclude", "flag", *fsTypesExclude)
+	}
+	if *fsTypesInclude != "" {
+		logger.Info("Parsed flag --collector.filesystem.fs-types-include", "flag", *fsTypesInclude)
+	}
+
+	return newDeviceFilter(*fsTypesExclude, *fsTypesInclude), nil
+}
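The refactor above swaps raw *regexp.Regexp fields for node_exporter's deviceFilter type. Its implementation is not shown in this diff, so the following is a self-contained re-sketch of the assumed semantics (an exclude pattern drops matching names, an include pattern drops non-matching names, an empty filter drops nothing), consistent with the newDeviceFilter(exclude, include) and ignored() call sites above:

    package main

    import (
        "fmt"
        "regexp"
    )

    type deviceFilter struct {
        ignorePattern *regexp.Regexp
        acceptPattern *regexp.Regexp
    }

    func newDeviceFilter(ignoredPattern, acceptPattern string) (f deviceFilter) {
        if ignoredPattern != "" {
            f.ignorePattern = regexp.MustCompile(ignoredPattern)
        }
        if acceptPattern != "" {
            f.acceptPattern = regexp.MustCompile(acceptPattern)
        }
        return
    }

    // ignored returns true when the name should be skipped by the collector.
    func (f deviceFilter) ignored(name string) bool {
        return (f.ignorePattern != nil && f.ignorePattern.MatchString(name)) ||
            (f.acceptPattern != nil && !f.acceptPattern.MatchString(name))
    }

    func main() {
        f := newDeviceFilter("", "^/(home|var)($|/)") // include-only filter
        fmt.Println(f.ignored("/home")) // false: matches the include pattern, kept
        fmt.Println(f.ignored("/proc")) // true: not included, dropped
    }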
@@ -39,14 +39,14 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
 	stats := []filesystemStats{}
 	for _, fs := range buf {
 		mountpoint := unix.ByteSliceToString(fs.Mntonname[:])
-		if c.excludedMountPointsPattern.MatchString(mountpoint) {
+		if c.mountPointFilter.ignored(mountpoint) {
 			c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
 			continue
 		}
 
 		device := unix.ByteSliceToString(fs.Mntfromname[:])
 		fstype := unix.ByteSliceToString(fs.Fstypename[:])
-		if c.excludedFSTypesPattern.MatchString(fstype) {
+		if c.fsTypeFilter.ignored(fstype) {
 			c.logger.Debug("Ignoring fs type", "type", fstype)
 			continue
 		}

@@ -73,12 +73,12 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
 
 	go func() {
 		for _, labels := range mps {
-			if c.excludedMountPointsPattern.MatchString(labels.mountPoint) {
+			if c.mountPointFilter.ignored(labels.mountPoint) {
 				c.logger.Debug("Ignoring mount point", "mountpoint", labels.mountPoint)
 				continue
 			}
-			if c.excludedFSTypesPattern.MatchString(labels.fsType) {
-				c.logger.Debug("Ignoring fs", "type", labels.fsType)
+			if c.fsTypeFilter.ignored(labels.fsType) {
+				c.logger.Debug("Ignoring fs type", "type", labels.fsType)
 				continue
 			}

@@ -97,14 +97,14 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
 	stats = []filesystemStats{}
 	for _, v := range mnt {
 		mountpoint := unix.ByteSliceToString(v.F_mntonname[:])
-		if c.excludedMountPointsPattern.MatchString(mountpoint) {
+		if c.mountPointFilter.ignored(mountpoint) {
 			c.logger.Debug("msg", "Ignoring mount point", "mountpoint", mountpoint)
 			continue
 		}
 
 		device := unix.ByteSliceToString(v.F_mntfromname[:])
 		fstype := unix.ByteSliceToString(v.F_fstypename[:])
-		if c.excludedFSTypesPattern.MatchString(fstype) {
+		if c.fsTypeFilter.ignored(fstype) {
 			c.logger.Debug("msg", "Ignoring fs type", "type", fstype)
 			continue
 		}

@@ -41,14 +41,14 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
 	stats = []filesystemStats{}
 	for _, v := range mnt {
 		mountpoint := unix.ByteSliceToString(v.F_mntonname[:])
-		if c.excludedMountPointsPattern.MatchString(mountpoint) {
+		if c.mountPointFilter.ignored(mountpoint) {
 			c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
 			continue
 		}
 
 		device := unix.ByteSliceToString(v.F_mntfromname[:])
 		fstype := unix.ByteSliceToString(v.F_fstypename[:])
-		if c.excludedFSTypesPattern.MatchString(fstype) {
+		if c.fsTypeFilter.ignored(fstype) {
 			c.logger.Debug("Ignoring fs type", "type", fstype)
 			continue
 		}
@@ -871,6 +871,10 @@ node_hwmon_fan_target_rpm{chip="nct6779",sensor="fan2"} 27000
 # HELP node_hwmon_fan_tolerance Hardware monitor fan element tolerance
 # TYPE node_hwmon_fan_tolerance gauge
 node_hwmon_fan_tolerance{chip="nct6779",sensor="fan2"} 0
+# HELP node_hwmon_freq_freq_mhz Hardware monitor for GPU frequency in MHz
+# TYPE node_hwmon_freq_freq_mhz gauge
+node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="mclk"} 300
+node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="sclk"} 214
 # HELP node_hwmon_in_alarm Hardware sensor alarm status (in)
 # TYPE node_hwmon_in_alarm gauge
 node_hwmon_in_alarm{chip="nct6779",sensor="in0"} 0

@@ -984,6 +988,8 @@ node_hwmon_pwm_weight_temp_step_tol{chip="nct6779",sensor="pwm1"} 0
 # TYPE node_hwmon_sensor_label gauge
 node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp1"} 1
 node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp2"} 1
+node_hwmon_sensor_label{chip="hwmon4",label="mclk",sensor="freq2"} 1
+node_hwmon_sensor_label{chip="hwmon4",label="sclk",sensor="freq1"} 1
 node_hwmon_sensor_label{chip="platform_applesmc_768",label="Left side",sensor="fan1"} 1
 node_hwmon_sensor_label{chip="platform_applesmc_768",label="Right side",sensor="fan2"} 1
 node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 0",sensor="temp2"} 1
@@ -893,6 +893,10 @@ node_hwmon_fan_target_rpm{chip="nct6779",sensor="fan2"} 27000
 # HELP node_hwmon_fan_tolerance Hardware monitor fan element tolerance
 # TYPE node_hwmon_fan_tolerance gauge
 node_hwmon_fan_tolerance{chip="nct6779",sensor="fan2"} 0
+# HELP node_hwmon_freq_freq_mhz Hardware monitor for GPU frequency in MHz
+# TYPE node_hwmon_freq_freq_mhz gauge
+node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="mclk"} 300
+node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="sclk"} 214
 # HELP node_hwmon_in_alarm Hardware sensor alarm status (in)
 # TYPE node_hwmon_in_alarm gauge
 node_hwmon_in_alarm{chip="nct6779",sensor="in0"} 0

@@ -1006,6 +1010,8 @@ node_hwmon_pwm_weight_temp_step_tol{chip="nct6779",sensor="pwm1"} 0
 # TYPE node_hwmon_sensor_label gauge
 node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp1"} 1
 node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp2"} 1
+node_hwmon_sensor_label{chip="hwmon4",label="mclk",sensor="freq2"} 1
+node_hwmon_sensor_label{chip="hwmon4",label="sclk",sensor="freq1"} 1
 node_hwmon_sensor_label{chip="platform_applesmc_768",label="Left side",sensor="fan1"} 1
 node_hwmon_sensor_label{chip="platform_applesmc_768",label="Right side",sensor="fan2"} 1
 node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 0",sensor="temp2"} 1
@@ -437,6 +437,26 @@ Lines: 1
 100000
 Mode: 644
 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: sys/class/hwmon/hwmon4/freq1_input
+Lines: 1
+214000000
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: sys/class/hwmon/hwmon4/freq1_label
+Lines: 1
+sclk
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: sys/class/hwmon/hwmon4/freq2_input
+Lines: 1
+300000000
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: sys/class/hwmon/hwmon4/freq2_label
+Lines: 1
+mclk
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 Path: sys/class/hwmon/hwmon5
 SymlinkTo: ../../devices/platform/bogus.0/hwmon/hwmon5/
 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -44,7 +44,7 @@ var (
 	hwmonSensorTypes = []string{
 		"vrm", "beep_enable", "update_interval", "in", "cpu", "fan",
 		"pwm", "temp", "curr", "power", "energy", "humidity",
-		"intrusion",
+		"intrusion", "freq",
 	}
 )

@@ -357,6 +357,15 @@ func (c *hwMonCollector) updateHwmon(ch chan<- prometheus.Metric, dir string) er
 			continue
 		}
 
+		if sensorType == "freq" && element == "input" {
+			if label, ok := sensorData["label"]; ok {
+				sensorLabel := cleanMetricName(label)
+				desc := prometheus.NewDesc(name+"_freq_mhz", "Hardware monitor for GPU frequency in MHz", hwmonLabelDesc, nil)
+				ch <- prometheus.MustNewConstMetric(
+					desc, prometheus.GaugeValue, parsedValue/1000000.0, append(labels[:len(labels)-1], sensorLabel)...)
+			}
+			continue
+		}
+
 		// fallback, just dump the metric as is
 
 		desc := prometheus.NewDesc(name, "Hardware monitor "+sensorType+" element "+element, hwmonLabelDesc, nil)
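The fixture values and the new e2e output lines are consistent: hwmon freq*_input files report Hz, and the collector divides by 1,000,000, so 214000000 becomes 214 (sclk) and 300000000 becomes 300 (mclk) in node_hwmon_freq_freq_mhz. A trivial restatement of that conversion:

    // freqInputToMHz mirrors the parsedValue/1000000.0 step in the hunk above.
    func freqInputToMHz(hz float64) float64 {
        return hz / 1000000.0
    }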
@@ -18,9 +18,10 @@
 package collector
 
 import (
+	"log/slog"
+
 	"github.com/alecthomas/kingpin/v2"
 	"github.com/prometheus/client_golang/prometheus"
-	"log/slog"
 )
 
 type interruptsCollector struct {

@@ -17,10 +17,11 @@
 package collector
 
 import (
+	"log/slog"
+
 	"github.com/alecthomas/kingpin/v2"
 	"github.com/prometheus-community/go-runit/runit"
 	"github.com/prometheus/client_golang/prometheus"
-	"log/slog"
 )
 
 var runitServiceDir = kingpin.Flag("collector.runit.servicedir", "Path to runit service directory.").Default("/etc/service").String()

@@ -17,9 +17,10 @@
 package collector
 
 import (
+	"log/slog"
+
 	"github.com/opencontainers/selinux/go-selinux"
 	"github.com/prometheus/client_golang/prometheus"
-	"log/slog"
 )
 
 type selinuxCollector struct {

@@ -18,9 +18,10 @@ package collector
 
 import (
 	"fmt"
+	"log/slog"
+
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/procfs"
-	"log/slog"
 )
 
 type softirqsCollector struct {

@@ -18,8 +18,9 @@
 package collector
 
 import (
-	"github.com/prometheus/client_golang/prometheus"
 	"log/slog"
+
+	"github.com/prometheus/client_golang/prometheus"
 )
 
 var unameDesc = prometheus.NewDesc(
@@ -10,7 +10,7 @@
           (
             node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceFillingUpWarningThreshold)d
           and
-            predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[%(fsSpaceFillingUpPredictionWindow)s], 24*60*60) < 0
+            predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[%(fsSpaceFillingUpPredictionWindow)s], %(nodeWarningWindowHours)s*60*60) < 0
           and
             node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
           )

@@ -20,7 +20,7 @@
           severity: 'warning',
         },
         annotations: {
-          summary: 'Filesystem is predicted to run out of space within the next 24 hours.',
+          summary: 'Filesystem is predicted to run out of space within the next %(nodeWarningWindowHours)s hours.' % $._config,
          description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up.',
         },
       },

@@ -30,7 +30,7 @@
           (
             node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceFillingUpCriticalThreshold)d
           and
-            predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 4*60*60) < 0
+            predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], %(nodeCriticalWindowHours)s*60*60) < 0
           and
             node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
           )

@@ -40,7 +40,7 @@
           severity: '%(nodeCriticalSeverity)s' % $._config,
         },
         annotations: {
-          summary: 'Filesystem is predicted to run out of space within the next 4 hours.',
+          summary: 'Filesystem is predicted to run out of space within the next %(nodeCriticalWindowHours)s hours.' % $._config,
           description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast.',
         },
       },

@@ -86,7 +86,7 @@
           (
             node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 40
           and
-            predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 24*60*60) < 0
+            predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], %(nodeWarningWindowHours)s*60*60) < 0
           and
             node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
           )

@@ -96,7 +96,7 @@
           severity: 'warning',
         },
         annotations: {
-          summary: 'Filesystem is predicted to run out of inodes within the next 24 hours.',
+          summary: 'Filesystem is predicted to run out of inodes within the next %(nodeWarningWindowHours)s hours.' % $._config,
           description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up.',
         },
       },

@@ -106,7 +106,7 @@
           (
             node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 20
           and
-            predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 4*60*60) < 0
+            predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], %(nodeCriticalWindowHours)s*60*60) < 0
           and
             node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
           )

@@ -116,7 +116,7 @@
           severity: '%(nodeCriticalSeverity)s' % $._config,
         },
         annotations: {
-          summary: 'Filesystem is predicted to run out of inodes within the next 4 hours.',
+          summary: 'Filesystem is predicted to run out of inodes within the next %(nodeCriticalWindowHours)s hours.' % $._config,
           description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast.',
         },
       },

@@ -191,7 +191,7 @@
         ||| % $._config,
         annotations: {
           summary: 'Number of conntrack are getting close to the limit.',
-          description: '{{ $value | humanizePercentage }} of conntrack entries are used.',
+          description: '{{ $labels.instance }} {{ $value | humanizePercentage }} of conntrack entries are used.',
         },
         labels: {
           severity: 'warning',

@@ -312,7 +312,7 @@
       {
         alert: 'NodeCPUHighUsage',
         expr: |||
-          sum without(mode) (avg without (cpu) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode!="idle"}[2m]))) * 100 > %(cpuHighUsageThreshold)d
+          sum without(mode) (avg without (cpu) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode!~"idle|iowait"}[2m]))) * 100 > %(cpuHighUsageThreshold)d
         ||| % $._config,
         'for': '15m',
         labels: {
@@ -50,6 +50,16 @@
     // 'NodeSystemSaturation' alert.
     systemSaturationPerCoreThreshold: 2,
 
+    // Some of the alerts use predict_linear() to fire alerts ahead of time to
+    // prevent unrecoverable situations (eg. no more disk space). However, the
+    // node may have automatic processes (cronjobs) in place to prevent that
+    // within a certain time window, this may not align with the default time
+    // window of these alerts. This can cause these alerts to start flapping.
+    // By reducing the time window, the system gets more time to
+    // resolve this before problems occur.
+    nodeWarningWindowHours: '24',
+    nodeCriticalWindowHours: '4',
+
     // Available disk space (%) thresholds on which to trigger the
     // 'NodeFilesystemSpaceFillingUp' alerts. These alerts fire if the disk
     // usage grows in a way that it is predicted to run out in 4h or 1d
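Note: these two settings are substituted directly into the second argument of predict_linear() in the alert expressions above, which expects seconds. The defaults therefore reproduce the previously hard-coded look-aheads: 24*60*60 = 86400 s (24 h) for the warning alerts and 4*60*60 = 14400 s (4 h) for the critical ones.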
@ -1,201 +1,178 @@
|
||||||
local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
|
local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
|
||||||
local dashboard = grafana.dashboard;
|
local dashboard = grafana.dashboard;
|
||||||
local row = grafana.row;
|
local variable = dashboard.variable;
|
||||||
local prometheus = grafana.prometheus;
|
local row = grafana.panel.row;
|
||||||
local template = grafana.template;
|
local prometheus = grafana.query.prometheus;
|
||||||
local graphPanel = grafana.graphPanel;
|
|
||||||
|
local timeSeriesPanel = grafana.panel.timeSeries;
|
||||||
|
local tsOptions = timeSeriesPanel.options;
|
||||||
|
local tsStandardOptions = timeSeriesPanel.standardOptions;
|
||||||
|
local tsQueryOptions = timeSeriesPanel.queryOptions;
|
||||||
|
local tsCustom = timeSeriesPanel.fieldConfig.defaults.custom;
|
||||||
|
local tsLegend = tsOptions.legend;
|
||||||
|
|
||||||
local c = import '../config.libsonnet';
|
local c = import '../config.libsonnet';
|
||||||
|
|
||||||
local datasourceTemplate = {
|
local datasource = variable.datasource.new(
|
||||||
current: {
|
'datasource', 'prometheus'
|
||||||
text: 'default',
|
);
|
||||||
value: 'default',
|
|
||||||
},
|
local tsCommonPanelOptions =
|
||||||
hide: 0,
|
variable.query.withDatasourceFromVariable(datasource)
|
||||||
label: 'Data Source',
|
+ tsCustom.stacking.withMode('normal')
|
||||||
name: 'datasource',
|
+ tsCustom.withFillOpacity(100)
|
||||||
options: [],
|
+ tsCustom.withShowPoints('never')
|
||||||
query: 'prometheus',
|
+ tsLegend.withShowLegend(false)
|
||||||
refresh: 1,
|
+ tsOptions.tooltip.withMode('multi')
|
||||||
regex: '',
|
+ tsOptions.tooltip.withSort('desc');
|
||||||
type: 'datasource',
|
|
||||||
};
|
|
||||||
|
|
||||||
local CPUUtilisation =
|
local CPUUtilisation =
|
||||||
graphPanel.new(
|
timeSeriesPanel.new(
|
||||||
'CPU Utilisation',
|
'CPU Utilisation',
|
||||||
datasource='$datasource',
|
)
|
||||||
span=6,
|
+ tsCommonPanelOptions
|
||||||
format='percentunit',
|
+ tsStandardOptions.withUnit('percentunit');
|
||||||
stack=true,
|
|
||||||
fill=10,
|
|
-    legend_show=false,
-  ) { tooltip+: { sort: 2 } };
 
 local CPUSaturation =
   // TODO: Is this a useful panel? At least there should be some explanation how load
   // average relates to the "CPU saturation" in the title.
-  graphPanel.new(
+  timeSeriesPanel.new(
     'CPU Saturation (Load1 per CPU)',
-    datasource='$datasource',
-    span=6,
-    format='percentunit',
-    stack=true,
-    fill=10,
-    legend_show=false,
-  ) { tooltip+: { sort: 2 } };
+  )
+  + tsCommonPanelOptions
+  + tsStandardOptions.withUnit('percentunit');
 
 local memoryUtilisation =
-  graphPanel.new(
+  timeSeriesPanel.new(
     'Memory Utilisation',
-    datasource='$datasource',
-    span=6,
-    format='percentunit',
-    stack=true,
-    fill=10,
-    legend_show=false,
-  ) { tooltip+: { sort: 2 } };
+  )
+  + tsCommonPanelOptions
+  + tsStandardOptions.withUnit('percentunit');
 
 local memorySaturation =
-  graphPanel.new(
+  timeSeriesPanel.new(
     'Memory Saturation (Major Page Faults)',
-    datasource='$datasource',
-    span=6,
-    format='rds',
-    stack=true,
-    fill=10,
-    legend_show=false,
-  ) { tooltip+: { sort: 2 } };
+  )
+  + tsCommonPanelOptions
+  + tsStandardOptions.withUnit('rds');
+
+local networkOverrides = tsStandardOptions.withOverrides(
+  [
+    tsStandardOptions.override.byRegexp.new('/Transmit/')
+    + tsStandardOptions.override.byRegexp.withPropertiesFromOptions(
+      tsCustom.withTransform('negative-Y')
+    ),
+  ]
+);
 
 local networkUtilisation =
-  graphPanel.new(
+  timeSeriesPanel.new(
     'Network Utilisation (Bytes Receive/Transmit)',
-    datasource='$datasource',
-    span=6,
-    format='Bps',
-    stack=true,
-    fill=10,
-    legend_show=false,
   )
-  .addSeriesOverride({ alias: '/Receive/', stack: 'A' })
-  .addSeriesOverride({ alias: '/Transmit/', stack: 'B', transform: 'negative-Y' })
-  { tooltip+: { sort: 2 } };
+  + tsCommonPanelOptions
+  + tsStandardOptions.withUnit('Bps')
+  + networkOverrides;
 
 local networkSaturation =
-  graphPanel.new(
+  timeSeriesPanel.new(
     'Network Saturation (Drops Receive/Transmit)',
-    datasource='$datasource',
-    span=6,
-    format='Bps',
-    stack=true,
-    fill=10,
-    legend_show=false,
   )
-  .addSeriesOverride({ alias: '/ Receive/', stack: 'A' })
-  .addSeriesOverride({ alias: '/ Transmit/', stack: 'B', transform: 'negative-Y' })
-  { tooltip+: { sort: 2 } };
+  + tsCommonPanelOptions
+  + tsStandardOptions.withUnit('Bps')
+  + networkOverrides;
 
 local diskIOUtilisation =
-  graphPanel.new(
+  timeSeriesPanel.new(
     'Disk IO Utilisation',
-    datasource='$datasource',
-    span=6,
-    format='percentunit',
-    stack=true,
-    fill=10,
-    legend_show=false,
-  ) { tooltip+: { sort: 2 } };
+  )
+  + tsCommonPanelOptions
+  + tsStandardOptions.withUnit('percentunit');
 
 local diskIOSaturation =
-  graphPanel.new(
+  timeSeriesPanel.new(
     'Disk IO Saturation',
-    datasource='$datasource',
-    span=6,
-    format='percentunit',
-    stack=true,
-    fill=10,
-    legend_show=false,
-  ) { tooltip+: { sort: 2 } };
+  )
+  + tsCommonPanelOptions
+  + tsStandardOptions.withUnit('percentunit');
 
 local diskSpaceUtilisation =
-  graphPanel.new(
+  timeSeriesPanel.new(
     'Disk Space Utilisation',
-    datasource='$datasource',
-    span=12,
-    format='percentunit',
-    stack=true,
-    fill=10,
-    legend_show=false,
-  ) { tooltip+: { sort: 2 } };
+  )
+  + tsCommonPanelOptions
+  + tsStandardOptions.withUnit('percentunit');
 
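[Editor's note: every panel local above is migrated the same way — grafonnet-lib's `graphPanel.new()` keyword arguments (`format=`, `stack=`, `fill=`, `legend_show=`) become composable mixins added onto `timeSeriesPanel.new()`. A minimal stand-alone sketch of the new style follows; `tsCommonPanelOptions` is referenced by the diff but defined outside this hunk, so the stand-in below is an assumption, built only from calls that appear elsewhere in this commit:

  local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
  local timeSeriesPanel = grafana.panel.timeSeries;
  local tsOptions = timeSeriesPanel.options;
  local tsStandardOptions = timeSeriesPanel.standardOptions;
  local tsCustom = timeSeriesPanel.fieldConfig.defaults.custom;

  // Assumed stand-in for tsCommonPanelOptions (real definition not shown in this hunk):
  // the stacked/filled/tooltip defaults the old stack=true, fill=10 arguments provided.
  local tsCommonPanelOptions =
    tsOptions.tooltip.withMode('multi')
    + tsCustom.stacking.withMode('normal')
    + tsCustom.withFillOpacity(10)
    + tsCustom.withShowPoints('never');

  local examplePanel =
    timeSeriesPanel.new('Example Utilisation')
    + tsCommonPanelOptions
    + tsStandardOptions.withUnit('percentunit');
]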
 {
-  _clusterTemplate:: template.new(
-    name='cluster',
-    datasource='$datasource',
-    query='label_values(node_time_seconds, %s)' % $._config.clusterLabel,
-    current='',
-    hide=if $._config.showMultiCluster then '' else '2',
-    refresh=2,
-    includeAll=false,
-    sort=1
-  ),
+  _clusterVariable::
+    variable.query.new('cluster')
+    + variable.query.withDatasourceFromVariable(datasource)
+    + variable.query.queryTypes.withLabelValues(
+      $._config.clusterLabel,
+      'node_time_seconds',
+    )
+    + (if $._config.showMultiCluster then variable.query.generalOptions.showOnDashboard.withLabelAndValue() else variable.query.generalOptions.showOnDashboard.withNothing())
+    + variable.query.refresh.onTime()
+    + variable.query.selectionOptions.withIncludeAll(false)
+    + variable.query.withSort(asc=true),
 
   grafanaDashboards+:: {
     'node-rsrc-use.json':
 
       dashboard.new(
         '%sUSE Method / Node' % $._config.dashboardNamePrefix,
-        time_from='now-1h',
-        tags=($._config.dashboardTags),
-        timezone='utc',
-        refresh='30s',
-        graphTooltip='shared_crosshair',
-        uid=std.md5('node-rsrc-use.json')
       )
-      .addTemplate(datasourceTemplate)
-      .addTemplate($._clusterTemplate)
-      .addTemplate(
-        template.new(
+      + dashboard.time.withFrom('now-1h')
+      + dashboard.withTags($._config.dashboardTags)
+      + dashboard.withTimezone('utc')
+      + dashboard.withRefresh('30s')
+      + dashboard.graphTooltip.withSharedCrosshair()
+      + dashboard.withUid(std.md5('node-rsrc-use.json'))
+      + dashboard.withVariables([
+        datasource,
+        $._clusterVariable,
+        variable.query.new('instance')
+        + variable.query.withDatasourceFromVariable(datasource)
+        + variable.query.queryTypes.withLabelValues(
           'instance',
-          '$datasource',
-          'label_values(node_exporter_build_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}, instance)' % $._config,
-          refresh='time',
-          sort=1
+          'node_exporter_build_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}' % $._config,
         )
-      )
-      .addRow(
+        + variable.query.refresh.onTime()
+        + variable.query.withSort(asc=true),
+      ])
+      + dashboard.withPanels(
+        grafana.util.grid.makeGrid([
           row.new('CPU')
-          .addPanel(CPUUtilisation.addTarget(prometheus.target('instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Utilisation')))
-          .addPanel(CPUSaturation.addTarget(prometheus.target('instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Saturation')))
-      )
-      .addRow(
+          + row.withPanels([
+            CPUUtilisation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Utilisation')]),
+            CPUSaturation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Saturation')]),
+          ]),
           row.new('Memory')
-          .addPanel(memoryUtilisation.addTarget(prometheus.target('instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Utilisation')))
-          .addPanel(memorySaturation.addTarget(prometheus.target('instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Major page Faults')))
-      )
-      .addRow(
+          + row.withPanels([
+            memoryUtilisation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Utilisation')]),
+            memorySaturation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Major page Faults')]),
+          ]),
           row.new('Network')
-          .addPanel(
-            networkUtilisation
-            .addTarget(prometheus.target('instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Receive'))
-            .addTarget(prometheus.target('instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Transmit'))
-          )
-          .addPanel(
-            networkSaturation
-            .addTarget(prometheus.target('instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Receive'))
-            .addTarget(prometheus.target('instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Transmit'))
-          )
-      )
-      .addRow(
+          + row.withPanels([
+            networkUtilisation + tsQueryOptions.withTargets([
+              prometheus.new('$datasource', 'instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Receive'),
+              prometheus.new('$datasource', 'instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Transmit'),
+            ]),
+            networkSaturation + tsQueryOptions.withTargets([
+              prometheus.new('$datasource', 'instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Receive'),
+              prometheus.new('$datasource', 'instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Transmit'),
+            ]),
+          ]),
           row.new('Disk IO')
-          .addPanel(diskIOUtilisation.addTarget(prometheus.target('instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{device}}')))
-          .addPanel(diskIOSaturation.addTarget(prometheus.target('instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{device}}')))
-      )
-      .addRow(
+          + row.withPanels([
+            diskIOUtilisation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('{{device}}')]),
+            diskIOSaturation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('{{device}}')]),
+          ]),
+        ], panelWidth=12, panelHeight=7)
+        + grafana.util.grid.makeGrid([
           row.new('Disk Space')
-          .addPanel(
-            diskSpaceUtilisation.addTarget(prometheus.target(
+          + row.withPanels([
+            diskSpaceUtilisation + tsQueryOptions.withTargets([
+              prometheus.new(
+                '$datasource',
                 |||
                   sort_desc(1 -
                     (
@@ -204,28 +181,36 @@ local diskSpaceUtilisation =
                       max without (mountpoint, fstype) (node_filesystem_size_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"})
                     ) != 0
                   )
-                ||| % $._config, legendFormat='{{device}}'
-            ))
-          )
+                ||| % $._config
+              ) + prometheus.withLegendFormat('{{device}}'),
+            ]),
+          ]),
+        ], panelWidth=24, panelHeight=7, startY=34),
       ),
 
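[Editor's note: the dashboard shell follows the same mixin style — positional/keyword `dashboard.new(...)` arguments become chained `dashboard.with*` calls, templates become `dashboard.withVariables([...])`, and `.addRow(...)` becomes `dashboard.withPanels(grafana.util.grid.makeGrid([...]))`. A condensed, runnable sketch built only from calls visible in this hunk:

  local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
  local dashboard = grafana.dashboard;
  local row = grafana.panel.row;

  local exampleDashboard =
    dashboard.new('Example / Node')
    + dashboard.time.withFrom('now-1h')
    + dashboard.withTimezone('utc')
    + dashboard.withRefresh('30s')
    + dashboard.graphTooltip.withSharedCrosshair()
    + dashboard.withPanels(
      grafana.util.grid.makeGrid([
        row.new('CPU'),  // rows carry their panels via row.withPanels([...])
      ], panelWidth=12, panelHeight=7)
    );
]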
     'node-cluster-rsrc-use.json':
       dashboard.new(
         '%sUSE Method / Cluster' % $._config.dashboardNamePrefix,
-        time_from='now-1h',
-        tags=($._config.dashboardTags),
-        timezone='utc',
-        refresh='30s',
-        graphTooltip='shared_crosshair',
-        uid=std.md5('node-cluster-rsrc-use.json')
       )
-      .addTemplate(datasourceTemplate)
-      .addTemplate($._clusterTemplate)
-      .addRow(
+      + dashboard.time.withFrom('now-1h')
+      + dashboard.withTags($._config.dashboardTags)
+      + dashboard.withTimezone('utc')
+      + dashboard.withRefresh('30s')
+      + dashboard.graphTooltip.withSharedCrosshair()
+      + dashboard.withUid(std.md5('node-cluster-rsrc-use.json'))
+      + dashboard.withVariables([
+        datasource,
+        $._clusterVariable,
+        variable.query.withDatasourceFromVariable(datasource)
+        + variable.query.refresh.onTime()
+        + variable.query.withSort(asc=true),
+      ])
+      + dashboard.withPanels(
+        grafana.util.grid.makeGrid([
           row.new('CPU')
-          .addPanel(
-            CPUUtilisation
-            .addTarget(prometheus.target(
+          + row.withPanels([
+            CPUUtilisation + tsQueryOptions.withTargets([
+              prometheus.new(
+                '$datasource',
                 |||
                   ((
                     instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
@@ -233,79 +218,90 @@ local diskSpaceUtilisation =
                     instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
                   ) != 0 )
                   / scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
-                ||| % $._config, legendFormat='{{ instance }}'
-            ))
-          )
-          .addPanel(
-            CPUSaturation
-            .addTarget(prometheus.target(
+                ||| % $._config
+              ) + prometheus.withLegendFormat('{{ instance }}'),
+            ]),
+            CPUSaturation + tsQueryOptions.withTargets([
+              prometheus.new(
+                '$datasource',
                 |||
                   (
                     instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
                     / scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
                   ) != 0
-                ||| % $._config, legendFormat='{{instance}}'
-            ))
-          )
-      )
-      .addRow(
+                ||| % $._config
+              ) + prometheus.withLegendFormat('{{ instance }}'),
+            ]),
+          ]),
           row.new('Memory')
-          .addPanel(
-            memoryUtilisation
-            .addTarget(prometheus.target(
+          + row.withPanels([
+            memoryUtilisation + tsQueryOptions.withTargets([
+              prometheus.new(
+                '$datasource',
                 |||
                   (
                     instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
                     / scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
                   ) != 0
-                ||| % $._config, legendFormat='{{instance}}',
-            ))
-          )
-          .addPanel(memorySaturation.addTarget(prometheus.target('instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}' % $._config, legendFormat='{{instance}}')))
-      )
-      .addRow(
+                ||| % $._config
+              ) + prometheus.withLegendFormat('{{ instance }}'),
+            ]),
+            memorySaturation + tsQueryOptions.withTargets([
+              prometheus.new(
+                '$datasource',
+                'instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}' % $._config
+              ) + prometheus.withLegendFormat('{{ instance }}'),
+            ]),
+          ]),
           row.new('Network')
-          .addPanel(
-            networkUtilisation
-            .addTarget(prometheus.target('instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Receive'))
-            .addTarget(prometheus.target('instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Transmit'))
-          )
-          .addPanel(
-            networkSaturation
-            .addTarget(prometheus.target('instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Receive'))
-            .addTarget(prometheus.target('instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Transmit'))
-          )
-      )
-      .addRow(
+          + row.withPanels([
+            networkUtilisation + tsQueryOptions.withTargets([
+              prometheus.new(
+                '$datasource',
+                'instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config
+              ) + prometheus.withLegendFormat('{{ instance }} Receive'),
+              prometheus.new(
+                '$datasource',
+                'instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config
+              ) + prometheus.withLegendFormat('{{ instance }} Transmit'),
+            ]),
+            networkSaturation + tsQueryOptions.withTargets([
+              prometheus.new(
+                '$datasource',
+                'instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config
+              ) + prometheus.withLegendFormat('{{ instance }} Receive'),
+              prometheus.new(
+                '$datasource',
+                'instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config
+              ) + prometheus.withLegendFormat('{{ instance }} Transmit'),
+            ]),
+          ]),
           row.new('Disk IO')
-          .addPanel(
-            diskIOUtilisation
-            .addTarget(prometheus.target(
+          + row.withPanels([
+            diskIOUtilisation + tsQueryOptions.withTargets([
+              prometheus.new(
+                '$datasource',
                 |||
-                  (
                   instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
                   / scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
-                  ) != 0
-                ||| % $._config, legendFormat='{{instance}} {{device}}'
-            ))
-          )
-          .addPanel(
-            diskIOSaturation
-            .addTarget(prometheus.target(
+                ||| % $._config
+              ) + prometheus.withLegendFormat('{{ instance }} {{device}}'),
+            ]),
+            diskIOSaturation + tsQueryOptions.withTargets([prometheus.new(
+              '$datasource',
               |||
-                (
                 instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
                 / scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
-                ) != 0
-              ||| % $._config, legendFormat='{{instance}} {{device}}'
-            ))
-          )
-      )
-      .addRow(
+              ||| % $._config
+            ) + prometheus.withLegendFormat('{{ instance }} {{device}}')]),
+          ]),
+        ], panelWidth=12, panelHeight=7)
+        + grafana.util.grid.makeGrid([
           row.new('Disk Space')
-          .addPanel(
-            diskSpaceUtilisation
-            .addTarget(prometheus.target(
+          + row.withPanels([
+            diskSpaceUtilisation + tsQueryOptions.withTargets([
+              prometheus.new(
+                '$datasource',
                 |||
                   sum without (device) (
                     max without (fstype, mountpoint) ((
@@ -315,28 +311,37 @@ local diskSpaceUtilisation =
                   ) != 0)
                 )
                 / scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"})))
-                ||| % $._config, legendFormat='{{instance}}'
-            ))
-          )
+                ||| % $._config
+              ) + prometheus.withLegendFormat('{{ instance }}'),
+            ]),
+          ]),
+        ], panelWidth=24, panelHeight=7, startY=34),
       ),
 } +
 if $._config.showMultiCluster then {
   'node-multicluster-rsrc-use.json':
     dashboard.new(
       '%sUSE Method / Multi-cluster' % $._config.dashboardNamePrefix,
-      time_from='now-1h',
-      tags=($._config.dashboardTags),
-      timezone='utc',
-      refresh='30s',
-      graphTooltip='shared_crosshair',
-      uid=std.md5('node-multicluster-rsrc-use.json')
     )
-    .addTemplate(datasourceTemplate)
-    .addRow(
+    + dashboard.time.withFrom('now-1h')
+    + dashboard.withTags($._config.dashboardTags)
+    + dashboard.withTimezone('utc')
+    + dashboard.withRefresh('30s')
+    + dashboard.graphTooltip.withSharedCrosshair()
+    + dashboard.withUid(std.md5('node-multicluster-rsrc-use.json'))
+    + dashboard.withVariables([
+      datasource,
+      variable.query.withDatasourceFromVariable(datasource)
+      + variable.query.refresh.onTime()
+      + variable.query.withSort(asc=true),
+    ])
+    + dashboard.withPanels(
+      grafana.util.grid.makeGrid([
        row.new('CPU')
-        .addPanel(
-          CPUUtilisation
-          .addTarget(prometheus.target(
+        + row.withPanels([
+          CPUUtilisation + tsQueryOptions.withTargets([
+            prometheus.new(
+              '$datasource',
             |||
               sum(
                 ((
@@ -346,112 +351,116 @@ local diskSpaceUtilisation =
                ) != 0)
                / scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s}))
              ) by (%(clusterLabel)s)
-            ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
-          ))
-        )
-        .addPanel(
-          CPUSaturation
-          .addTarget(prometheus.target(
+            ||| % $._config
+            ) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'),
+          ]),
+          CPUSaturation + tsQueryOptions.withTargets([
+            prometheus.new(
+              '$datasource',
             |||
               sum((
                 instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s}
                 / scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s}))
              ) != 0) by (%(clusterLabel)s)
-            ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
-          ))
-        )
-      )
-      .addRow(
+            ||| % $._config
+            ) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'),
+          ]),
+        ]),
        row.new('Memory')
-        .addPanel(
-          memoryUtilisation
-          .addTarget(prometheus.target(
+        + row.withPanels([
+          memoryUtilisation + tsQueryOptions.withTargets([
+            prometheus.new(
+              '$datasource',
             |||
               sum((
                 instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s}
                 / scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s}))
              ) != 0) by (%(clusterLabel)s)
-            ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
-          ))
-        )
-        .addPanel(
-          memorySaturation
-          .addTarget(prometheus.target(
+            ||| % $._config
+            ) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'),
+          ]),
+          memorySaturation + tsQueryOptions.withTargets([
+            prometheus.new(
+              '$datasource',
             |||
               sum((
                 instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s}
              ) != 0) by (%(clusterLabel)s)
-            ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
-          ))
-        )
-      )
-      .addRow(
+            |||
+            % $._config
+            ) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'),
+          ]),
+        ]),
        row.new('Network')
-        .addPanel(
-          networkUtilisation
-          .addTarget(prometheus.target(
+        + row.withPanels([
+          networkUtilisation + tsQueryOptions.withTargets([
+            prometheus.new(
+              '$datasource',
             |||
              sum((
                instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
              ) != 0) by (%(clusterLabel)s)
-            ||| % $._config, legendFormat='{{%(clusterLabel)s}} Receive' % $._config
-          ))
-          .addTarget(prometheus.target(
+            ||| % $._config
+            ) + prometheus.withLegendFormat('{{%(clusterLabel)s}} Receive'),
+            prometheus.new(
+              '$datasource',
             |||
              sum((
                instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
              ) != 0) by (%(clusterLabel)s)
-            ||| % $._config, legendFormat='{{%(clusterLabel)s}} Transmit' % $._config
-          ))
-        )
-        .addPanel(
-          networkSaturation
-          .addTarget(prometheus.target(
+            ||| % $._config
+            ) + prometheus.withLegendFormat('{{%(clusterLabel)s}} Transmit'),
+          ]),
+          networkSaturation + tsQueryOptions.withTargets([
+            prometheus.new(
+              '$datasource',
             |||
              sum((
                instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
              ) != 0) by (%(clusterLabel)s)
-            ||| % $._config, legendFormat='{{%(clusterLabel)s}} Receive' % $._config
-          ))
-          .addTarget(prometheus.target(
+            ||| % $._config
+            ) + prometheus.withLegendFormat('{{%(clusterLabel)s}} Receive'),
+            prometheus.new(
+              '$datasource',
             |||
              sum((
                instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
              ) != 0) by (%(clusterLabel)s)
-            ||| % $._config, legendFormat='{{%(clusterLabel)s}} Transmit' % $._config
-          ))
-        )
-      )
-      .addRow(
+            ||| % $._config
+            ) + prometheus.withLegendFormat('{{%(clusterLabel)s}} Transmit'),
+          ]),
+        ]),
        row.new('Disk IO')
-        .addPanel(
-          diskIOUtilisation
-          .addTarget(prometheus.target(
+        + row.withPanels([
+          diskIOUtilisation + tsQueryOptions.withTargets([
+            prometheus.new(
+              '$datasource',
             |||
              sum((
                instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}
                / scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}))
              ) != 0) by (%(clusterLabel)s, device)
-            ||| % $._config, legendFormat='{{%(clusterLabel)s}} {{device}}' % $._config
-          ))
-        )
-        .addPanel(
-          diskIOSaturation
-          .addTarget(prometheus.target(
+            ||| % $._config
+            ) + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{device}}'),
+          ]),
+          diskIOSaturation + tsQueryOptions.withTargets([prometheus.new(
+            '$datasource',
            |||
              sum((
                instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}
                / scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}))
              ) != 0) by (%(clusterLabel)s, device)
-            ||| % $._config, legendFormat='{{%(clusterLabel)s}} {{device}}' % $._config
-          ))
-        )
-      )
-      .addRow(
+            ||| % $._config
+          ) + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{device}}')]),
+        ]),
+      ], panelWidth=12, panelHeight=7)
+      + grafana.util.grid.makeGrid([
        row.new('Disk Space')
-        .addPanel(
-          diskSpaceUtilisation
-          .addTarget(prometheus.target(
+        + row.withPanels([
+          diskSpaceUtilisation + tsQueryOptions.withTargets([
+            prometheus.new(
+              '$datasource',
            |||
              sum (
                sum without (device) (
@@ -461,9 +470,11 @@ local diskSpaceUtilisation =
                )
                / scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s})))
              ) by (%(clusterLabel)s)
-            ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
-          ))
-        )
+            ||| % $._config
+            ) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'),
+          ]),
+        ]),
+      ], panelWidth=24, panelHeight=7, startY=34),
     ),
 } else {},
 }
 
@@ -4,20 +4,11 @@
   {
     "source": {
       "git": {
-        "remote": "https://github.com/grafana/grafonnet-lib.git",
-        "subdir": "grafonnet"
+        "remote": "https://github.com/grafana/grafonnet.git",
+        "subdir": "gen/grafonnet-latest"
       }
     },
-    "version": "master"
-  },
-  {
-    "source": {
-      "git": {
-        "remote": "https://github.com/grafana/grafonnet-lib.git",
-        "subdir": "grafonnet-7.0"
-      }
-    },
-    "version": "master"
+    "version": "main"
   }
 ],
 "legacyImports": false
 
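[Editor's note: the jsonnetfile change above drops the archived grafonnet-lib (both its classic `grafonnet` and `grafonnet-7.0` trees) in favour of the single generated grafonnet package. With jsonnet-bundler, a plausible way to pull in the same dependency in a checkout would be the following — an illustrative invocation, not taken from this commit:

  jb install github.com/grafana/grafonnet/gen/grafonnet-latest@main
]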
@@ -1,76 +1,85 @@
-local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
 local dashboard = grafana.dashboard;
-local row = grafana.row;
-local prometheus = grafana.prometheus;
-local template = grafana.template;
-local graphPanel = grafana.graphPanel;
-local grafana70 = import 'github.com/grafana/grafonnet-lib/grafonnet-7.0/grafana.libsonnet';
-local gaugePanel = grafana70.panel.gauge;
-local table = grafana70.panel.table;
+local row = grafana.panel.row;
+local prometheus = grafana.query.prometheus;
+local variable = dashboard.variable;
+local timeSeriesPanel = grafana.panel.timeSeries;
+local tsOptions = timeSeriesPanel.options;
+local tsStandardOptions = timeSeriesPanel.standardOptions;
+local tsQueryOptions = timeSeriesPanel.queryOptions;
+local tsCustom = timeSeriesPanel.fieldConfig.defaults.custom;
+
+local gaugePanel = grafana.panel.gauge;
+local gaugeStep = gaugePanel.standardOptions.threshold.step;
+
+local table = grafana.panel.table;
+local tableStep = table.standardOptions.threshold.step;
+local tableOverride = table.standardOptions.override;
+local tableTransformation = table.queryOptions.transformation;
 
 {
 
   new(config=null, platform=null, uid=null):: {
 
-    local prometheusDatasourceTemplate = {
-      current: {
-        text: 'default',
-        value: 'default',
-      },
-      hide: 0,
-      label: 'Data Source',
-      name: 'datasource',
-      options: [],
-      query: 'prometheus',
-      refresh: 1,
-      regex: '',
-      type: 'datasource',
-    },
+    local prometheusDatasourceVariable = variable.datasource.new(
+      'datasource', 'prometheus'
 
-    local clusterTemplatePrototype =
-      template.new(
-        'cluster',
-        '$datasource',
-        '',
-        hide=if config.showMultiCluster then '' else '2',
-        refresh='time',
-        label='Cluster',
       ),
-    local clusterTemplate =
-      if platform == 'Darwin' then
-        clusterTemplatePrototype
-        { query: 'label_values(node_uname_info{%(nodeExporterSelector)s, sysname="Darwin"}, %(clusterLabel)s)' % config }
-      else
-        clusterTemplatePrototype
-        { query: 'label_values(node_uname_info{%(nodeExporterSelector)s, sysname!="Darwin"}, %(clusterLabel)s)' % config },
 
-    local instanceTemplatePrototype =
-      template.new(
+    local clusterVariablePrototype =
+      variable.query.new('cluster')
+      + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable)
+      + (if config.showMultiCluster then variable.query.generalOptions.showOnDashboard.withLabelAndValue() else variable.query.generalOptions.showOnDashboard.withNothing())
+      + variable.query.refresh.onTime()
+      + variable.query.generalOptions.withLabel('Cluster'),
+
+    local clusterVariable =
+      if platform == 'Darwin' then
+        clusterVariablePrototype
+        + variable.query.queryTypes.withLabelValues(
+          ' %(clusterLabel)s' % config,
+          'node_uname_info{%(nodeExporterSelector)s, sysname="Darwin"}' % config,
+        )
+      else
+        clusterVariablePrototype
+        + variable.query.queryTypes.withLabelValues(
+          '%(clusterLabel)s' % config,
+          'node_uname_info{%(nodeExporterSelector)s, sysname!="Darwin"}' % config,
+        ),
+
+    local instanceVariablePrototype =
+      variable.query.new('instance')
+      + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable)
+      + variable.query.refresh.onTime()
+      + variable.query.generalOptions.withLabel('Instance'),
+
+    local instanceVariable =
+      if platform == 'Darwin' then
+        instanceVariablePrototype
+        + variable.query.queryTypes.withLabelValues(
           'instance',
-          '$datasource',
-          '',
-          refresh='time',
-          label='Instance',
-        ),
-    local instanceTemplate =
-      if platform == 'Darwin' then
-        instanceTemplatePrototype
-        { query: 'label_values(node_uname_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster", sysname="Darwin"}, instance)' % config }
+          'node_uname_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster", sysname="Darwin"}' % config,
+        )
       else
-        instanceTemplatePrototype
-        { query: 'label_values(node_uname_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster", sysname!="Darwin"}, instance)' % config },
+        instanceVariablePrototype
+        + variable.query.queryTypes.withLabelValues(
+          'instance',
+          'node_uname_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster", sysname!="Darwin"}' % config,
+        ),
 
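[Editor's note: the template-variable migration above follows one pattern — `template.new(...)` with keyword arguments becomes a `variable.query.new(...)` chain, and the old `label_values(metric, label)` query string is split into its label and metric parts by `queryTypes.withLabelValues(label, metric)`. A condensed, runnable sketch of the pattern, using only calls visible in this hunk (the metric selector is simplified; the real file templates it from `config`):

  local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
  local variable = grafana.dashboard.variable;

  local datasourceVariable = variable.datasource.new('datasource', 'prometheus');

  local instanceVariable =
    variable.query.new('instance')
    + variable.query.withDatasourceFromVariable(datasourceVariable)
    + variable.query.queryTypes.withLabelValues(
      'instance',                            // label to list
      'node_uname_info{sysname!="Darwin"}',  // metric selector (illustrative)
    )
    + variable.query.refresh.onTime()
    + variable.query.generalOptions.withLabel('Instance');
]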
     local idleCPU =
-      graphPanel.new(
-        'CPU Usage',
-        datasource='$datasource',
-        span=6,
-        format='percentunit',
-        max=1,
-        min=0,
-        stack=true,
-      )
-      .addTarget(prometheus.target(
+      timeSeriesPanel.new('CPU Usage')
+      + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable)
+      + tsStandardOptions.withUnit('percentunit')
+      + tsCustom.stacking.withMode('normal')
+      + tsStandardOptions.withMax(1)
+      + tsStandardOptions.withMin(0)
+      + tsOptions.tooltip.withMode('multi')
+      + tsCustom.withFillOpacity(10)
+      + tsCustom.withShowPoints('never')
+      + tsQueryOptions.withTargets([
+        prometheus.new(
+          '$datasource',
           |||
             (
               (1 - sum without (mode) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode=~"idle|iowait|steal", instance="$instance", %(clusterLabel)s="$cluster"}[$__rate_interval])))
@@ -78,36 +87,42 @@ local table = grafana70.panel.table;
             count without (cpu, mode) (node_cpu_seconds_total{%(nodeExporterSelector)s, mode="idle", instance="$instance", %(clusterLabel)s="$cluster"})
             )
           ||| % config,
-        legendFormat='{{cpu}}',
-        intervalFactor=5,
-      )),
+        )
+        + prometheus.withLegendFormat('{{cpu}}')
+        + prometheus.withIntervalFactor(5),
+      ]),
 
     local systemLoad =
-      graphPanel.new(
-        'Load Average',
-        datasource='$datasource',
-        span=6,
-        format='short',
-        min=0,
-        fill=0,
-      )
-      .addTarget(prometheus.target('node_load1{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config, legendFormat='1m load average'))
-      .addTarget(prometheus.target('node_load5{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config, legendFormat='5m load average'))
-      .addTarget(prometheus.target('node_load15{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config, legendFormat='15m load average'))
-      .addTarget(prometheus.target('count(node_cpu_seconds_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", mode="idle"})' % config, legendFormat='logical cores')),
+      timeSeriesPanel.new('Load Average')
+      + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable)
+      + tsStandardOptions.withUnit('short')
+      + tsStandardOptions.withMin(0)
+      + tsCustom.withFillOpacity(0)
+      + tsCustom.withShowPoints('never')
+      + tsOptions.tooltip.withMode('multi')
+      + tsQueryOptions.withTargets([
+        prometheus.new('$datasource', 'node_load1{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('1m load average'),
+        prometheus.new('$datasource', 'node_load5{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('5m load average'),
+        prometheus.new('$datasource', 'node_load15{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('15m load average'),
+        prometheus.new('$datasource', 'count(node_cpu_seconds_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", mode="idle"})' % config) + prometheus.withLegendFormat('logical cores'),
+      ]),
 
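[Editor's note: targets change shape the same way everywhere in this commit — `prometheus.target(expr, legendFormat=..., intervalFactor=...)` becomes `prometheus.new('$datasource', expr)` plus `with*` mixins, collected under `tsQueryOptions.withTargets([...])`. A small runnable sketch of one target in the new style (expression shortened for illustration):

  local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
  local timeSeriesPanel = grafana.panel.timeSeries;
  local tsQueryOptions = timeSeriesPanel.queryOptions;
  local prometheus = grafana.query.prometheus;

  local loadPanel =
    timeSeriesPanel.new('Load Average')
    + tsQueryOptions.withTargets([
      prometheus.new('$datasource', 'node_load1{instance="$instance"}')
      + prometheus.withLegendFormat('1m load average'),
    ]);
]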
     local memoryGraphPanelPrototype =
-      graphPanel.new(
-        'Memory Usage',
-        datasource='$datasource',
-        span=9,
-        format='bytes',
-        min=0,
-      ),
+      timeSeriesPanel.new('Memory Usage')
+      + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable)
+      + tsStandardOptions.withUnit('bytes')
+      + tsStandardOptions.withMin(0)
+      + tsOptions.tooltip.withMode('multi')
+      + tsCustom.withFillOpacity(10)
+      + tsCustom.withShowPoints('never'),
 
     local memoryGraph =
       if platform == 'Linux' then
-        memoryGraphPanelPrototype { stack: true }
-        .addTarget(prometheus.target(
+        memoryGraphPanelPrototype
+        + tsCustom.stacking.withMode('normal')
+        + tsQueryOptions.withTargets([
+          prometheus.new(
+            '$datasource',
             |||
               (
                 node_memory_MemTotal_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}
@@ -119,16 +134,19 @@ local table = grafana70.panel.table;
               node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}
               )
             ||| % config,
-          legendFormat='memory used'
-        ))
-        .addTarget(prometheus.target('node_memory_Buffers_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config, legendFormat='memory buffers'))
-        .addTarget(prometheus.target('node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config, legendFormat='memory cached'))
-        .addTarget(prometheus.target('node_memory_MemFree_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config, legendFormat='memory free'))
+          ) + prometheus.withLegendFormat('memory used'),
+          prometheus.new('$datasource', 'node_memory_Buffers_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('memory buffers'),
+          prometheus.new('$datasource', 'node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('memory cached'),
+          prometheus.new('$datasource', 'node_memory_MemFree_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('memory free'),
+        ])
       else if platform == 'Darwin' then
         // not useful to stack
-        memoryGraphPanelPrototype { stack: false }
-        .addTarget(prometheus.target('node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config, legendFormat='Physical Memory'))
-        .addTarget(prometheus.target(
+        memoryGraphPanelPrototype
+        + tsCustom.stacking.withMode('none')
+        + tsQueryOptions.withTargets([
+          prometheus.new('$datasource', 'node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('Physical Memory'),
+          prometheus.new(
+            '$datasource',
             |||
               (
                 node_memory_internal_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} -
@@ -136,50 +154,61 @@ local table = grafana70.panel.table;
               node_memory_wired_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} +
               node_memory_compressed_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}
               )
-            ||| % config, legendFormat='Memory Used'
-        ))
-        .addTarget(prometheus.target(
+            ||| % config
+          ) + prometheus.withLegendFormat(
+            'Memory Used'
+          ),
+          prometheus.new(
+            '$datasource',
             |||
               (
                 node_memory_internal_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} -
                 node_memory_purgeable_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}
               )
-            ||| % config, legendFormat='App Memory'
-        ))
-        .addTarget(prometheus.target('node_memory_wired_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config, legendFormat='Wired Memory'))
-        .addTarget(prometheus.target('node_memory_compressed_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config, legendFormat='Compressed'))
+            ||| % config
+          ) + prometheus.withLegendFormat(
+            'App Memory'
+          ),
+          prometheus.new('$datasource', 'node_memory_wired_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('Wired Memory'),
+          prometheus.new('$datasource', 'node_memory_compressed_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('Compressed'),
+        ])
 
       else if platform == 'AIX' then
-        memoryGraphPanelPrototype { stack: false }
-        .addTarget(prometheus.target('node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config, legendFormat='Physical Memory'))
-        .addTarget(prometheus.target(
+        memoryGraphPanelPrototype
+        + tsCustom.stacking.withMode('none')
+        + tsQueryOptions.withTargets([
+          prometheus.new('$datasource', 'node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}' % config) + prometheus.withLegendFormat('Physical Memory'),
+          prometheus.new(
+            '$datasource',
            |||
              (
                node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} -
                node_memory_available_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"}
              )
-            ||| % config, legendFormat='Memory Used'
-        )),
+            ||| % config
+          ) + prometheus.withLegendFormat('Memory Used'),
+        ]),
 
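[Editor's note: in the memory panels above, the old `{ stack: true }` / `{ stack: false }` object patches become explicit stacking-mode mixins, so the per-platform branches now differ only in the mixins and targets they add. Condensed sketch of that shape (targets elided, names as imported above):

  local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
  local tsCustom = grafana.panel.timeSeries.fieldConfig.defaults.custom;

  // 'normal' stacks the series (Linux); 'none' leaves them unstacked (Darwin, AIX).
  local stackedIf(platform, prototype) =
    if platform == 'Linux' then
      prototype + tsCustom.stacking.withMode('normal')
    else
      prototype + tsCustom.stacking.withMode('none');
]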
     // NOTE: avg() is used to circumvent a label change caused by a node_exporter rollout.
     local memoryGaugePanelPrototype =
-      gaugePanel.new(
-        title='Memory Usage',
-        datasource='$datasource',
-      )
-      .addThresholdStep('rgba(50, 172, 45, 0.97)')
-      .addThresholdStep('rgba(237, 129, 40, 0.89)', 80)
-      .addThresholdStep('rgba(245, 54, 54, 0.9)', 90)
-      .setFieldConfig(max=100, min=0, unit='percent')
-      + {
-        span: 3,
-      },
+      gaugePanel.new('Memory Usage')
+      + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable)
+      + gaugePanel.standardOptions.thresholds.withSteps([
+        gaugeStep.withColor('rgba(50, 172, 45, 0.97)'),
+        gaugeStep.withColor('rgba(237, 129, 40, 0.89)') + gaugeStep.withValue(80),
+        gaugeStep.withColor('rgba(245, 54, 54, 0.9)') + gaugeStep.withValue(90),
+      ])
+      + gaugePanel.standardOptions.withMax(100)
+      + gaugePanel.standardOptions.withMin(0)
+      + gaugePanel.standardOptions.withUnit('percent'),
 
     local memoryGauge =
       if platform == 'Linux' then
         memoryGaugePanelPrototype
-        .addTarget(prometheus.target(
+        + gaugePanel.queryOptions.withTargets([
+          prometheus.new(
+            '$datasource',
             |||
               100 -
               (
@@ -188,11 +217,14 @@ local table = grafana70.panel.table;
               * 100
               )
             ||| % config,
-        ))
+          ),
+        ])
 
       else if platform == 'Darwin' then
         memoryGaugePanelPrototype
-        .addTarget(prometheus.target(
+        + gaugePanel.queryOptions.withTargets([
+          prometheus.new(
+            '$datasource',
             |||
               (
                 (
@@ -206,10 +238,14 @@ local table = grafana70.panel.table;
               *
               100
             ||| % config
-        ))
+          ),
+        ])
 
       else if platform == 'AIX' then
         memoryGaugePanelPrototype
-        .addTarget(prometheus.target(
+        + gaugePanel.queryOptions.withTargets([
+          prometheus.new(
+            '$datasource',
             |||
               100 -
               (
@@ -218,156 +254,94 @@ local table = grafana70.panel.table;
               * 100
               )
             ||| % config
-        )),
+          ),
+        ]),
 
     local diskIO =
-      graphPanel.new(
-        'Disk I/O',
-        datasource='$datasource',
-        span=6,
-        min=0,
-        fill=0,
-      )
+      timeSeriesPanel.new('Disk I/O')
+      + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable)
+      + tsStandardOptions.withMin(0)
+      + tsCustom.withFillOpacity(0)
+      + tsCustom.withShowPoints('never')
+      + tsOptions.tooltip.withMode('multi')
+      + tsQueryOptions.withTargets([
       // TODO: Does it make sense to have those three in the same panel?
-      .addTarget(prometheus.target(
-        'rate(node_disk_read_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(diskDeviceSelector)s}[$__rate_interval])' % config,
-        legendFormat='{{device}} read',
-        intervalFactor=1,
-      ))
-      .addTarget(prometheus.target(
-        'rate(node_disk_written_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(diskDeviceSelector)s}[$__rate_interval])' % config,
-        legendFormat='{{device}} written',
-        intervalFactor=1,
-      ))
-      .addTarget(prometheus.target(
-        'rate(node_disk_io_time_seconds_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(diskDeviceSelector)s}[$__rate_interval])' % config,
-        legendFormat='{{device}} io time',
-        intervalFactor=1,
-      )) +
-      {
-        seriesOverrides: [
-          {
-            alias: '/ read| written/',
-            yaxis: 1,
-          },
-          {
-            alias: '/ io time/',
-            yaxis: 2,
-          },
-        ],
-        yaxes: [
-          self.yaxe(format='Bps'),
-          self.yaxe(format='percentunit'),
-        ],
-      },
+        prometheus.new('$datasource', 'rate(node_disk_read_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(diskDeviceSelector)s}[$__rate_interval])' % config)
+        + prometheus.withLegendFormat('{{device}} read')
+        + prometheus.withIntervalFactor(1),
+        prometheus.new('$datasource', 'rate(node_disk_written_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(diskDeviceSelector)s}[$__rate_interval])' % config)
+        + prometheus.withLegendFormat('{{device}} written')
+        + prometheus.withIntervalFactor(1),
+        prometheus.new('$datasource', 'rate(node_disk_io_time_seconds_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(diskDeviceSelector)s}[$__rate_interval])' % config)
+        + prometheus.withLegendFormat('{{device}} io time')
+        + prometheus.withIntervalFactor(1),
+      ])
+      + tsStandardOptions.withOverrides(
+        [
+          tsStandardOptions.override.byRegexp.new('/ read| written/')
+          + tsStandardOptions.override.byRegexp.withPropertiesFromOptions(
+            tsStandardOptions.withUnit('Bps')
+          ),
+          tsStandardOptions.override.byRegexp.new('/ io time/')
+          + tsStandardOptions.override.byRegexp.withPropertiesFromOptions(tsStandardOptions.withUnit('percentunit')),
+        ]
+      ),
 
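[Editor's note: the dual-axis handling above is the one real behavioural translation in this hunk — graphPanel's `seriesOverrides`/`yaxes` pair (Bps for the read/written series, percentunit for io time) becomes field-config overrides matched by regexp. Runnable, condensed form using only calls visible in the diff:

  local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
  local tsStandardOptions = grafana.panel.timeSeries.standardOptions;

  local diskIOOverrides = tsStandardOptions.withOverrides([
    tsStandardOptions.override.byRegexp.new('/ read| written/')
    + tsStandardOptions.override.byRegexp.withPropertiesFromOptions(tsStandardOptions.withUnit('Bps')),
    tsStandardOptions.override.byRegexp.new('/ io time/')
    + tsStandardOptions.override.byRegexp.withPropertiesFromOptions(tsStandardOptions.withUnit('percentunit')),
  ]);
]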
 local diskSpaceUsage =
-  table.new(
-    title='Disk Space Usage',
-    datasource='$datasource',
-  )
-  .setFieldConfig(unit='decbytes')
-  .addThresholdStep(color='green', value=null)
-  .addThresholdStep(color='yellow', value=0.8)
-  .addThresholdStep(color='red', value=0.9)
-  .addTarget(prometheus.target(
-    |||
-      max by (mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(fsSelector)s, %(fsMountpointSelector)s})
-    ||| % config,
-    legendFormat='',
-    instant=true,
-    format='table'
-  ))
-  .addTarget(prometheus.target(
-    |||
-      max by (mountpoint) (node_filesystem_avail_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(fsSelector)s, %(fsMountpointSelector)s})
-    ||| % config,
-    legendFormat='',
-    instant=true,
-    format='table'
-  ))
-  .addOverride(
-    matcher={
-      id: 'byName',
-      options: 'Mounted on',
-    },
-    properties=[
-      {
-        id: 'custom.width',
-        value: 260,
-      },
-    ],
-  )
-  .addOverride(
-    matcher={
-      id: 'byName',
-      options: 'Size',
-    },
-    properties=[
-      {
-        id: 'custom.width',
-        value: 93,
-      },
-    ],
-  )
-  .addOverride(
-    matcher={
-      id: 'byName',
-      options: 'Used',
-    },
-    properties=[
-      {
-        id: 'custom.width',
-        value: 72,
-      },
-    ],
-  )
-  .addOverride(
-    matcher={
-      id: 'byName',
-      options: 'Available',
-    },
-    properties=[
-      {
-        id: 'custom.width',
-        value: 88,
-      },
-    ],
-  )
-  .addOverride(
-    matcher={
-      id: 'byName',
-      options: 'Used, %',
-    },
-    properties=[
-      {
-        id: 'unit',
-        value: 'percentunit',
-      },
-      {
-        id: 'custom.displayMode',
-        value: 'gradient-gauge',
-      },
-      {
-        id: 'max',
-        value: 1,
-      },
-      {
-        id: 'min',
-        value: 0,
-      },
+  table.new('Disk Space Usage')
+  + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable)
+  + table.standardOptions.withUnit('decbytes')
+  + table.standardOptions.thresholds.withSteps(
+    [
+      tableStep.withColor('green'),
+      tableStep.withColor('yellow') + gaugeStep.withValue(0.8),
+      tableStep.withColor('red') + gaugeStep.withValue(0.9),
     ]
   )
-  + { span: 6 }
-  + {
-    transformations: [
+  + table.queryOptions.withTargets([
+    prometheus.new(
+      '$datasource',
+      |||
+        max by (mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(fsSelector)s, %(fsMountpointSelector)s})
+      ||| % config
+    )
+    + prometheus.withLegendFormat('')
+    + prometheus.withInstant()
+    + prometheus.withFormat('table'),
+    prometheus.new(
+      '$datasource',
+      |||
+        max by (mountpoint) (node_filesystem_avail_bytes{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", %(fsSelector)s, %(fsMountpointSelector)s})
+      ||| % config
+    )
+    + prometheus.withLegendFormat('')
+    + prometheus.withInstant()
+    + prometheus.withFormat('table'),
+  ])
+  + table.standardOptions.withOverrides([
+    tableOverride.byName.new('Mounted on')
+    + tableOverride.byName.withProperty('custom.width', 260),
+    tableOverride.byName.new('Size')
+    + tableOverride.byName.withProperty('custom.width', 93),
+    tableOverride.byName.new('Used')
+    + tableOverride.byName.withProperty('custom.width', 72),
+    tableOverride.byName.new('Available')
+    + tableOverride.byName.withProperty('custom.width', 88),
+    tableOverride.byName.new('Used, %')
+    + tableOverride.byName.withProperty('unit', 'percentunit')
+    + tableOverride.byName.withPropertiesFromOptions(
+      table.fieldConfig.defaults.custom.withCellOptions(
+        { type: 'gauge' },
+      )
+    )
+    + tableOverride.byName.withProperty('max', 1)
+    + tableOverride.byName.withProperty('min', 0),
+  ])
+  + table.queryOptions.withTransformations([
+    tableTransformation.withId('groupBy')
+    + tableTransformation.withOptions(
       {
-        id: 'groupBy',
-        options: {
         fields: {
           'Value #A': {
             aggregations: [
@@ -386,15 +360,12 @@ local table = grafana70.panel.table;
             operation: 'groupby',
           },
         },
-        },
-      },
+      }
+    ),
+    tableTransformation.withId('merge'),
+    tableTransformation.withId('calculateField')
+    + tableTransformation.withOptions(
       {
-        id: 'merge',
-        options: {},
-      },
-      {
-        id: 'calculateField',
-        options: {
         alias: 'Used',
         binary: {
           left: 'Value #A (lastNotNull)',
@@ -406,11 +377,11 @@ local table = grafana70.panel.table;
         reduce: {
           reducer: 'sum',
         },
-        },
-      },
+      }
+    ),
+    tableTransformation.withId('calculateField')
+    + tableTransformation.withOptions(
       {
-        id: 'calculateField',
-        options: {
         alias: 'Used, %',
         binary: {
           left: 'Used',
@@ -422,11 +393,11 @@ local table = grafana70.panel.table;
         reduce: {
           reducer: 'sum',
         },
-        },
-      },
+      }
+    ),
+    tableTransformation.withId('organize')
+    + tableTransformation.withOptions(
       {
-        id: 'organize',
-        options: {
         excludeByName: {},
         indexByName: {},
         renameByName: {
@@ -434,127 +405,131 @@ local table = grafana70.panel.table;
           'Value #B (lastNotNull)': 'Available',
           mountpoint: 'Mounted on',
         },
-        },
-      },
+      }
+    ),
+    tableTransformation.withId('sortBy')
+    + tableTransformation.withOptions(
       {
-        id: 'sortBy',
-        options: {
         fields: {},
         sort: [
           {
             field: 'Mounted on',
           },
         ],
-        },
-      },
-    ],
-  },
+      }
+    ),
+  ]),
 
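The table rewrite above replaces grafonnet's chained .addTarget()/.addOverride()/.addThresholdStep() builders with composable + mixins from grafonnet-latest; each with*() returns a partial object that jsonnet's + merges into the panel. A minimal, self-contained sketch of the same table-panel shape — assuming the usual jsonnet-bundler vendor path for grafonnet and a trivial 'up' query standing in for the real filesystem queries:

local g = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
local table = g.panel.table;
local prometheus = g.query.prometheus;

// Build a table panel by summing mixins instead of chaining methods.
table.new('Example Table')
+ table.standardOptions.withUnit('decbytes')
+ table.queryOptions.withTargets([
  prometheus.new('$datasource', 'up')  // stand-in for the real instant query
  + prometheus.withInstant()
  + prometheus.withFormat('table'),
])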
 local networkReceived =
-  graphPanel.new(
-    'Network Received',
-    description='Network received (bits/s)',
-    datasource='$datasource',
-    span=6,
-    format='bps',
-    min=0,
-    fill=0,
-  )
-  .addTarget(prometheus.target(
-    'rate(node_network_receive_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", device!="lo"}[$__rate_interval]) * 8' % config,
-    legendFormat='{{device}}',
-    intervalFactor=1,
-  )),
+  timeSeriesPanel.new('Network Received')
+  + timeSeriesPanel.panelOptions.withDescription('Network received (bits/s)')
+  + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable)
+  + tsStandardOptions.withUnit('bps')
+  + tsStandardOptions.withMin(0)
+  + tsCustom.withFillOpacity(0)
+  + tsCustom.withShowPoints('never')
+  + tsOptions.tooltip.withMode('multi')
+  + tsQueryOptions.withTargets([
+    prometheus.new('$datasource', 'rate(node_network_receive_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", device!="lo"}[$__rate_interval]) * 8' % config)
+    + prometheus.withLegendFormat('{{device}}')
+    + prometheus.withIntervalFactor(1),
+  ]),
 
 local networkTransmitted =
-  graphPanel.new(
-    'Network Transmitted',
-    description='Network transmitted (bits/s)',
-    datasource='$datasource',
-    span=6,
-    format='bps',
-    min=0,
-    fill=0,
-  )
-  .addTarget(prometheus.target(
-    'rate(node_network_transmit_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", device!="lo"}[$__rate_interval]) * 8' % config,
-    legendFormat='{{device}}',
-    intervalFactor=1,
-  )),
+  timeSeriesPanel.new('Network Transmitted')
+  + timeSeriesPanel.panelOptions.withDescription('Network transmitted (bits/s)')
+  + variable.query.withDatasourceFromVariable(prometheusDatasourceVariable)
+  + tsStandardOptions.withUnit('bps')
+  + tsStandardOptions.withMin(0)
+  + tsCustom.withFillOpacity(0)
+  + tsOptions.tooltip.withMode('multi')
+  + tsQueryOptions.withTargets([
+    prometheus.new('$datasource', 'rate(node_network_transmit_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster", device!="lo"}[$__rate_interval]) * 8' % config)
+    + prometheus.withLegendFormat('{{device}}')
+    + prometheus.withIntervalFactor(1),
+  ]),
 
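The network panels show the graphPanel-to-timeSeriesPanel mapping at its simplest: the old constructor keyword arguments (format=, min=, fill=) become with*() mixins on the panel's standardOptions and fieldConfig. A minimal sketch under the same import assumption, with a placeholder expression in place of the selector-templated query:

local g = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
local timeSeriesPanel = g.panel.timeSeries;
local prometheus = g.query.prometheus;

timeSeriesPanel.new('Example Rate')
+ timeSeriesPanel.standardOptions.withUnit('bps')                 // was format='bps'
+ timeSeriesPanel.standardOptions.withMin(0)                      // was min=0
+ timeSeriesPanel.fieldConfig.defaults.custom.withFillOpacity(0)  // was fill=0
+ timeSeriesPanel.queryOptions.withTargets([
  prometheus.new('$datasource', 'rate(node_network_receive_bytes_total[$__rate_interval]) * 8')
  + prometheus.withLegendFormat('{{device}}'),
])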
 local cpuRow =
   row.new('CPU')
-  .addPanel(idleCPU)
-  .addPanel(systemLoad),
+  + row.withPanels([
+    idleCPU,
+    systemLoad,
+  ]),
 
-local memoryRow =
-  row.new('Memory')
-  .addPanel(memoryGraph)
-  .addPanel(memoryGauge),
+local memoryRow = [
+  row.new('Memory') + row.gridPos.withY(8),
+  memoryGraph + row.gridPos.withX(0) + row.gridPos.withY(9) + row.gridPos.withH(7) + row.gridPos.withW(18),
+  memoryGauge + row.gridPos.withX(18) + row.gridPos.withY(9) + row.gridPos.withH(7) + row.gridPos.withW(6),
+],
 
 local diskRow =
   row.new('Disk')
-  .addPanel(diskIO)
-  .addPanel(diskSpaceUsage),
+  + row.withPanels([
+    diskIO,
+    diskSpaceUsage,
+  ]),
 
 local networkRow =
   row.new('Network')
-  .addPanel(networkReceived)
-  .addPanel(networkTransmitted),
+  + row.withPanels([
+    networkReceived,
+    networkTransmitted,
+  ]),
 
-local rows =
-  [
+local panels =
+  grafana.util.grid.makeGrid([
     cpuRow,
-    memoryRow,
+  ], panelWidth=12, panelHeight=7)
+  + memoryRow
+  + grafana.util.grid.makeGrid([
     diskRow,
     networkRow,
-  ],
+  ], panelWidth=12, panelHeight=7, startY=18),
 
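grafana.util.grid.makeGrid takes a flat list of row and panel objects and assigns each panel a gridPos of the given width and height in reading order, which is why the old per-panel span bookkeeping disappears; only memoryRow keeps hand-written coordinates. A sketch of the call shape, same assumed import path, with placeholder panels:

local g = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';

local exampleRow =
  g.panel.row.new('Example Row')
  + g.panel.row.withPanels([
    g.panel.timeSeries.new('Panel A'),
    g.panel.timeSeries.new('Panel B'),
  ]);

// Two 12-unit-wide, 7-unit-tall panels fill the 24-unit grid side by side.
g.util.grid.makeGrid([exampleRow], panelWidth=12, panelHeight=7)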
-local templates =
+local variables =
   [
-    prometheusDatasourceTemplate,
-    clusterTemplate,
-    instanceTemplate,
+    prometheusDatasourceVariable,
+    clusterVariable,
+    instanceVariable,
   ],
 
 dashboard: if platform == 'Linux' then
   dashboard.new(
     '%sNodes' % config.dashboardNamePrefix,
-    time_from='now-1h',
-    tags=(config.dashboardTags),
-    timezone='utc',
-    refresh='30s',
-    uid=std.md5(uid),
-    graphTooltip='shared_crosshair'
   )
-  .addTemplates(templates)
-  .addRows(rows)
+  + dashboard.time.withFrom('now-1h')
+  + dashboard.withTags(config.dashboardTags)
+  + dashboard.withTimezone('utc')
+  + dashboard.withRefresh('30s')
+  + dashboard.withUid(std.md5(uid))
+  + dashboard.graphTooltip.withSharedCrosshair()
+  + dashboard.withVariables(variables)
+  + dashboard.withPanels(panels)
 else if platform == 'Darwin' then
   dashboard.new(
     '%sMacOS' % config.dashboardNamePrefix,
-    time_from='now-1h',
-    tags=(config.dashboardTags),
-    timezone='utc',
-    refresh='30s',
-    uid=std.md5(uid),
-    graphTooltip='shared_crosshair'
   )
-  .addTemplates(templates)
-  .addRows(rows)
+  + dashboard.time.withFrom('now-1h')
+  + dashboard.withTags(config.dashboardTags)
+  + dashboard.withTimezone('utc')
+  + dashboard.withRefresh('30s')
+  + dashboard.withUid(std.md5(uid))
+  + dashboard.graphTooltip.withSharedCrosshair()
+  + dashboard.withVariables(variables)
+  + dashboard.withPanels(panels)
 else if platform == 'AIX' then
   dashboard.new(
     '%sAIX' % config.dashboardNamePrefix,
-    time_from='now-1h',
-    tags=(config.dashboardTags),
-    timezone='utc',
-    refresh='30s',
-    uid=std.md5(uid),
-    graphTooltip='shared_crosshair'
   )
-  .addTemplates(templates)
-  .addRows(rows),
+  + dashboard.time.withFrom('now-1h')
+  + dashboard.withTags(config.dashboardTags)
+  + dashboard.withTimezone('utc')
+  + dashboard.withRefresh('30s')
+  + dashboard.withUid(std.md5(uid))
+  + dashboard.graphTooltip.withSharedCrosshair()
+  + dashboard.withVariables(variables)
+  + dashboard.withPanels(panels),
 
 },
 }
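The dashboard constructors follow the same recipe: dashboard.new keeps only the title, and every former keyword argument becomes a + dashboard.with*() mixin. A minimal sketch under the same import assumption, with the panels list left empty as a placeholder:

local g = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
local dashboard = g.dashboard;

dashboard.new('Example Nodes')
+ dashboard.time.withFrom('now-1h')            // was time_from='now-1h'
+ dashboard.withTimezone('utc')                // was timezone='utc'
+ dashboard.withRefresh('30s')                 // was refresh='30s'
+ dashboard.graphTooltip.withSharedCrosshair() // was graphTooltip='shared_crosshair'
+ dashboard.withPanels([])                     // grid built with makeGrid as above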
go.mod (24 changed lines)
@@ -18,18 +18,18 @@ require (
 	github.com/mattn/go-xmlrpc v0.0.3
 	github.com/mdlayher/ethtool v0.2.0
 	github.com/mdlayher/netlink v1.7.2
-	github.com/mdlayher/wifi v0.3.0
+	github.com/mdlayher/wifi v0.3.1
 	github.com/opencontainers/selinux v1.11.1
 	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55
 	github.com/prometheus-community/go-runit v0.1.0
 	github.com/prometheus/client_golang v1.20.5
 	github.com/prometheus/client_model v0.6.1
-	github.com/prometheus/common v0.60.1
-	github.com/prometheus/exporter-toolkit v0.13.1
-	github.com/prometheus/procfs v0.15.1
-	github.com/safchain/ethtool v0.4.1
+	github.com/prometheus/common v0.61.0
+	github.com/prometheus/exporter-toolkit v0.13.2
+	github.com/prometheus/procfs v0.15.2-0.20240603130017-1754b780536b // == v0.15.1 + https://github.com/prometheus/procfs/commit/1754b780536bb81082baa913e04cc4fff4d2baea
+	github.com/safchain/ethtool v0.5.9
 	golang.org/x/exp v0.0.0-20240909161429-701f63a606c0
-	golang.org/x/sys v0.26.0
+	golang.org/x/sys v0.28.0
 	howett.net/plist v1.0.1
 )
@@ -51,11 +51,11 @@ require (
 	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
 	go.uber.org/atomic v1.7.0 // indirect
 	go.uber.org/multierr v1.6.0 // indirect
-	golang.org/x/crypto v0.28.0 // indirect
-	golang.org/x/net v0.29.0 // indirect
-	golang.org/x/oauth2 v0.23.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
-	google.golang.org/protobuf v1.34.2 // indirect
+	golang.org/x/crypto v0.31.0 // indirect
+	golang.org/x/net v0.32.0 // indirect
+	golang.org/x/oauth2 v0.24.0 // indirect
+	golang.org/x/sync v0.10.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
+	google.golang.org/protobuf v1.35.2 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 )
go.sum (54 changed lines)
@@ -61,8 +61,8 @@ github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U
 github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
 github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
 github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
-github.com/mdlayher/wifi v0.3.0 h1:ZfS81w/7xTWBJfhM77K0k6m3sJckwoNOoZUwOW34omo=
-github.com/mdlayher/wifi v0.3.0/go.mod h1:/bdkqKYl+lD4recmQM6bTHxMrEUW70reibTyr93CAd0=
+github.com/mdlayher/wifi v0.3.1 h1:bZDuMI1f7z5BtUUO3NgHRdR/R88YtywIe6dsEFI0Txs=
+github.com/mdlayher/wifi v0.3.1/go.mod h1:ODQaObvsglghTuNhezD9grkTB4shVNc28aJfTXmvSi8=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
@@ -79,48 +79,48 @@ github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+
 github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
-github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
-github.com/prometheus/exporter-toolkit v0.13.1 h1:Evsh0gWQo2bdOHlnz9+0Nm7/OFfIwhE2Ws4A2jIlR04=
-github.com/prometheus/exporter-toolkit v0.13.1/go.mod h1:ujdv2YIOxtdFxxqtloLpbqmxd5J0Le6IITUvIRSWjj0=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
+github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ=
+github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g=
+github.com/prometheus/procfs v0.15.2-0.20240603130017-1754b780536b h1:4EJkx3vycI+n5JY5ht+bnSUGamkmmXkpcNeO/OBT/0A=
+github.com/prometheus/procfs v0.15.2-0.20240603130017-1754b780536b/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
 github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/safchain/ethtool v0.4.1 h1:S6mEleTADqgynileXoiapt/nKnatyR6bmIHoF+h2ADo=
-github.com/safchain/ethtool v0.4.1/go.mod h1:XLLnZmy4OCRTkksP/UiMjij96YmIsBfmBQcs7H6tA48=
+github.com/safchain/ethtool v0.5.9 h1://6RvaOKFf3nQ0rl5+8zBbE4/72455VC9Jq61pfq67E=
+github.com/safchain/ethtool v0.5.9/go.mod h1:w8oSsZeowyRaM7xJJBAbubzzrOkwO8TBgPSEqPP/5mg=
 github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973 h1:GfSdC6wKfTGcgCS7BtzF5694Amne1pGCSTY252WhlEY=
 github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
 github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
 go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
 golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk=
 golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY=
-golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
-golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
-golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
-golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
+golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
+golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
+golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
+google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=