Merge branch 'prometheus:master' into add-drm-chip

Yurii Kondrakov 2025-05-30 15:37:31 -04:00 committed by GitHub
commit d45ed475ff
GPG key ID: B5690EEEBB952194
83 changed files with 5722 additions and 1302 deletions


@ -7,10 +7,10 @@ executors:
# should also be updated.
golang:
docker:
- image: cimg/go:1.23
- image: cimg/go:1.24
arm:
docker:
- image: cimg/go:1.23
- image: cimg/go:1.24
resource_class: arm.medium
jobs:
@ -42,7 +42,7 @@ jobs:
- run: git diff --exit-code
build:
machine:
image: ubuntu-2204:current
image: ubuntu-2404:current
parallelism: 3
steps:
- prometheus/setup_environment
@ -68,9 +68,9 @@ jobs:
destination: /build
test_docker:
machine:
image: ubuntu-2204:current
image: ubuntu-2404:current
environment:
DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.23-base
DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.24-base
REPO_PATH: github.com/prometheus/node_exporter
steps:
- prometheus/setup_environment

.github/workflows/bsd.yml (new file)

@ -0,0 +1,313 @@
name: bsd
on:
push:
branches:
- master
pull_request:
branches:
- master
permissions:
contents: read
env:
GNU_TAR_VERSION: "1.35"
GO_VERSION_DRAGONFLY: "1.24.1"
GO_VERSION_FREEBSD: "123"
GO_VERSION_NETBSD: "1.24.1"
GO_VERSION_OPENBSD: "1.23.1"
GO_VERSION_SOLARIS: "1.24.1"
# To spin up one of the VMs below, see the "Debug Shell" section here: https://github.com/vmactions
jobs:
test_freebsd:
name: Run end-to-end tests on FreeBSD
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: test-e2e
uses: vmactions/freebsd-vm@8873d98fd1413b5977cb2f7348fe329775159892 # v1.1.9
with:
copyback: false
envs: 'GO_VERSION_FREEBSD GNU_TAR_VERSION'
usesh: true
prepare: |
pkg update -f
pkg install -y \
bash \
git \
gmake \
gnugrep \
go${GO_VERSION_FREEBSD} \
gsed \
gtar \
python \
wget
run: |
echo "::group::Setup prerequisites"
set -eu
mkdir bin
ln -s $(which go${GO_VERSION_FREEBSD}) $(pwd)/bin/go
ln -s $(which ggrep) $(pwd)/bin/grep
ln -s $(which gmake) $(pwd)/bin/make
ln -s $(which gsed) $(pwd)/bin/sed
ln -s $(which gtar) $(pwd)/bin/tar
export PATH=$(pwd)/bin:$PATH
echo "::endgroup::"
echo "::group::Print environment information"
uname -a
echo "GOOS: $(go env GOOS)"
echo "GOARCH: $(go env GOARCH)"
echo "::endgroup::"
echo "::group::Run End-to-End Tests"
git config --global --add safe.directory $(pwd)
gmake test-e2e
echo "::endgroup::"
test_openbsd:
name: Run end-to-end tests on OpenBSD
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: test-e2e
uses: vmactions/openbsd-vm@7ac70b6de6f33efc74a90c1964afa3bcf0ee4401 # v1.1.6
with:
copyback: false
envs: 'GO_VERSION_OPENBSD GNU_TAR_VERSION'
usesh: true
prepare: |
pkg_add -u
pkg_add \
bash \
ggrep \
git \
gmake \
go-${GO_VERSION_OPENBSD} \
gsed \
gtar-${GNU_TAR_VERSION}p0-static \
python \
wget
run: |
echo "::group::Setup prerequisites"
set -eu
mkdir bin
ln -s $(which ggrep) $(pwd)/bin/grep
ln -s $(which gmake) $(pwd)/bin/make
ln -s $(which gsed) $(pwd)/bin/sed
ln -s $(which gtar) $(pwd)/bin/tar
export PATH=$(pwd)/bin:$PATH
echo "::endgroup::"
echo "::group::Print environment information"
uname -a
echo "GOOS: $(go env GOOS)"
echo "GOARCH: $(go env GOARCH)"
echo "::endgroup::"
echo "::group::Run End-to-End Tests"
git config --global --add safe.directory $(pwd)
make test-e2e
echo "::endgroup::"
test_netbsd:
name: Run end-to-end tests on NetBSD
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: test-e2e
uses: vmactions/netbsd-vm@46a58bbf03682b4cb24142b97fa315ae52bed573 # v1.1.8
with:
copyback: false
envs: 'GO_VERSION_NETBSD GNU_TAR_VERSION'
usesh: true
prepare: |
/usr/sbin/pkg_add -u
/usr/sbin/pkg_add \
git \
gmake \
grep \
gsed \
gtar-base-${GNU_TAR_VERSION}\
python312 \
wget
run: |
echo "::group::Setup prerequisites"
set -eu
mkdir bin
GOGZ="go${GO_VERSION_NETBSD}.netbsd-amd64.tar.gz"
wget https://go.dev/dl/${GOGZ}
gtar xzf ${GOGZ}
ln -s $(pwd)/go/bin/go $(pwd)/bin/go
ln -s $(which ggrep) $(pwd)/bin/grep
ln -s $(which gmake) $(pwd)/bin/make
ln -s $(which gsed) $(pwd)/bin/sed
ln -s $(which gtar) $(pwd)/bin/tar
export PATH=$(pwd)/bin:$PATH
echo "::endgroup::"
echo "::group::Print environment information"
uname -a
echo "GOOS: $(go env GOOS)"
echo "GOARCH: $(go env GOARCH)"
echo "::endgroup::"
echo "::group::Run End-to-End Tests"
git config --global --add safe.directory $(pwd)
make test-e2e
echo "::endgroup::"
test_dragonfly:
name: Run end-to-end tests on DragonFly
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: test-e2e
uses: vmactions/dragonflybsd-vm@e3c420e8a2362c2496fca6e76a291abd46f5d8e7 # v1.1.0
with:
copyback: false
envs: 'GO_VERSION_DRAGONFLY'
usesh: true
prepare: |
pkg update && pkg upgrade -y
pkg install -y \
bash \
git \
gmake \
gnugrep \
gsed \
gtar \
python3 \
wget
run: |
echo "::group::Setup prerequisites"
set -eu
mkdir bin
GOGZ="go${GO_VERSION_DRAGONFLY}.dragonfly-amd64.tar.gz"
wget https://go.dev/dl/${GOGZ}
gtar xzf ${GOGZ}
ln -s $(pwd)/go/bin/go $(pwd)/bin/go
ln -s $(which ggrep) $(pwd)/bin/grep
ln -s $(which gmake) $(pwd)/bin/make
ln -s $(which gsed) $(pwd)/bin/sed
ln -s $(which gtar) $(pwd)/bin/tar
ln -s $(which python3) $(pwd)/bin/python
export PATH=$(pwd)/bin:$PATH
echo "::endgroup::"
echo "::group::Print environment information"
uname -a
echo "GOOS: $(go env GOOS)"
echo "GOARCH: $(go env GOARCH)"
echo "::endgroup::"
echo "::group::Run End-to-End Tests"
git config --global --add safe.directory $(pwd)
gmake test-e2e
echo "::endgroup::"
test_solaris:
name: Run end-to-end tests on Solaris
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: test-e2e
uses: vmactions/solaris-vm@cc8f82fa1a7cc746153ec3f71bf11f311f16e225 # v1.1.1
with:
copyback: false
envs: 'GO_VERSION_SOLARIS'
usesh: true
prepare: |
pkg update
pkg install \
bash \
curl \
gcc \
git \
gnu-grep \
gnu-make \
gnu-sed \
gnu-tar
run: |
echo "::group::Setup prerequisites"
set -eu
mkdir bin
GOGZ="go${GO_VERSION_SOLARIS}.solaris-amd64.tar.gz"
wget https://go.dev/dl/${GOGZ}
gtar xzf ${GOGZ}
ln -s $(pwd)/go/bin/go $(pwd)/bin/go
ln -s $(which ggrep) $(pwd)/bin/grep
ln -s $(which gmake) $(pwd)/bin/make
ln -s $(which gsed) $(pwd)/bin/sed
ln -s $(which gtar) $(pwd)/bin/tar
export PATH=$(pwd)/bin:$PATH
echo ">> building promu as it is not shipped for Solaris"
git clone https://github.com/prometheus/promu.git
cd promu
go build .
cd -
mkdir -p $(go env GOPATH)/bin
ln -s $(pwd)/promu/promu $(go env GOPATH)/bin/promu
export PATH=$(go env GOPATH)/bin:$PATH
echo "::endgroup::"
echo "::group::Print environment information"
uname -a
echo "GOOS: $(go env GOOS)"
echo "GOARCH: $(go env GOARCH)"
echo "::endgroup::"
echo "::group::Run End-to-End Tests"
git config --global --add safe.directory $(pwd)
make test-e2e
echo "::endgroup::"
test_macos:
name: Run end-to-end tests on macOS
runs-on: macos-latest
steps:
- name: Checkout the repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install dependencies
run: |
brew install \
bash \
curl \
git \
grep \
make \
gnu-sed \
gnu-tar \
go \
python3
- name: test-e2e
run: |
echo "::group::Setup prerequisites"
set -eu
mkdir bin
ln -s $(which ggrep) $(pwd)/bin/grep
ln -s $(which gmake) $(pwd)/bin/make
ln -s $(which gsed) $(pwd)/bin/sed
ln -s $(which gtar) $(pwd)/bin/tar
export PATH=$(pwd)/bin:$PATH
echo "::endgroup::"
echo "::group::Print environment information"
uname -a
echo "GOOS: $(go env GOOS)"
echo "GOARCH: $(go env GOARCH)"
echo "::endgroup::"
echo "::group::Run End-to-End Tests"
git config --global --add safe.directory $(pwd)
make test-e2e
echo "::endgroup::"


@ -18,7 +18,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set docker hub repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to Dockerhub
@ -40,7 +40,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set quay.io org name
run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
- name: Set quay.io repo name


@ -24,16 +24,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Go
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
with:
go-version: 1.23.x
go-version: 1.24.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84 # v6.5.2
with:
args: --verbose
version: v1.60.2
version: v1.64.6

.gitignore

@ -27,6 +27,8 @@ dependencies-stamp
/.release
/.tarballs
tools/tools
# Intellij
/.idea


@ -1,17 +1,9 @@
linters:
enable:
- depguard
- goimports
- misspell
- revive
disable:
# Disable soon to deprecated[1] linters that lead to false
# positives when build tags disable certain files[2]
# 1: https://github.com/golangci/golangci-lint/issues/1841
# 2: https://github.com/prometheus/node_exporter/issues/1545
- deadcode
- unused
- structcheck
- varcheck
issues:
exclude-rules:


@ -1,7 +1,7 @@
go:
# Whenever the Go version is updated here, .circle/config.yml and
# .promu.yml should also be updated.
version: 1.23
version: 1.24
cgo: true
repository:
path: github.com/prometheus/node_exporter


@ -1,7 +1,7 @@
go:
# Whenever the Go version is updated here, .circle/config.yml and
# .promu-cgo.yml should also be updated.
version: 1.23
version: 1.24
repository:
path: github.com/prometheus/node_exporter
build:


@ -1,7 +1,7 @@
---
extends: default
ignore: |
ui/react-app/node_modules
**/node_modules
rules:
braces:


@ -5,6 +5,38 @@
* [ENHANCEMENT]
* [BUGFIX]
## 1.9.0 / 2025-02-17
* [CHANGE] meminfo: Convert linux implementation to use procfs lib #3049
* [CHANGE] Update logging to use Go log/slog #3097
* [FEATURE] filesystem: Add `node_filesystem_mount_info` metric #2970
* [FEATURE] btrfs: Add metrics for commit statistics #3010
* [FEATURE] interrupts: Add collector include/exclude filtering #3028
* [FEATURE] interrupts: Add "exclude zeros" filtering #3028
* [FEATURE] slabinfo: Add filters for slab name. #3041
* [FEATURE] pressure: add IRQ PSI metrics #3048
* [FEATURE] hwmon: Add include and exclude filter for sensors #3072
* [FEATURE] filesystem: Add NetBSD support #3082
* [FEATURE] netdev: Add ifAlias label #3087
* [FEATURE] hwmon: Add Support for GPU Clock Frequencies #3093
* [FEATURE] Add `exclude[]` URL parameter #3116
* [FEATURE] Add AIX support #3136
* [FEATURE] filesystem: Add fs-types/mount-points include flags #3171
* [FEATURE] netstat: Add collector for tcp packet counters for FreeBSD. #3177
* [ENHANCEMENT] ethtool: Add logging for filtering flags #2979
* [ENHANCEMENT] netstat: Add TCPRcvQDrop to default metrics #3021
* [ENHANCEMENT] diskstats: Add block device rotational #3022
* [ENHANCEMENT] cpu: Support CPU online status #3032
* [ENHANCEMENT] arp: optimize interface name resolution #3133
* [ENHANCEMENT] textfile: Allow specifying multiple directory globs #3135
* [ENHANCEMENT] filesystem: Add reporting of purgeable space on MacOS #3206
* [ENHANCEMENT] ethtool: Skip full scan of NetClass directories #3239
* [BUGFIX] zfs: Prevent `procfs` integer underflow #2961
* [BUGFIX] pressure: Fix collection on systems that do not expose a full CPU stat #3054
* [BUGFIX] cpu: Fix FreeBSD 32-bit host support and plug memory leak #3083
* [BUGFIX] hwmon: Add safety check to hwmon read #3134
* [BUGFIX] zfs: Allow space in dataset name #3186
## 1.8.1 / 2024-05-16
* [BUGFIX] Fix CPU seconds on Solaris #2963


@ -113,9 +113,13 @@ update_fixtures:
rm -vf collector/fixtures/udev/.unpacked
./ttar -C collector/fixtures -c -f collector/fixtures/udev.ttar udev
.PHONY: tools
tools:
@rm ./tools/tools >/dev/null 2>&1 || true
@$(GO) build -o tools ./tools/...
.PHONY: test-e2e
test-e2e: build collector/fixtures/sys/.unpacked collector/fixtures/udev/.unpacked
test-e2e: build collector/fixtures/sys/.unpacked collector/fixtures/udev/.unpacked tools
@echo ">> running end-to-end tests"
./end-to-end-test.sh


@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.60.2
GOLANGCI_LINT_VERSION ?= v1.64.6
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))


@ -1,7 +1,8 @@
# Node exporter
[![CircleCI](https://circleci.com/gh/prometheus/node_exporter/tree/master.svg?style=shield)][circleci]
[![Buildkite status](https://badge.buildkite.com/94a0c1fb00b1f46883219c256efe9ce01d63b6505f3a942f9b.svg)](https://buildkite.com/prometheus/node-exporter)
![bsd workflow](https://github.com/prometheus/node_exporter/actions/workflows/bsd.yml/badge.svg)
![golangci-lint workflow](https://github.com/prometheus/node_exporter/actions/workflows/golangci-lint.yml/badge.svg)
[![Docker Repository on Quay](https://quay.io/repository/prometheus/node-exporter/status)][quay]
[![Docker Pulls](https://img.shields.io/docker/pulls/prom/node-exporter.svg?maxAge=604800)][hub]
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/node_exporter)][goreportcard]
@ -99,8 +100,8 @@ cpu | flags | --collector.cpu.info.flags-include | N/A
diskstats | device | --collector.diskstats.device-include | --collector.diskstats.device-exclude
ethtool | device | --collector.ethtool.device-include | --collector.ethtool.device-exclude
ethtool | metrics | --collector.ethtool.metrics-include | N/A
filesystem | fs-types | N/A | --collector.filesystem.fs-types-exclude
filesystem | mount-points | N/A | --collector.filesystem.mount-points-exclude
filesystem | fs-types | --collector.filesystem.fs-types-include | --collector.filesystem.fs-types-exclude
filesystem | mount-points | --collector.filesystem.mount-points-include | --collector.filesystem.mount-points-exclude
hwmon | chip | --collector.hwmon.chip-include | --collector.hwmon.chip-exclude
hwmon | sensor | --collector.hwmon.sensor-include | --collector.hwmon.sensor-exclude
interrupts | name | --collector.interrupts.name-include | --collector.interrupts.name-exclude


@ -1 +1 @@
1.8.1
1.9.0


@ -17,13 +17,11 @@
package collector
import (
"errors"
"fmt"
"log/slog"
"net"
"github.com/alecthomas/kingpin/v2"
"github.com/jsimonetti/rtnetlink/v2"
"github.com/jsimonetti/rtnetlink/v2/rtnl"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
"golang.org/x/sys/unix"
@ -76,44 +74,30 @@ func getTotalArpEntries(deviceEntries []procfs.ARPEntry) map[string]uint32 {
}
func getTotalArpEntriesRTNL() (map[string]uint32, error) {
conn, err := rtnetlink.Dial(nil)
conn, err := rtnl.Dial(nil)
if err != nil {
return nil, err
}
defer conn.Close()
neighbors, err := conn.Neigh.List()
// Neighbors will also contain IPv6 neighbors, but since this is purely an ARP collector,
// restrict to AF_INET.
neighbors, err := conn.Neighbours(nil, unix.AF_INET)
if err != nil {
return nil, err
}
ifIndexEntries := make(map[uint32]uint32)
// Map of interface name to ARP neighbor count.
entries := make(map[string]uint32)
for _, n := range neighbors {
// Neighbors will also contain IPv6 neighbors, but since this is purely an ARP collector,
// restrict to AF_INET. Also skip entries which have state NUD_NOARP to conform to output
// of /proc/net/arp.
if n.Family == unix.AF_INET && n.State&unix.NUD_NOARP == 0 {
ifIndexEntries[n.Index]++
// Skip entries which have state NUD_NOARP to conform to output of /proc/net/arp.
if n.State&unix.NUD_NOARP == 0 {
entries[n.Interface.Name]++
}
}
enumEntries := make(map[string]uint32)
// Convert interface indexes to names.
for ifIndex, entryCount := range ifIndexEntries {
iface, err := net.InterfaceByIndex(int(ifIndex))
if err != nil {
if errors.Unwrap(err).Error() == "no such network interface" {
continue
}
return nil, err
}
enumEntries[iface.Name] = entryCount
}
return enumEntries, nil
return entries, nil
}
func (c *arpCollector) Update(ch chan<- prometheus.Metric) error {


@ -30,22 +30,50 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
var (
nodeCPUPhysicalSecondsDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "physical_seconds_total"),
"Seconds the physical CPUs spent in each mode.",
[]string{"cpu", "mode"}, nil,
)
nodeCPUSRunQueueDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "runqueue"),
"Length of the run queue.", []string{"cpu"}, nil,
)
nodeCPUFlagsDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "flags"),
"CPU flags.",
[]string{"cpu", "flag"}, nil,
)
nodeCPUContextSwitchDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "context_switches_total"),
"Number of context switches.",
[]string{"cpu"}, nil,
)
)
type cpuCollector struct {
cpu typedDesc
logger *slog.Logger
tickPerSecond int64
cpu typedDesc
cpuPhysical typedDesc
cpuRunQueue typedDesc
cpuFlags typedDesc
cpuContextSwitch typedDesc
logger *slog.Logger
tickPerSecond float64
purrTicksPerSecond float64
}
func init() {
registerCollector("cpu", defaultEnabled, NewCpuCollector)
}
func tickPerSecond() (int64, error) {
func tickPerSecond() (float64, error) {
ticks, err := C.sysconf(C._SC_CLK_TCK)
if ticks == -1 || err != nil {
return 0, fmt.Errorf("failed to get clock ticks per second: %v", err)
}
return int64(ticks), nil
return float64(ticks), nil
}
func NewCpuCollector(logger *slog.Logger) (Collector, error) {
@ -53,10 +81,22 @@ func NewCpuCollector(logger *slog.Logger) (Collector, error) {
if err != nil {
return nil, err
}
pconfig, err := perfstat.PartitionStat()
if err != nil {
return nil, err
}
return &cpuCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
logger: logger,
tickPerSecond: ticks,
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
cpuPhysical: typedDesc{nodeCPUPhysicalSecondsDesc, prometheus.CounterValue},
cpuRunQueue: typedDesc{nodeCPUSRunQueueDesc, prometheus.GaugeValue},
cpuFlags: typedDesc{nodeCPUFlagsDesc, prometheus.GaugeValue},
cpuContextSwitch: typedDesc{nodeCPUContextSwitchDesc, prometheus.CounterValue},
logger: logger,
tickPerSecond: ticks,
purrTicksPerSecond: float64(pconfig.ProcessorMhz * 1e6),
}, nil
}
@ -67,10 +107,26 @@ func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error {
}
for n, stat := range stats {
ch <- c.cpu.mustNewConstMetric(float64(stat.User/c.tickPerSecond), strconv.Itoa(n), "user")
ch <- c.cpu.mustNewConstMetric(float64(stat.Sys/c.tickPerSecond), strconv.Itoa(n), "system")
ch <- c.cpu.mustNewConstMetric(float64(stat.Idle/c.tickPerSecond), strconv.Itoa(n), "idle")
ch <- c.cpu.mustNewConstMetric(float64(stat.Wait/c.tickPerSecond), strconv.Itoa(n), "wait")
// LPAR metrics
ch <- c.cpu.mustNewConstMetric(float64(stat.User)/c.tickPerSecond, strconv.Itoa(n), "user")
ch <- c.cpu.mustNewConstMetric(float64(stat.Sys)/c.tickPerSecond, strconv.Itoa(n), "system")
ch <- c.cpu.mustNewConstMetric(float64(stat.Idle)/c.tickPerSecond, strconv.Itoa(n), "idle")
ch <- c.cpu.mustNewConstMetric(float64(stat.Wait)/c.tickPerSecond, strconv.Itoa(n), "wait")
// Physical CPU metrics
ch <- c.cpuPhysical.mustNewConstMetric(float64(stat.PIdle)/c.purrTicksPerSecond, strconv.Itoa(n), "pidle")
ch <- c.cpuPhysical.mustNewConstMetric(float64(stat.PUser)/c.purrTicksPerSecond, strconv.Itoa(n), "puser")
ch <- c.cpuPhysical.mustNewConstMetric(float64(stat.PSys)/c.purrTicksPerSecond, strconv.Itoa(n), "psys")
ch <- c.cpuPhysical.mustNewConstMetric(float64(stat.PWait)/c.purrTicksPerSecond, strconv.Itoa(n), "pwait")
// Run queue length
ch <- c.cpuRunQueue.mustNewConstMetric(float64(stat.RunQueue), strconv.Itoa(n))
// Flags
ch <- c.cpuFlags.mustNewConstMetric(float64(stat.SpurrFlag), strconv.Itoa(n), "spurr")
// Context switches
ch <- c.cpuContextSwitch.mustNewConstMetric(float64(stat.CSwitches), strconv.Itoa(n))
}
return nil
}
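
The physical-CPU seconds above are derived by treating the PURR counters as ticks that advance at the partition's nominal clock rate, which is why the collector divides by `purrTicksPerSecond = ProcessorMhz * 1e6`. A tiny worked example of that conversion under the same assumption (the 3.8 GHz figure and the tick count are made up for illustration):

```go
package main

import "fmt"

func main() {
	// Assumption carried over from the collector: PURR ticks advance at the
	// nominal processor frequency reported by perfstat.PartitionStat().
	const processorMhz = 3800.0              // hypothetical 3.8 GHz core
	purrTicksPerSecond := processorMhz * 1e6 // 3.8e9 PURR ticks per second

	var pUser uint64 = 7_600_000_000 // hypothetical PURR ticks spent in user mode
	seconds := float64(pUser) / purrTicksPerSecond
	fmt.Printf("node_cpu_physical_seconds_total{mode=\"puser\"} = %g\n", seconds) // 2
}
```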


@ -17,6 +17,7 @@
package collector
import (
"errors"
"fmt"
"log/slog"
"os"
@ -26,15 +27,17 @@ import (
"strconv"
"sync"
"golang.org/x/exp/maps"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
"github.com/prometheus/procfs/sysfs"
"golang.org/x/exp/maps"
)
type cpuCollector struct {
fs procfs.FS
procfs procfs.FS
sysfs sysfs.FS
cpu *prometheus.Desc
cpuInfo *prometheus.Desc
cpuFrequencyHz *prometheus.Desc
@ -45,6 +48,7 @@ type cpuCollector struct {
cpuPackageThrottle *prometheus.Desc
cpuIsolated *prometheus.Desc
logger *slog.Logger
cpuOnline *prometheus.Desc
cpuStats map[int64]procfs.CPUStat
cpuStatsMutex sync.Mutex
isolatedCpus []uint16
@ -70,17 +74,17 @@ func init() {
// NewCPUCollector returns a new Collector exposing kernel/system statistics.
func NewCPUCollector(logger *slog.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
pfs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
}
sysfs, err := sysfs.NewFS(*sysPath)
sfs, err := sysfs.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)
}
isolcpus, err := sysfs.IsolatedCPUs()
isolcpus, err := sfs.IsolatedCPUs()
if err != nil {
if !os.IsNotExist(err) {
return nil, fmt.Errorf("Unable to get isolated cpus: %w", err)
@ -89,8 +93,9 @@ func NewCPUCollector(logger *slog.Logger) (Collector, error) {
}
c := &cpuCollector{
fs: fs,
cpu: nodeCPUSecondsDesc,
procfs: pfs,
sysfs: sfs,
cpu: nodeCPUSecondsDesc,
cpuInfo: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "info"),
"CPU information from /proc/cpuinfo.",
@ -131,6 +136,11 @@ func NewCPUCollector(logger *slog.Logger) (Collector, error) {
"Whether each core is isolated, information from /sys/devices/system/cpu/isolated.",
[]string{"cpu"}, nil,
),
cpuOnline: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "online"),
"CPUs that are online and being scheduled.",
[]string{"cpu"}, nil,
),
logger: logger,
isolatedCpus: isolcpus,
cpuStats: make(map[int64]procfs.CPUStat),
@ -177,12 +187,21 @@ func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error {
if c.isolatedCpus != nil {
c.updateIsolated(ch)
}
return c.updateThermalThrottle(ch)
err := c.updateThermalThrottle(ch)
if err != nil {
return err
}
err = c.updateOnline(ch)
if err != nil {
return err
}
return nil
}
// updateInfo reads /proc/cpuinfo
func (c *cpuCollector) updateInfo(ch chan<- prometheus.Metric) error {
info, err := c.fs.CPUInfo()
info, err := c.procfs.CPUInfo()
if err != nil {
return err
}
@ -333,9 +352,31 @@ func (c *cpuCollector) updateIsolated(ch chan<- prometheus.Metric) {
}
}
// updateOnline reads /sys/devices/system/cpu/cpu*/online through sysfs and exports online status metrics.
func (c *cpuCollector) updateOnline(ch chan<- prometheus.Metric) error {
cpus, err := c.sysfs.CPUs()
if err != nil {
return err
}
// No-op if the system does not support CPU online stats.
cpu0 := cpus[0]
if _, err := cpu0.Online(); err != nil && errors.Is(err, os.ErrNotExist) {
return nil
}
for _, cpu := range cpus {
setOnline := float64(0)
if online, _ := cpu.Online(); online {
setOnline = 1
}
ch <- prometheus.MustNewConstMetric(c.cpuOnline, prometheus.GaugeValue, setOnline, cpu.Number())
}
return nil
}
// updateStat reads /proc/stat through procfs and exports CPU-related metrics.
func (c *cpuCollector) updateStat(ch chan<- prometheus.Metric) error {
stats, err := c.fs.Stat()
stats, err := c.procfs.Stat()
if err != nil {
return err
}


@ -32,6 +32,15 @@ import (
"howett.net/plist"
)
const (
_IOC_OUT = uint(0x40000000)
_IOC_IN = uint(0x80000000)
_IOC_INOUT = (_IOC_IN | _IOC_OUT)
_IOCPARM_MASK = uint(0x1fff)
_IOCPARM_SHIFT = uint(16)
_IOCGROUP_SHIFT = uint(8)
)
type clockinfo struct {
hz int32 // clock frequency
tick int32 // micro-seconds per hz tick
@ -50,7 +59,7 @@ type cputime struct {
type plistref struct {
pref_plist unsafe.Pointer
pref_len uint64
pref_len uint
}
type sysmonValues struct {
@ -64,25 +73,19 @@ type sysmonProperty []sysmonValues
type sysmonProperties map[string]sysmonProperty
func readBytes(ptr unsafe.Pointer, length uint64) []byte {
buf := make([]byte, length-1)
var i uint64
for ; i < length-1; i++ {
buf[i] = *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(i)))
}
return buf
func _IOC(inout uint, group byte, num uint, len uintptr) uint {
return ((inout) | ((uint(len) & _IOCPARM_MASK) << _IOCPARM_SHIFT) | (uint(group) << _IOCGROUP_SHIFT) | (num))
}
func ioctl(fd int, nr int64, typ byte, size uintptr, retptr unsafe.Pointer) error {
func _IOWR(group byte, num uint, len uintptr) uint {
return _IOC(_IOC_INOUT, group, num, len)
}
func ioctl(fd int, nr uint, typ byte, size uintptr, retptr unsafe.Pointer) error {
_, _, errno := unix.Syscall(
unix.SYS_IOCTL,
uintptr(fd),
// Some magicks derived from sys/ioccom.h.
uintptr((0x40000000|0x80000000)|
((int64(size)&(1<<13-1))<<16)|
(int64(typ)<<8)|
nr,
),
uintptr(_IOWR(typ, nr, size)),
uintptr(retptr),
)
if errno != 0 {
@ -92,7 +95,7 @@ func ioctl(fd int, nr int64, typ byte, size uintptr, retptr unsafe.Pointer) erro
}
func readSysmonProperties() (sysmonProperties, error) {
fd, err := unix.Open(rootfsFilePath("/dev/sysmon"), unix.O_RDONLY, 0777)
fd, err := unix.Open(rootfsFilePath("/dev/sysmon"), unix.O_RDONLY, 0)
if err != nil {
return nil, err
}
@ -103,8 +106,8 @@ func readSysmonProperties() (sysmonProperties, error) {
if err = ioctl(fd, 0, 'E', unsafe.Sizeof(retptr), unsafe.Pointer(&retptr)); err != nil {
return nil, err
}
bytes := readBytes(retptr.pref_plist, retptr.pref_len)
defer unix.Syscall(unix.SYS_MUNMAP, uintptr(retptr.pref_plist), uintptr(retptr.pref_len), uintptr(0))
bytes := unsafe.Slice((*byte)(unsafe.Pointer(retptr.pref_plist)), retptr.pref_len-1)
var props sysmonProperties
if _, err = plist.Unmarshal(bytes, &props); err != nil {
@ -179,7 +182,7 @@ func getCPUTimes() ([]cputime, error) {
if err != nil {
return nil, err
}
ncpus := *(*int)(unsafe.Pointer(&ncpusb[0]))
ncpus := int(*(*uint32)(unsafe.Pointer(&ncpusb[0])))
if ncpus < 1 {
return nil, errors.New("Invalid cpu number")
@ -191,10 +194,10 @@ func getCPUTimes() ([]cputime, error) {
if err != nil {
return nil, err
}
for len(cpb) >= int(unsafe.Sizeof(int(0))) {
t := *(*int)(unsafe.Pointer(&cpb[0]))
for len(cpb) >= int(unsafe.Sizeof(uint64(0))) {
t := *(*uint64)(unsafe.Pointer(&cpb[0]))
times = append(times, float64(t)/cpufreq)
cpb = cpb[unsafe.Sizeof(int(0)):]
cpb = cpb[unsafe.Sizeof(uint64(0)):]
}
}
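
The `_IOC`/`_IOWR` helpers introduced above replace the previous inline "magicks derived from sys/ioccom.h" with the same encoding NetBSD uses: direction bits, parameter length, ioctl group, and command number packed into a single request word. A self-contained sketch of that encoding, using an assumed 16-byte payload in place of `unsafe.Sizeof(plistref{})` (pointer plus length on a 64-bit platform):

```go
package main

import "fmt"

// Constants mirrored from the diff above (NetBSD sys/ioccom.h).
const (
	_IOC_OUT        = uint(0x40000000)
	_IOC_IN         = uint(0x80000000)
	_IOC_INOUT      = _IOC_IN | _IOC_OUT
	_IOCPARM_MASK   = uint(0x1fff)
	_IOCPARM_SHIFT  = uint(16)
	_IOCGROUP_SHIFT = uint(8)
)

// _IOC packs direction bits, parameter size, group, and command number into one word.
func _IOC(inout uint, group byte, num uint, len uintptr) uint {
	return inout | ((uint(len) & _IOCPARM_MASK) << _IOCPARM_SHIFT) | (uint(group) << _IOCGROUP_SHIFT) | num
}

// _IOWR marks a request that both writes a payload to and reads one back from the kernel.
func _IOWR(group byte, num uint, len uintptr) uint {
	return _IOC(_IOC_INOUT, group, num, len)
}

func main() {
	// Assumed 16-byte payload standing in for unsafe.Sizeof(plistref{}), matching
	// the ioctl(fd, 0, 'E', ...) call issued by the collector above.
	fmt.Printf("%#x\n", _IOWR('E', 0, 16)) // prints 0xc0104500
}
```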


@ -22,12 +22,12 @@ import (
)
const (
cpuVulerabilitiesCollector = "cpu_vulnerabilities"
cpuVulnerabilitiesCollectorSubsystem = "cpu_vulnerabilities"
)
var (
vulnerabilityDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuVulerabilitiesCollector, "info"),
prometheus.BuildFQName(namespace, cpuVulnerabilitiesCollectorSubsystem, "info"),
"Details of each CPU vulnerability reported by sysfs. The value of the series is an int encoded state of the vulnerability. The same state is stored as a string in the label",
[]string{"codename", "state", "mitigation"},
nil,
@ -37,7 +37,7 @@ var (
type cpuVulnerabilitiesCollector struct{}
func init() {
registerCollector(cpuVulerabilitiesCollector, defaultDisabled, NewVulnerabilitySysfsCollector)
registerCollector(cpuVulnerabilitiesCollectorSubsystem, defaultDisabled, NewVulnerabilitySysfsCollector)
}
func NewVulnerabilitySysfsCollector(logger *slog.Logger) (Collector, error) {


@ -18,10 +18,11 @@ package collector
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
"log/slog"
"strings"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
)
type cpuFreqCollector struct {


@ -30,11 +30,19 @@ type diskstatsCollector struct {
rbytes typedDesc
wbytes typedDesc
time typedDesc
bsize typedDesc
qdepth typedDesc
rserv typedDesc
wserv typedDesc
xfers typedDesc
xrate typedDesc
deviceFilter deviceFilter
logger *slog.Logger
tickPerSecond int64
tickPerSecond float64
}
func init() {
@ -57,6 +65,54 @@ func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) {
wbytes: typedDesc{writtenBytesDesc, prometheus.CounterValue},
time: typedDesc{ioTimeSecondsDesc, prometheus.CounterValue},
bsize: typedDesc{
prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "block_size_bytes"),
"Size of the block device in bytes.",
diskLabelNames, nil,
),
prometheus.GaugeValue,
},
qdepth: typedDesc{
prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "queue_depth"),
"Number of requests in the queue.",
diskLabelNames, nil,
),
prometheus.GaugeValue,
},
rserv: typedDesc{
prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "read_time_seconds_total"),
"The total time spent servicing read requests.",
diskLabelNames, nil,
),
prometheus.CounterValue,
},
wserv: typedDesc{
prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "write_time_seconds_total"),
"The total time spent servicing write requests.",
diskLabelNames, nil,
),
prometheus.CounterValue,
},
xfers: typedDesc{
prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "transfers_total"),
"The total number of transfers to/from disk.",
diskLabelNames, nil,
),
prometheus.CounterValue,
},
xrate: typedDesc{
prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "transfers_to_disk_total"),
"The total number of transfers from disk.",
diskLabelNames, nil,
),
prometheus.CounterValue,
},
deviceFilter: deviceFilter,
logger: logger,
@ -76,7 +132,14 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
}
ch <- c.rbytes.mustNewConstMetric(float64(stat.Rblks*512), stat.Name)
ch <- c.wbytes.mustNewConstMetric(float64(stat.Wblks*512), stat.Name)
ch <- c.time.mustNewConstMetric(float64(stat.Time/c.tickPerSecond), stat.Name)
ch <- c.time.mustNewConstMetric(float64(stat.Time)/float64(c.tickPerSecond), stat.Name)
ch <- c.bsize.mustNewConstMetric(float64(stat.BSize), stat.Name)
ch <- c.qdepth.mustNewConstMetric(float64(stat.QDepth), stat.Name)
ch <- c.rserv.mustNewConstMetric(float64(stat.Rserv)/1e9, stat.Name)
ch <- c.wserv.mustNewConstMetric(float64(stat.Wserv)/1e9, stat.Name)
ch <- c.xfers.mustNewConstMetric(float64(stat.Xfers), stat.Name)
ch <- c.xrate.mustNewConstMetric(float64(stat.XRate), stat.Name)
}
return nil
}


@ -112,7 +112,7 @@ func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) {
infoDesc: typedFactorDesc{
desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "info"),
"Info of /sys/block/<block_device>.",
[]string{"device", "major", "minor", "path", "wwn", "model", "serial", "revision"},
[]string{"device", "major", "minor", "path", "wwn", "model", "serial", "revision", "rotational"},
nil,
), valueType: prometheus.GaugeValue,
},
@ -294,6 +294,12 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
serial = info[udevIDSerialShort]
}
queueStats, err := c.fs.SysBlockDeviceQueueStats(dev)
// Block Device Queue stats may not exist for all devices.
if err != nil && !os.IsNotExist(err) {
c.logger.Debug("Failed to get block device queue stats", "device", dev, "err", err)
}
ch <- c.infoDesc.mustNewConstMetric(1.0, dev,
fmt.Sprint(stats.MajorNumber),
fmt.Sprint(stats.MinorNumber),
@ -302,6 +308,7 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
info[udevIDModel],
serial,
info[udevIDRevision],
strconv.FormatUint(queueStats.Rotational, 2),
)
statCount := stats.IoStatsCount - 3 // Total diskstats record count, less MajorNumber, MinorNumber and DeviceName
@ -391,15 +398,9 @@ func getUdevDeviceProperties(major, minor uint32) (udevInfo, error) {
line = strings.TrimPrefix(line, udevDevicePropertyPrefix)
/* TODO: After we drop support for Go 1.17, the condition below can be simplified to:
if name, value, found := strings.Cut(line, "="); found {
info[name] = value
}
*/
if fields := strings.SplitN(line, "=", 2); len(fields) == 2 {
info[fields[0]] = fields[1]
}
}
return info, nil


@ -113,21 +113,21 @@ node_disk_flush_requests_time_seconds_total{device="sdc"} 1.944
node_disk_flush_requests_total{device="sdc"} 1555
# HELP node_disk_info Info of /sys/block/<block_device>.
# TYPE node_disk_info gauge
node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1
node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1
node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1
node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1
node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1
node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",serial="AAAABBBBCCCC1",wwn=""} 1
node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",rotational="0",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1
node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",rotational="1",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1
node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",rotational="0",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1
node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",rotational="0",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1
node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",rotational="0",serial="AAAABBBBCCCC1",wwn=""} 1
node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",rotational="0",serial="",wwn=""} 1
# HELP node_disk_io_now The number of I/Os currently in progress.
# TYPE node_disk_io_now gauge
node_disk_io_now{device="dm-0"} 0


@ -372,7 +372,7 @@ func (c *ethtoolCollector) updateSpeeds(ch chan<- prometheus.Metric, prefix stri
}
func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
netClass, err := c.fs.NetClass()
netClass, err := c.fs.NetClassDevices()
if err != nil {
if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) {
c.logger.Debug("Could not read netclass file", "err", err)
@ -385,7 +385,7 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
return fmt.Errorf("no network devices found")
}
for device := range netClass {
for _, device := range netClass {
var stats map[string]uint64
var err error
@ -446,13 +446,14 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
}
}
if stats == nil || len(stats) < 1 {
if len(stats) == 0 {
// No stats returned; device does not support ethtool stats.
continue
}
// Sanitizing the metric names can lead to duplicate metric names. Therefore check for clashes beforehand.
metricFQNames := make(map[string]string)
renamedStats := make(map[string]uint64, len(stats))
for metric := range stats {
metricName := SanitizeMetricName(metric)
if !c.metricsPattern.MatchString(metricName) {
@ -467,6 +468,8 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
metricFQNames[metricFQName] = ""
} else {
metricFQNames[metricFQName] = metricName
// Later we'll go look for the stat with the "sanitized" metric name, so we can copy it there already
renamedStats[metricName] = stats[metric]
}
}
@ -484,7 +487,7 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
continue
}
val := stats[metric]
val := renamedStats[metric]
// Check to see if this metric exists; if not then create it and store it in c.entries.
entry := c.entryWithCreate(metric, metricFQName)


@ -269,6 +269,7 @@ func NewEthtoolTestCollector(logger *slog.Logger) (Collector, error) {
func TestBuildEthtoolFQName(t *testing.T) {
testcases := map[string]string{
"port.rx_errors": "node_ethtool_port_received_errors",
"rx_errors": "node_ethtool_received_errors",
"Queue[0] AllocFails": "node_ethtool_queue_0_allocfails",
"Tx LPI entry count": "node_ethtool_transmitted_lpi_entry_count",
@ -292,6 +293,9 @@ node_ethtool_align_errors{device="eth0"} 0
# HELP node_ethtool_info A metric with a constant '1' value labeled by bus_info, device, driver, expansion_rom_version, firmware_version, version.
# TYPE node_ethtool_info gauge
node_ethtool_info{bus_info="0000:00:1f.6",device="eth0",driver="e1000e",expansion_rom_version="",firmware_version="0.5-4",version="5.11.0-22-generic"} 1
# HELP node_ethtool_port_received_dropped Network interface port_rx_dropped
# TYPE node_ethtool_port_received_dropped untyped
node_ethtool_port_received_dropped{device="eth0"} 12028
# HELP node_ethtool_received_broadcast Network interface rx_broadcast
# TYPE node_ethtool_received_broadcast untyped
node_ethtool_received_broadcast{device="eth0"} 5792


@ -32,12 +32,12 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
return nil, err
}
for _, stat := range fsStat {
if c.excludedMountPointsPattern.MatchString(stat.MountPoint) {
if c.mountPointFilter.ignored(stat.MountPoint) {
c.logger.Debug("Ignoring mount point", "mountpoint", stat.MountPoint)
continue
}
fstype := stat.TypeString()
if c.excludedFSTypesPattern.MatchString(fstype) {
if c.fsTypeFilter.ignored(fstype) {
c.logger.Debug("Ignoring fs type", "type", fstype)
continue
}
@ -53,9 +53,9 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
mountPoint: stat.MountPoint,
fsType: fstype,
},
size: float64(stat.TotalBlocks / 512.0),
free: float64(stat.FreeBlocks / 512.0),
avail: float64(stat.FreeBlocks / 512.0), // AIX doesn't distinguish between free and available blocks.
size: float64(stat.TotalBlocks * 512.0),
free: float64(stat.FreeBlocks * 512.0),
avail: float64(stat.FreeBlocks * 512.0), // AIX doesn't distinguish between free and available blocks.
files: float64(stat.TotalInodes),
filesFree: float64(stat.FreeInodes),
ro: ro,


@ -11,9 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build (darwin || dragonfly) && !nofilesystem
// +build darwin dragonfly
// +build !nofilesystem
//go:build dragonfly && !nofilesystem
// +build dragonfly,!nofilesystem
package collector
@ -48,14 +47,14 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
stats = []filesystemStats{}
for i := 0; i < int(count); i++ {
mountpoint := C.GoString(&mnt[i].f_mntonname[0])
if c.excludedMountPointsPattern.MatchString(mountpoint) {
if c.mountPointFilter.ignored(mountpoint) {
c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := C.GoString(&mnt[i].f_mntfromname[0])
fstype := C.GoString(&mnt[i].f_fstypename[0])
if c.excludedFSTypesPattern.MatchString(fstype) {
if c.fsTypeFilter.ignored(fstype) {
c.logger.Debug("Ignoring fs type", "type", fstype)
continue
}


@ -19,8 +19,8 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"regexp"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus/client_golang/prometheus"
@ -36,7 +36,7 @@ var (
mountPointsExcludeSet bool
mountPointsExclude = kingpin.Flag(
"collector.filesystem.mount-points-exclude",
"Regexp of mount points to exclude for filesystem collector.",
"Regexp of mount points to exclude for filesystem collector. (mutually exclusive to mount-points-include)",
).Default(defMountPointsExcluded).PreAction(func(c *kingpin.ParseContext) error {
mountPointsExcludeSet = true
return nil
@ -45,11 +45,15 @@ var (
"collector.filesystem.ignored-mount-points",
"Regexp of mount points to ignore for filesystem collector.",
).Hidden().String()
mountPointsInclude = kingpin.Flag(
"collector.filesystem.mount-points-include",
"Regexp of mount points to include for filesystem collector. (mutually exclusive to mount-points-exclude)",
).String()
fsTypesExcludeSet bool
fsTypesExclude = kingpin.Flag(
"collector.filesystem.fs-types-exclude",
"Regexp of filesystem types to exclude for filesystem collector.",
"Regexp of filesystem types to exclude for filesystem collector. (mutually exclusive to fs-types-include)",
).Default(defFSTypesExcluded).PreAction(func(c *kingpin.ParseContext) error {
fsTypesExcludeSet = true
return nil
@ -58,15 +62,20 @@ var (
"collector.filesystem.ignored-fs-types",
"Regexp of filesystem types to ignore for filesystem collector.",
).Hidden().String()
fsTypesInclude = kingpin.Flag(
"collector.filesystem.fs-types-include",
"Regexp of filesystem types to exclude for filesystem collector. (mutually exclusive to fs-types-exclude)",
).String()
filesystemLabelNames = []string{"device", "mountpoint", "fstype", "device_error"}
)
type filesystemCollector struct {
excludedMountPointsPattern *regexp.Regexp
excludedFSTypesPattern *regexp.Regexp
mountPointFilter deviceFilter
fsTypeFilter deviceFilter
sizeDesc, freeDesc, availDesc *prometheus.Desc
filesDesc, filesFreeDesc *prometheus.Desc
purgeableDesc *prometheus.Desc
roDesc, deviceErrorDesc *prometheus.Desc
mountInfoDesc *prometheus.Desc
logger *slog.Logger
@ -80,6 +89,7 @@ type filesystemStats struct {
labels filesystemLabels
size, free, avail float64
files, filesFree float64
purgeable float64
ro, deviceError float64
}
@ -89,29 +99,7 @@ func init() {
// NewFilesystemCollector returns a new Collector exposing filesystems stats.
func NewFilesystemCollector(logger *slog.Logger) (Collector, error) {
if *oldMountPointsExcluded != "" {
if !mountPointsExcludeSet {
logger.Warn("--collector.filesystem.ignored-mount-points is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.mount-points-exclude")
*mountPointsExclude = *oldMountPointsExcluded
} else {
return nil, errors.New("--collector.filesystem.ignored-mount-points and --collector.filesystem.mount-points-exclude are mutually exclusive")
}
}
if *oldFSTypesExcluded != "" {
if !fsTypesExcludeSet {
logger.Warn("--collector.filesystem.ignored-fs-types is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.fs-types-exclude")
*fsTypesExclude = *oldFSTypesExcluded
} else {
return nil, errors.New("--collector.filesystem.ignored-fs-types and --collector.filesystem.fs-types-exclude are mutually exclusive")
}
}
subsystem := "filesystem"
logger.Info("Parsed flag --collector.filesystem.mount-points-exclude", "flag", *mountPointsExclude)
mountPointPattern := regexp.MustCompile(*mountPointsExclude)
logger.Info("Parsed flag --collector.filesystem.fs-types-exclude", "flag", *fsTypesExclude)
filesystemsTypesPattern := regexp.MustCompile(*fsTypesExclude)
const subsystem = "filesystem"
sizeDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "size_bytes"),
@ -143,6 +131,12 @@ func NewFilesystemCollector(logger *slog.Logger) (Collector, error) {
filesystemLabelNames, nil,
)
purgeableDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "purgeable_bytes"),
"Filesystem space available including purgeable space (MacOS specific).",
filesystemLabelNames, nil,
)
roDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "readonly"),
"Filesystem read-only status.",
@ -162,18 +156,29 @@ func NewFilesystemCollector(logger *slog.Logger) (Collector, error) {
nil,
)
mountPointFilter, err := newMountPointsFilter(logger)
if err != nil {
return nil, fmt.Errorf("unable to parse mount points filter flags: %w", err)
}
fsTypeFilter, err := newFSTypeFilter(logger)
if err != nil {
return nil, fmt.Errorf("unable to parse fs types filter flags: %w", err)
}
return &filesystemCollector{
excludedMountPointsPattern: mountPointPattern,
excludedFSTypesPattern: filesystemsTypesPattern,
sizeDesc: sizeDesc,
freeDesc: freeDesc,
availDesc: availDesc,
filesDesc: filesDesc,
filesFreeDesc: filesFreeDesc,
roDesc: roDesc,
deviceErrorDesc: deviceErrorDesc,
mountInfoDesc: mountInfoDesc,
logger: logger,
mountPointFilter: mountPointFilter,
fsTypeFilter: fsTypeFilter,
sizeDesc: sizeDesc,
freeDesc: freeDesc,
availDesc: availDesc,
filesDesc: filesDesc,
filesFreeDesc: filesFreeDesc,
purgeableDesc: purgeableDesc,
roDesc: roDesc,
deviceErrorDesc: deviceErrorDesc,
mountInfoDesc: mountInfoDesc,
logger: logger,
}, nil
}
@ -227,6 +232,70 @@ func (c *filesystemCollector) Update(ch chan<- prometheus.Metric) error {
c.mountInfoDesc, prometheus.GaugeValue,
1.0, s.labels.device, s.labels.major, s.labels.minor, s.labels.mountPoint,
)
if s.purgeable >= 0 {
ch <- prometheus.MustNewConstMetric(
c.purgeableDesc, prometheus.GaugeValue,
s.purgeable, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError,
)
}
}
return nil
}
func newMountPointsFilter(logger *slog.Logger) (deviceFilter, error) {
if *oldMountPointsExcluded != "" {
if !mountPointsExcludeSet {
logger.Warn("--collector.filesystem.ignored-mount-points is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.mount-points-exclude")
*mountPointsExclude = *oldMountPointsExcluded
} else {
return deviceFilter{}, errors.New("--collector.filesystem.ignored-mount-points and --collector.filesystem.mount-points-exclude are mutually exclusive")
}
}
if *mountPointsInclude != "" && !mountPointsExcludeSet {
logger.Debug("mount-points-exclude flag not set when mount-points-include flag is set, assuming include is desired")
*mountPointsExclude = ""
}
if *mountPointsExclude != "" && *mountPointsInclude != "" {
return deviceFilter{}, errors.New("--collector.filesystem.mount-points-exclude and --collector.filesystem.mount-points-include are mutually exclusive")
}
if *mountPointsExclude != "" {
logger.Info("Parsed flag --collector.filesystem.mount-points-exclude", "flag", *mountPointsExclude)
}
if *mountPointsInclude != "" {
logger.Info("Parsed flag --collector.filesystem.mount-points-include", "flag", *mountPointsInclude)
}
return newDeviceFilter(*mountPointsExclude, *mountPointsInclude), nil
}
func newFSTypeFilter(logger *slog.Logger) (deviceFilter, error) {
if *oldFSTypesExcluded != "" {
if !fsTypesExcludeSet {
logger.Warn("--collector.filesystem.ignored-fs-types is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.fs-types-exclude")
*fsTypesExclude = *oldFSTypesExcluded
} else {
return deviceFilter{}, errors.New("--collector.filesystem.ignored-fs-types and --collector.filesystem.fs-types-exclude are mutually exclusive")
}
}
if *fsTypesInclude != "" && !fsTypesExcludeSet {
logger.Debug("fs-types-exclude flag not set when fs-types-include flag is set, assuming include is desired")
*fsTypesExclude = ""
}
if *fsTypesExclude != "" && *fsTypesInclude != "" {
return deviceFilter{}, errors.New("--collector.filesystem.fs-types-exclude and --collector.filesystem.fs-types-include are mutually exclusive")
}
if *fsTypesExclude != "" {
logger.Info("Parsed flag --collector.filesystem.fs-types-exclude", "flag", *fsTypesExclude)
}
if *fsTypesInclude != "" {
logger.Info("Parsed flag --collector.filesystem.fs-types-include", "flag", *fsTypesInclude)
}
return newDeviceFilter(*fsTypesExclude, *fsTypesInclude), nil
}
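
Both constructors above hand the resolved patterns to `newDeviceFilter`, which is not part of this diff. A minimal sketch of the include/exclude semantics the callers rely on, where `ignored()` returns true when a name matches the exclude pattern, or when an include pattern is set and the name does not match it; the field names and method bodies are assumptions, only the constructor and method names come from the code above:

```go
package collector

import "regexp"

// deviceFilter combines an optional exclude pattern and an optional include
// pattern for device names, mount points, or filesystem types.
type deviceFilter struct {
	ignorePattern *regexp.Regexp
	acceptPattern *regexp.Regexp
}

// newDeviceFilter compiles whichever of the two patterns are non-empty.
func newDeviceFilter(ignoredPattern, acceptPattern string) (f deviceFilter) {
	if ignoredPattern != "" {
		f.ignorePattern = regexp.MustCompile(ignoredPattern)
	}
	if acceptPattern != "" {
		f.acceptPattern = regexp.MustCompile(acceptPattern)
	}
	return
}

// ignored reports whether a name should be skipped: it matches the exclude
// pattern, or an include pattern exists and the name does not match it.
func (f *deviceFilter) ignored(name string) bool {
	return (f.ignorePattern != nil && f.ignorePattern.MatchString(name)) ||
		(f.acceptPattern != nil && !f.acceptPattern.MatchString(name))
}
```

With this shape an empty include pattern preserves the old exclude-only behaviour, which is why the constructors blank out the default exclude pattern when only an include flag is given.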


@ -39,14 +39,14 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
stats := []filesystemStats{}
for _, fs := range buf {
mountpoint := unix.ByteSliceToString(fs.Mntonname[:])
if c.excludedMountPointsPattern.MatchString(mountpoint) {
if c.mountPointFilter.ignored(mountpoint) {
c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := unix.ByteSliceToString(fs.Mntfromname[:])
fstype := unix.ByteSliceToString(fs.Fstypename[:])
if c.excludedFSTypesPattern.MatchString(fstype) {
if c.fsTypeFilter.ignored(fstype) {
c.logger.Debug("Ignoring fs type", "type", fstype)
continue
}


@ -73,12 +73,12 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
go func() {
for _, labels := range mps {
if c.excludedMountPointsPattern.MatchString(labels.mountPoint) {
if c.mountPointFilter.ignored(labels.mountPoint) {
c.logger.Debug("Ignoring mount point", "mountpoint", labels.mountPoint)
continue
}
if c.excludedFSTypesPattern.MatchString(labels.fsType) {
c.logger.Debug("Ignoring fs", "type", labels.fsType)
if c.fsTypeFilter.ignored(labels.fsType) {
c.logger.Debug("Ignoring fs type", "type", labels.fsType)
continue
}


@ -0,0 +1,114 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build darwin && !nofilesystem
// +build darwin,!nofilesystem
package collector
/*
#cgo CFLAGS: -x objective-c
#cgo LDFLAGS: -framework Foundation
#import <Foundation/Foundation.h>
Float64 purgeable(char *path) {
Float64 value = -1.0f;
@autoreleasepool {
NSError *error = nil;
NSString *str = [NSString stringWithUTF8String:path];
NSURL *fileURL = [[NSURL alloc] initFileURLWithPath:str];
NSDictionary *results = [fileURL resourceValuesForKeys:@[NSURLVolumeAvailableCapacityForImportantUsageKey] error:&error];
if (results) {
CFNumberRef tmp = CFDictionaryGetValue((CFDictionaryRef)results, NSURLVolumeAvailableCapacityForImportantUsageKey);
if (tmp != NULL) {
CFNumberGetValue(tmp, kCFNumberFloat64Type, &value);
}
}
[fileURL release];
}
return value;
}
*/
import "C"
import (
"errors"
"unsafe"
)
/*
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
#include <stdio.h>
*/
import "C"
const (
defMountPointsExcluded = "^/(dev)($|/)"
defFSTypesExcluded = "^devfs$"
readOnly = 0x1 // MNT_RDONLY
)
// Expose filesystem fullness.
func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
var mntbuf *C.struct_statfs
count := C.getmntinfo(&mntbuf, C.MNT_NOWAIT)
if count == 0 {
return nil, errors.New("getmntinfo() failed")
}
mnt := (*[1 << 20]C.struct_statfs)(unsafe.Pointer(mntbuf))
stats = []filesystemStats{}
for i := 0; i < int(count); i++ {
mountpoint := C.GoString(&mnt[i].f_mntonname[0])
if c.mountPointFilter.ignored(mountpoint) {
c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := C.GoString(&mnt[i].f_mntfromname[0])
fstype := C.GoString(&mnt[i].f_fstypename[0])
if c.fsTypeFilter.ignored(fstype) {
c.logger.Debug("Ignoring fs type", "type", fstype)
continue
}
var ro float64
if (mnt[i].f_flags & readOnly) != 0 {
ro = 1
}
mountpointCString := C.CString(mountpoint)
defer C.free(unsafe.Pointer(mountpointCString))
stats = append(stats, filesystemStats{
labels: filesystemLabels{
device: device,
mountPoint: rootfsStripPrefix(mountpoint),
fsType: fstype,
},
size: float64(mnt[i].f_blocks) * float64(mnt[i].f_bsize),
free: float64(mnt[i].f_bfree) * float64(mnt[i].f_bsize),
avail: float64(mnt[i].f_bavail) * float64(mnt[i].f_bsize),
files: float64(mnt[i].f_files),
filesFree: float64(mnt[i].f_ffree),
purgeable: float64(C.purgeable(mountpointCString)),
ro: ro,
})
}
return stats, nil
}


@ -97,14 +97,14 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
stats = []filesystemStats{}
for _, v := range mnt {
mountpoint := unix.ByteSliceToString(v.F_mntonname[:])
if c.excludedMountPointsPattern.MatchString(mountpoint) {
if c.mountPointFilter.ignored(mountpoint) {
c.logger.Debug("msg", "Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := unix.ByteSliceToString(v.F_mntfromname[:])
fstype := unix.ByteSliceToString(v.F_fstypename[:])
if c.excludedFSTypesPattern.MatchString(fstype) {
if c.fsTypeFilter.ignored(fstype) {
c.logger.Debug("msg", "Ignoring fs type", "type", fstype)
continue
}


@ -41,14 +41,14 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
stats = []filesystemStats{}
for _, v := range mnt {
mountpoint := unix.ByteSliceToString(v.F_mntonname[:])
if c.excludedMountPointsPattern.MatchString(mountpoint) {
if c.mountPointFilter.ignored(mountpoint) {
c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := unix.ByteSliceToString(v.F_mntfromname[:])
fstype := unix.ByteSliceToString(v.F_fstypename[:])
if c.excludedFSTypesPattern.MatchString(fstype) {
if c.fsTypeFilter.ignored(fstype) {
c.logger.Debug("Ignoring fs type", "type", fstype)
continue
}


@ -1,8 +1,8 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
@ -52,7 +52,7 @@
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
@ -488,21 +488,21 @@ node_disk_flush_requests_time_seconds_total{device="sdc"} 1.944
node_disk_flush_requests_total{device="sdc"} 1555
# HELP node_disk_info Info of /sys/block/<block_device>.
# TYPE node_disk_info gauge
node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1
node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1
node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1
node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1
node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1
node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",serial="AAAABBBBCCCC1",wwn=""} 1
node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",rotational="0",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1
node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",rotational="1",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1
node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",rotational="0",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1
node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",rotational="0",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1
node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",rotational="0",serial="AAAABBBBCCCC1",wwn=""} 1
node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",rotational="0",serial="",wwn=""} 1
# HELP node_disk_io_now The number of I/Os currently in progress.
# TYPE node_disk_io_now gauge
node_disk_io_now{device="dm-0"} 0
@ -871,6 +871,10 @@ node_hwmon_fan_target_rpm{chip="nct6779",sensor="fan2"} 27000
# HELP node_hwmon_fan_tolerance Hardware monitor fan element tolerance
# TYPE node_hwmon_fan_tolerance gauge
node_hwmon_fan_tolerance{chip="nct6779",sensor="fan2"} 0
# HELP node_hwmon_freq_freq_mhz Hardware monitor for GPU frequency in MHz
# TYPE node_hwmon_freq_freq_mhz gauge
node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="mclk"} 300
node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="sclk"} 214
# HELP node_hwmon_in_alarm Hardware sensor alarm status (in)
# TYPE node_hwmon_in_alarm gauge
node_hwmon_in_alarm{chip="nct6779",sensor="in0"} 0
@ -984,8 +988,10 @@ node_hwmon_pwm_weight_temp_step_tol{chip="nct6779",sensor="pwm1"} 0
# TYPE node_hwmon_sensor_label gauge
node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp1"} 1
node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp2"} 1
node_hwmon_sensor_label{chip="platform_applesmc_768",label="Left side ",sensor="fan1"} 1
node_hwmon_sensor_label{chip="platform_applesmc_768",label="Right side ",sensor="fan2"} 1
node_hwmon_sensor_label{chip="hwmon4",label="mclk",sensor="freq2"} 1
node_hwmon_sensor_label{chip="hwmon4",label="sclk",sensor="freq1"} 1
node_hwmon_sensor_label{chip="platform_applesmc_768",label="Left side",sensor="fan1"} 1
node_hwmon_sensor_label{chip="platform_applesmc_768",label="Right side",sensor="fan2"} 1
node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 0",sensor="temp2"} 1
node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 1",sensor="temp3"} 1
node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 2",sensor="temp4"} 1
@ -1579,6 +1585,14 @@ node_md_blocks_synced{device="md6"} 1.6775552e+07
node_md_blocks_synced{device="md7"} 7.813735424e+09
node_md_blocks_synced{device="md8"} 1.6775552e+07
node_md_blocks_synced{device="md9"} 0
# HELP node_md_degraded Number of degraded disks on device.
# TYPE node_md_degraded gauge
node_md_degraded{device="md0"} 0
node_md_degraded{device="md1"} 0
node_md_degraded{device="md10"} 0
node_md_degraded{device="md4"} 0
node_md_degraded{device="md5"} 1
node_md_degraded{device="md6"} 1
# HELP node_md_disks Number of active/failed/spare disks of device.
# TYPE node_md_disks gauge
node_md_disks{device="md0",state="active"} 2
@ -1651,6 +1665,14 @@ node_md_disks_required{device="md6"} 2
node_md_disks_required{device="md7"} 4
node_md_disks_required{device="md8"} 2
node_md_disks_required{device="md9"} 4
# HELP node_md_raid_disks Number of raid disks on device.
# TYPE node_md_raid_disks gauge
node_md_raid_disks{device="md0"} 2
node_md_raid_disks{device="md1"} 2
node_md_raid_disks{device="md10"} 4
node_md_raid_disks{device="md4"} 3
node_md_raid_disks{device="md5"} 3
node_md_raid_disks{device="md6"} 4
# HELP node_md_state Indicates the state of md-device.
# TYPE node_md_state gauge
node_md_state{device="md0",state="active"} 1
@ -4315,108 +4337,139 @@ node_zfs_zil_zil_itx_needcopy_count 0
# TYPE node_zfs_zpool_dataset_nread untyped
node_zfs_zpool_dataset_nread{dataset="pool1",zpool="pool1"} 0
node_zfs_zpool_dataset_nread{dataset="pool1/dataset1",zpool="pool1"} 28
node_zfs_zpool_dataset_nread{dataset="pool3",zpool="pool3"} 0
node_zfs_zpool_dataset_nread{dataset="pool3/dataset with space",zpool="pool3"} 28
node_zfs_zpool_dataset_nread{dataset="poolz1",zpool="poolz1"} 0
node_zfs_zpool_dataset_nread{dataset="poolz1/dataset1",zpool="poolz1"} 28
# HELP node_zfs_zpool_dataset_nunlinked kstat.zfs.misc.objset.nunlinked
# TYPE node_zfs_zpool_dataset_nunlinked untyped
node_zfs_zpool_dataset_nunlinked{dataset="pool1",zpool="pool1"} 0
node_zfs_zpool_dataset_nunlinked{dataset="pool1/dataset1",zpool="pool1"} 3
node_zfs_zpool_dataset_nunlinked{dataset="pool3",zpool="pool3"} 0
node_zfs_zpool_dataset_nunlinked{dataset="pool3/dataset with space",zpool="pool3"} 3
node_zfs_zpool_dataset_nunlinked{dataset="poolz1",zpool="poolz1"} 0
node_zfs_zpool_dataset_nunlinked{dataset="poolz1/dataset1",zpool="poolz1"} 14
# HELP node_zfs_zpool_dataset_nunlinks kstat.zfs.misc.objset.nunlinks
# TYPE node_zfs_zpool_dataset_nunlinks untyped
node_zfs_zpool_dataset_nunlinks{dataset="pool1",zpool="pool1"} 0
node_zfs_zpool_dataset_nunlinks{dataset="pool1/dataset1",zpool="pool1"} 3
node_zfs_zpool_dataset_nunlinks{dataset="pool3",zpool="pool3"} 0
node_zfs_zpool_dataset_nunlinks{dataset="pool3/dataset with space",zpool="pool3"} 3
node_zfs_zpool_dataset_nunlinks{dataset="poolz1",zpool="poolz1"} 0
node_zfs_zpool_dataset_nunlinks{dataset="poolz1/dataset1",zpool="poolz1"} 14
# HELP node_zfs_zpool_dataset_nwritten kstat.zfs.misc.objset.nwritten
# TYPE node_zfs_zpool_dataset_nwritten untyped
node_zfs_zpool_dataset_nwritten{dataset="pool1",zpool="pool1"} 0
node_zfs_zpool_dataset_nwritten{dataset="pool1/dataset1",zpool="pool1"} 12302
node_zfs_zpool_dataset_nwritten{dataset="pool3",zpool="pool3"} 0
node_zfs_zpool_dataset_nwritten{dataset="pool3/dataset with space",zpool="pool3"} 12302
node_zfs_zpool_dataset_nwritten{dataset="poolz1",zpool="poolz1"} 0
node_zfs_zpool_dataset_nwritten{dataset="poolz1/dataset1",zpool="poolz1"} 32806
# HELP node_zfs_zpool_dataset_reads kstat.zfs.misc.objset.reads
# TYPE node_zfs_zpool_dataset_reads untyped
node_zfs_zpool_dataset_reads{dataset="pool1",zpool="pool1"} 0
node_zfs_zpool_dataset_reads{dataset="pool1/dataset1",zpool="pool1"} 2
node_zfs_zpool_dataset_reads{dataset="pool3",zpool="pool3"} 0
node_zfs_zpool_dataset_reads{dataset="pool3/dataset with space",zpool="pool3"} 2
node_zfs_zpool_dataset_reads{dataset="poolz1",zpool="poolz1"} 0
node_zfs_zpool_dataset_reads{dataset="poolz1/dataset1",zpool="poolz1"} 2
# HELP node_zfs_zpool_dataset_writes kstat.zfs.misc.objset.writes
# TYPE node_zfs_zpool_dataset_writes untyped
node_zfs_zpool_dataset_writes{dataset="pool1",zpool="pool1"} 0
node_zfs_zpool_dataset_writes{dataset="pool1/dataset1",zpool="pool1"} 4
node_zfs_zpool_dataset_writes{dataset="pool3",zpool="pool3"} 0
node_zfs_zpool_dataset_writes{dataset="pool3/dataset with space",zpool="pool3"} 4
node_zfs_zpool_dataset_writes{dataset="poolz1",zpool="poolz1"} 0
node_zfs_zpool_dataset_writes{dataset="poolz1/dataset1",zpool="poolz1"} 10
# HELP node_zfs_zpool_nread kstat.zfs.misc.io.nread
# TYPE node_zfs_zpool_nread untyped
node_zfs_zpool_nread{zpool="pool1"} 1.88416e+06
node_zfs_zpool_nread{zpool="pool3"} 1.88416e+06
node_zfs_zpool_nread{zpool="poolz1"} 2.82624e+06
# HELP node_zfs_zpool_nwritten kstat.zfs.misc.io.nwritten
# TYPE node_zfs_zpool_nwritten untyped
node_zfs_zpool_nwritten{zpool="pool1"} 3.206144e+06
node_zfs_zpool_nwritten{zpool="pool3"} 3.206144e+06
node_zfs_zpool_nwritten{zpool="poolz1"} 2.680501248e+09
# HELP node_zfs_zpool_rcnt kstat.zfs.misc.io.rcnt
# TYPE node_zfs_zpool_rcnt untyped
node_zfs_zpool_rcnt{zpool="pool1"} 0
node_zfs_zpool_rcnt{zpool="pool3"} 0
node_zfs_zpool_rcnt{zpool="poolz1"} 0
# HELP node_zfs_zpool_reads kstat.zfs.misc.io.reads
# TYPE node_zfs_zpool_reads untyped
node_zfs_zpool_reads{zpool="pool1"} 22
node_zfs_zpool_reads{zpool="pool3"} 22
node_zfs_zpool_reads{zpool="poolz1"} 33
# HELP node_zfs_zpool_rlentime kstat.zfs.misc.io.rlentime
# TYPE node_zfs_zpool_rlentime untyped
node_zfs_zpool_rlentime{zpool="pool1"} 1.04112268e+08
node_zfs_zpool_rlentime{zpool="pool3"} 1.04112268e+08
node_zfs_zpool_rlentime{zpool="poolz1"} 6.472105124093e+12
# HELP node_zfs_zpool_rtime kstat.zfs.misc.io.rtime
# TYPE node_zfs_zpool_rtime untyped
node_zfs_zpool_rtime{zpool="pool1"} 2.4168078e+07
node_zfs_zpool_rtime{zpool="pool3"} 2.4168078e+07
node_zfs_zpool_rtime{zpool="poolz1"} 9.82909164e+09
# HELP node_zfs_zpool_rupdate kstat.zfs.misc.io.rupdate
# TYPE node_zfs_zpool_rupdate untyped
node_zfs_zpool_rupdate{zpool="pool1"} 7.921048984922e+13
node_zfs_zpool_rupdate{zpool="pool3"} 7.921048984922e+13
node_zfs_zpool_rupdate{zpool="poolz1"} 1.10734831944501e+14
# HELP node_zfs_zpool_state kstat.zfs.misc.state
# TYPE node_zfs_zpool_state gauge
node_zfs_zpool_state{state="degraded",zpool="pool1"} 0
node_zfs_zpool_state{state="degraded",zpool="pool2"} 0
node_zfs_zpool_state{state="degraded",zpool="pool3"} 0
node_zfs_zpool_state{state="degraded",zpool="poolz1"} 1
node_zfs_zpool_state{state="faulted",zpool="pool1"} 0
node_zfs_zpool_state{state="faulted",zpool="pool2"} 0
node_zfs_zpool_state{state="faulted",zpool="pool3"} 0
node_zfs_zpool_state{state="faulted",zpool="poolz1"} 0
node_zfs_zpool_state{state="offline",zpool="pool1"} 0
node_zfs_zpool_state{state="offline",zpool="pool2"} 0
node_zfs_zpool_state{state="offline",zpool="pool3"} 0
node_zfs_zpool_state{state="offline",zpool="poolz1"} 0
node_zfs_zpool_state{state="online",zpool="pool1"} 1
node_zfs_zpool_state{state="online",zpool="pool2"} 0
node_zfs_zpool_state{state="online",zpool="pool3"} 1
node_zfs_zpool_state{state="online",zpool="poolz1"} 0
node_zfs_zpool_state{state="removed",zpool="pool1"} 0
node_zfs_zpool_state{state="removed",zpool="pool2"} 0
node_zfs_zpool_state{state="removed",zpool="pool3"} 0
node_zfs_zpool_state{state="removed",zpool="poolz1"} 0
node_zfs_zpool_state{state="suspended",zpool="pool1"} 0
node_zfs_zpool_state{state="suspended",zpool="pool2"} 1
node_zfs_zpool_state{state="suspended",zpool="pool3"} 0
node_zfs_zpool_state{state="suspended",zpool="poolz1"} 0
node_zfs_zpool_state{state="unavail",zpool="pool1"} 0
node_zfs_zpool_state{state="unavail",zpool="pool2"} 0
node_zfs_zpool_state{state="unavail",zpool="pool3"} 0
node_zfs_zpool_state{state="unavail",zpool="poolz1"} 0
# HELP node_zfs_zpool_wcnt kstat.zfs.misc.io.wcnt
# TYPE node_zfs_zpool_wcnt untyped
node_zfs_zpool_wcnt{zpool="pool1"} 0
node_zfs_zpool_wcnt{zpool="pool3"} 0
node_zfs_zpool_wcnt{zpool="poolz1"} 0
# HELP node_zfs_zpool_wlentime kstat.zfs.misc.io.wlentime
# TYPE node_zfs_zpool_wlentime untyped
node_zfs_zpool_wlentime{zpool="pool1"} 1.04112268e+08
node_zfs_zpool_wlentime{zpool="pool3"} 1.04112268e+08
node_zfs_zpool_wlentime{zpool="poolz1"} 6.472105124093e+12
# HELP node_zfs_zpool_writes kstat.zfs.misc.io.writes
# TYPE node_zfs_zpool_writes untyped
node_zfs_zpool_writes{zpool="pool1"} 132
node_zfs_zpool_writes{zpool="pool3"} 132
node_zfs_zpool_writes{zpool="poolz1"} 25294
# HELP node_zfs_zpool_wtime kstat.zfs.misc.io.wtime
# TYPE node_zfs_zpool_wtime untyped
node_zfs_zpool_wtime{zpool="pool1"} 7.155162e+06
node_zfs_zpool_wtime{zpool="pool3"} 7.155162e+06
node_zfs_zpool_wtime{zpool="poolz1"} 9.673715628e+09
# HELP node_zfs_zpool_wupdate kstat.zfs.misc.io.wupdate
# TYPE node_zfs_zpool_wupdate untyped
node_zfs_zpool_wupdate{zpool="pool1"} 7.9210489694949e+13
node_zfs_zpool_wupdate{zpool="pool3"} 7.9210489694949e+13
node_zfs_zpool_wupdate{zpool="poolz1"} 1.10734831833266e+14
# HELP node_zoneinfo_high_pages Zone watermark pages_high
# TYPE node_zoneinfo_high_pages gauge

View file

@ -0,0 +1,291 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_buddyinfo_blocks Count of free blocks according to size.
# TYPE node_buddyinfo_blocks gauge
node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759
node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381
node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572
node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093
node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3
node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791
node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185
node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475
node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530
node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2
node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194
node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567
node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45
node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102
node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12
node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4
node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0
# HELP node_disk_read_errors_total The total number of read errors.
# TYPE node_disk_read_errors_total counter
node_disk_read_errors_total{device="disk0"} 0
# HELP node_disk_read_retries_total The total number of read retries.
# TYPE node_disk_read_retries_total counter
node_disk_read_retries_total{device="disk0"} 0
# HELP node_disk_write_errors_total The total number of write errors.
# TYPE node_disk_write_errors_total counter
node_disk_write_errors_total{device="disk0"} 0
# HELP node_disk_write_retries_total The total number of write retries.
# TYPE node_disk_write_retries_total counter
node_disk_write_retries_total{device="disk0"} 0
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_memory_swap_total_bytes Memory information field swap_total_bytes.
# TYPE node_memory_swap_total_bytes gauge
node_memory_swap_total_bytes 0
# HELP node_memory_swap_used_bytes Memory information field swap_used_bytes.
# TYPE node_memory_swap_used_bytes gauge
node_memory_swap_used_bytes 0
# HELP node_memory_total_bytes Memory information field total_bytes.
# TYPE node_memory_total_bytes gauge
node_memory_total_bytes 7.516192768e+09
# HELP node_network_noproto_total Network device statistic noproto.
# TYPE node_network_noproto_total counter
node_network_noproto_total{device="lo0"} 0
# HELP node_network_receive_drop_total Network device statistic receive_drop.
# TYPE node_network_receive_drop_total counter
node_network_receive_drop_total{device="lo0"} 0
# HELP node_network_receive_errs_total Network device statistic receive_errs.
# TYPE node_network_receive_errs_total counter
node_network_receive_errs_total{device="lo0"} 0
# HELP node_network_receive_packets_total Network device statistic receive_packets.
# TYPE node_network_receive_packets_total counter
# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes.
# TYPE node_network_transmit_bytes_total counter
# HELP node_network_transmit_colls_total Network device statistic transmit_colls.
# TYPE node_network_transmit_colls_total counter
node_network_transmit_colls_total{device="lo0"} 0
# HELP node_network_transmit_errs_total Network device statistic transmit_errs.
# TYPE node_network_transmit_errs_total counter
node_network_transmit_errs_total{device="lo0"} 0
# HELP node_network_transmit_packets_total Network device statistic transmit_packets.
# TYPE node_network_transmit_packets_total counter
# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id.
# TYPE node_os_info gauge
node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1
# HELP node_os_version Metric containing the major.minor part of the OS version.
# TYPE node_os_version gauge
node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="boottime"} 1
node_scrape_collector_success{collector="buddyinfo"} 1
node_scrape_collector_success{collector="cpu"} 1
node_scrape_collector_success{collector="diskstats"} 1
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="meminfo"} 1
node_scrape_collector_success{collector="netdev"} 1
node_scrape_collector_success{collector="os"} 1
node_scrape_collector_success{collector="powersupplyclass"} 1
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="thermal"} 0
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="xfrm"} 1
# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime_seconds gauge
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
# HELP node_xfrm_acquire_error_packets_total State hasn't been fully acquired before use
# TYPE node_xfrm_acquire_error_packets_total counter
node_xfrm_acquire_error_packets_total 24532
# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed
# TYPE node_xfrm_fwd_hdr_error_packets_total counter
node_xfrm_fwd_hdr_error_packets_total 6654
# HELP node_xfrm_in_buffer_error_packets_total No buffer is left
# TYPE node_xfrm_in_buffer_error_packets_total counter
node_xfrm_in_buffer_error_packets_total 2
# HELP node_xfrm_in_error_packets_total All errors not matched by other
# TYPE node_xfrm_in_error_packets_total counter
node_xfrm_in_error_packets_total 1
# HELP node_xfrm_in_hdr_error_packets_total Header error
# TYPE node_xfrm_in_hdr_error_packets_total counter
node_xfrm_in_hdr_error_packets_total 4
# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found
# TYPE node_xfrm_in_no_pols_packets_total counter
node_xfrm_in_no_pols_packets_total 65432
# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong
# TYPE node_xfrm_in_no_states_packets_total counter
node_xfrm_in_no_states_packets_total 3
# HELP node_xfrm_in_pol_block_packets_total Policy discards
# TYPE node_xfrm_in_pol_block_packets_total counter
node_xfrm_in_pol_block_packets_total 100
# HELP node_xfrm_in_pol_error_packets_total Policy error
# TYPE node_xfrm_in_pol_error_packets_total counter
node_xfrm_in_pol_error_packets_total 10000
# HELP node_xfrm_in_state_expired_packets_total State is expired
# TYPE node_xfrm_in_state_expired_packets_total counter
node_xfrm_in_state_expired_packets_total 7
# HELP node_xfrm_in_state_invalid_packets_total State is invalid
# TYPE node_xfrm_in_state_invalid_packets_total counter
node_xfrm_in_state_invalid_packets_total 55555
# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch
# TYPE node_xfrm_in_state_mismatch_packets_total counter
node_xfrm_in_state_mismatch_packets_total 23451
# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_in_state_mode_error_packets_total counter
node_xfrm_in_state_mode_error_packets_total 100
# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong
# TYPE node_xfrm_in_state_proto_error_packets_total counter
node_xfrm_in_state_proto_error_packets_total 40
# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window
# TYPE node_xfrm_in_state_seq_error_packets_total counter
node_xfrm_in_state_seq_error_packets_total 6000
# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong
# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter
node_xfrm_in_tmpl_mismatch_packets_total 51
# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error
# TYPE node_xfrm_out_bundle_check_error_packets_total counter
node_xfrm_out_bundle_check_error_packets_total 555
# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error
# TYPE node_xfrm_out_bundle_gen_error_packets_total counter
node_xfrm_out_bundle_gen_error_packets_total 43321
# HELP node_xfrm_out_error_packets_total All errors which is not matched others
# TYPE node_xfrm_out_error_packets_total counter
node_xfrm_out_error_packets_total 1e+06
# HELP node_xfrm_out_no_states_packets_total No state is found
# TYPE node_xfrm_out_no_states_packets_total counter
node_xfrm_out_no_states_packets_total 869
# HELP node_xfrm_out_pol_block_packets_total Policy discards
# TYPE node_xfrm_out_pol_block_packets_total counter
node_xfrm_out_pol_block_packets_total 43456
# HELP node_xfrm_out_pol_dead_packets_total Policy is dead
# TYPE node_xfrm_out_pol_dead_packets_total counter
node_xfrm_out_pol_dead_packets_total 7656
# HELP node_xfrm_out_pol_error_packets_total Policy error
# TYPE node_xfrm_out_pol_error_packets_total counter
node_xfrm_out_pol_error_packets_total 1454
# HELP node_xfrm_out_state_expired_packets_total State is expired
# TYPE node_xfrm_out_state_expired_packets_total counter
node_xfrm_out_state_expired_packets_total 565
# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired
# TYPE node_xfrm_out_state_invalid_packets_total counter
node_xfrm_out_state_invalid_packets_total 28765
# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_out_state_mode_error_packets_total counter
node_xfrm_out_state_mode_error_packets_total 8
# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error
# TYPE node_xfrm_out_state_proto_error_packets_total counter
node_xfrm_out_state_proto_error_packets_total 4542
# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow
# TYPE node_xfrm_out_state_seq_error_packets_total counter
node_xfrm_out_state_seq_error_packets_total 543
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10
# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_2 untyped
testmetric1_2{foo="baz"} 20
# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_1 untyped
testmetric2_1{foo="bar"} 30
# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_2 untyped
testmetric2_2{foo="baz"} 40

View file

@ -0,0 +1,251 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_buddyinfo_blocks Count of free blocks according to size.
# TYPE node_buddyinfo_blocks gauge
node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759
node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381
node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572
node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093
node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3
node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791
node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185
node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475
node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530
node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2
node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194
node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567
node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45
node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102
node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12
node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4
node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_network_receive_drop_total Network device statistic receive_drop.
# TYPE node_network_receive_drop_total counter
node_network_receive_drop_total{device="lo0"} 0
# HELP node_network_receive_errs_total Network device statistic receive_errs.
# TYPE node_network_receive_errs_total counter
node_network_receive_errs_total{device="lo0"} 0
# HELP node_network_receive_packets_total Network device statistic receive_packets.
# TYPE node_network_receive_packets_total counter
# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes.
# TYPE node_network_transmit_bytes_total counter
# HELP node_network_transmit_drop_total Network device statistic transmit_drop.
# TYPE node_network_transmit_drop_total counter
node_network_transmit_drop_total{device="lo0"} 0
# HELP node_network_transmit_errs_total Network device statistic transmit_errs.
# TYPE node_network_transmit_errs_total counter
node_network_transmit_errs_total{device="lo0"} 0
# HELP node_network_transmit_packets_total Network device statistic transmit_packets.
# TYPE node_network_transmit_packets_total counter
# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id.
# TYPE node_os_info gauge
node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1
# HELP node_os_version Metric containing the major.minor part of the OS version.
# TYPE node_os_version gauge
node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="boottime"} 1
node_scrape_collector_success{collector="buddyinfo"} 1
node_scrape_collector_success{collector="cpu"} 1
node_scrape_collector_success{collector="exec"} 1
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="meminfo"} 0
node_scrape_collector_success{collector="netdev"} 1
node_scrape_collector_success{collector="os"} 1
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="xfrm"} 1
# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime_seconds gauge
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
# HELP node_xfrm_acquire_error_packets_total State hasn't been fully acquired before use
# TYPE node_xfrm_acquire_error_packets_total counter
node_xfrm_acquire_error_packets_total 24532
# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed
# TYPE node_xfrm_fwd_hdr_error_packets_total counter
node_xfrm_fwd_hdr_error_packets_total 6654
# HELP node_xfrm_in_buffer_error_packets_total No buffer is left
# TYPE node_xfrm_in_buffer_error_packets_total counter
node_xfrm_in_buffer_error_packets_total 2
# HELP node_xfrm_in_error_packets_total All errors not matched by other
# TYPE node_xfrm_in_error_packets_total counter
node_xfrm_in_error_packets_total 1
# HELP node_xfrm_in_hdr_error_packets_total Header error
# TYPE node_xfrm_in_hdr_error_packets_total counter
node_xfrm_in_hdr_error_packets_total 4
# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found
# TYPE node_xfrm_in_no_pols_packets_total counter
node_xfrm_in_no_pols_packets_total 65432
# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong
# TYPE node_xfrm_in_no_states_packets_total counter
node_xfrm_in_no_states_packets_total 3
# HELP node_xfrm_in_pol_block_packets_total Policy discards
# TYPE node_xfrm_in_pol_block_packets_total counter
node_xfrm_in_pol_block_packets_total 100
# HELP node_xfrm_in_pol_error_packets_total Policy error
# TYPE node_xfrm_in_pol_error_packets_total counter
node_xfrm_in_pol_error_packets_total 10000
# HELP node_xfrm_in_state_expired_packets_total State is expired
# TYPE node_xfrm_in_state_expired_packets_total counter
node_xfrm_in_state_expired_packets_total 7
# HELP node_xfrm_in_state_invalid_packets_total State is invalid
# TYPE node_xfrm_in_state_invalid_packets_total counter
node_xfrm_in_state_invalid_packets_total 55555
# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch
# TYPE node_xfrm_in_state_mismatch_packets_total counter
node_xfrm_in_state_mismatch_packets_total 23451
# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_in_state_mode_error_packets_total counter
node_xfrm_in_state_mode_error_packets_total 100
# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong
# TYPE node_xfrm_in_state_proto_error_packets_total counter
node_xfrm_in_state_proto_error_packets_total 40
# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window
# TYPE node_xfrm_in_state_seq_error_packets_total counter
node_xfrm_in_state_seq_error_packets_total 6000
# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong
# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter
node_xfrm_in_tmpl_mismatch_packets_total 51
# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error
# TYPE node_xfrm_out_bundle_check_error_packets_total counter
node_xfrm_out_bundle_check_error_packets_total 555
# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error
# TYPE node_xfrm_out_bundle_gen_error_packets_total counter
node_xfrm_out_bundle_gen_error_packets_total 43321
# HELP node_xfrm_out_error_packets_total All errors which is not matched others
# TYPE node_xfrm_out_error_packets_total counter
node_xfrm_out_error_packets_total 1e+06
# HELP node_xfrm_out_no_states_packets_total No state is found
# TYPE node_xfrm_out_no_states_packets_total counter
node_xfrm_out_no_states_packets_total 869
# HELP node_xfrm_out_pol_block_packets_total Policy discards
# TYPE node_xfrm_out_pol_block_packets_total counter
node_xfrm_out_pol_block_packets_total 43456
# HELP node_xfrm_out_pol_dead_packets_total Policy is dead
# TYPE node_xfrm_out_pol_dead_packets_total counter
node_xfrm_out_pol_dead_packets_total 7656
# HELP node_xfrm_out_pol_error_packets_total Policy error
# TYPE node_xfrm_out_pol_error_packets_total counter
node_xfrm_out_pol_error_packets_total 1454
# HELP node_xfrm_out_state_expired_packets_total State is expired
# TYPE node_xfrm_out_state_expired_packets_total counter
node_xfrm_out_state_expired_packets_total 565
# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired
# TYPE node_xfrm_out_state_invalid_packets_total counter
node_xfrm_out_state_invalid_packets_total 28765
# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_out_state_mode_error_packets_total counter
node_xfrm_out_state_mode_error_packets_total 8
# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error
# TYPE node_xfrm_out_state_proto_error_packets_total counter
node_xfrm_out_state_proto_error_packets_total 4542
# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow
# TYPE node_xfrm_out_state_seq_error_packets_total counter
node_xfrm_out_state_seq_error_packets_total 543
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10
# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_2 untyped
testmetric1_2{foo="baz"} 20
# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_1 untyped
testmetric2_1{foo="bar"} 30
# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_2 untyped
testmetric2_2{foo="baz"} 40

View file

@ -0,0 +1,287 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_buddyinfo_blocks Count of free blocks according to size.
# TYPE node_buddyinfo_blocks gauge
node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759
node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381
node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572
node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093
node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3
node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791
node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185
node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475
node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530
node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2
node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194
node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567
node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45
node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102
node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12
node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4
node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_memory_swap_in_bytes_total Bytes paged in from swap devices
# TYPE node_memory_swap_in_bytes_total counter
node_memory_swap_in_bytes_total 0
# HELP node_memory_swap_out_bytes_total Bytes paged out to swap devices
# TYPE node_memory_swap_out_bytes_total counter
node_memory_swap_out_bytes_total 0
# HELP node_memory_swap_size_bytes Total swap memory size
# TYPE node_memory_swap_size_bytes gauge
node_memory_swap_size_bytes 1.073741824e+09
# HELP node_memory_swap_used_bytes Currently allocated swap
# TYPE node_memory_swap_used_bytes gauge
node_memory_swap_used_bytes 0
# HELP node_memory_user_wired_bytes Locked in memory by user, mlock, etc
# TYPE node_memory_user_wired_bytes gauge
node_memory_user_wired_bytes 0
# HELP node_netisr_bindthreads netisr threads bound to CPUs
# TYPE node_netisr_bindthreads gauge
node_netisr_bindthreads 0
# HELP node_netisr_defaultqlimit netisr default queue limit
# TYPE node_netisr_defaultqlimit gauge
node_netisr_defaultqlimit 256
# HELP node_netisr_maxprot netisr maximum protocols
# TYPE node_netisr_maxprot gauge
node_netisr_maxprot 16
# HELP node_netisr_maxqlimit netisr maximum queue limit
# TYPE node_netisr_maxqlimit gauge
node_netisr_maxqlimit 10240
# HELP node_netisr_maxthreads netisr maximum thread count
# TYPE node_netisr_maxthreads gauge
node_netisr_maxthreads 1
# HELP node_netisr_numthreads netisr current thread count
# TYPE node_netisr_numthreads gauge
node_netisr_numthreads 1
# HELP node_network_receive_drop_total Network device statistic receive_drop.
# TYPE node_network_receive_drop_total counter
node_network_receive_drop_total{device="lo0"} 0
# HELP node_network_receive_errs_total Network device statistic receive_errs.
# TYPE node_network_receive_errs_total counter
node_network_receive_errs_total{device="lo0"} 0
# HELP node_network_receive_packets_total Network device statistic receive_packets.
# TYPE node_network_receive_packets_total counter
# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes.
# TYPE node_network_transmit_bytes_total counter
# HELP node_network_transmit_drop_total Network device statistic transmit_drop.
# TYPE node_network_transmit_drop_total counter
node_network_transmit_drop_total{device="lo0"} 0
# HELP node_network_transmit_errs_total Network device statistic transmit_errs.
# TYPE node_network_transmit_errs_total counter
node_network_transmit_errs_total{device="lo0"} 0
# HELP node_network_transmit_packets_total Network device statistic transmit_packets.
# TYPE node_network_transmit_packets_total counter
# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id.
# TYPE node_os_info gauge
node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1
# HELP node_os_version Metric containing the major.minor part of the OS version.
# TYPE node_os_version gauge
node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="boottime"} 1
node_scrape_collector_success{collector="buddyinfo"} 1
node_scrape_collector_success{collector="cpu"} 1
node_scrape_collector_success{collector="exec"} 1
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="meminfo"} 1
node_scrape_collector_success{collector="netdev"} 1
node_scrape_collector_success{collector="netisr"} 1
node_scrape_collector_success{collector="netstat"} 1
node_scrape_collector_success{collector="os"} 1
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="xfrm"} 1
node_scrape_collector_success{collector="zfs"} 1
# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime_seconds gauge
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
# HELP node_xfrm_acquire_error_packets_total State hasnt been fully acquired before use
# TYPE node_xfrm_acquire_error_packets_total counter
node_xfrm_acquire_error_packets_total 24532
# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed
# TYPE node_xfrm_fwd_hdr_error_packets_total counter
node_xfrm_fwd_hdr_error_packets_total 6654
# HELP node_xfrm_in_buffer_error_packets_total No buffer is left
# TYPE node_xfrm_in_buffer_error_packets_total counter
node_xfrm_in_buffer_error_packets_total 2
# HELP node_xfrm_in_error_packets_total All errors not matched by other
# TYPE node_xfrm_in_error_packets_total counter
node_xfrm_in_error_packets_total 1
# HELP node_xfrm_in_hdr_error_packets_total Header error
# TYPE node_xfrm_in_hdr_error_packets_total counter
node_xfrm_in_hdr_error_packets_total 4
# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found
# TYPE node_xfrm_in_no_pols_packets_total counter
node_xfrm_in_no_pols_packets_total 65432
# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong
# TYPE node_xfrm_in_no_states_packets_total counter
node_xfrm_in_no_states_packets_total 3
# HELP node_xfrm_in_pol_block_packets_total Policy discards
# TYPE node_xfrm_in_pol_block_packets_total counter
node_xfrm_in_pol_block_packets_total 100
# HELP node_xfrm_in_pol_error_packets_total Policy error
# TYPE node_xfrm_in_pol_error_packets_total counter
node_xfrm_in_pol_error_packets_total 10000
# HELP node_xfrm_in_state_expired_packets_total State is expired
# TYPE node_xfrm_in_state_expired_packets_total counter
node_xfrm_in_state_expired_packets_total 7
# HELP node_xfrm_in_state_invalid_packets_total State is invalid
# TYPE node_xfrm_in_state_invalid_packets_total counter
node_xfrm_in_state_invalid_packets_total 55555
# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch
# TYPE node_xfrm_in_state_mismatch_packets_total counter
node_xfrm_in_state_mismatch_packets_total 23451
# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_in_state_mode_error_packets_total counter
node_xfrm_in_state_mode_error_packets_total 100
# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong
# TYPE node_xfrm_in_state_proto_error_packets_total counter
node_xfrm_in_state_proto_error_packets_total 40
# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window
# TYPE node_xfrm_in_state_seq_error_packets_total counter
node_xfrm_in_state_seq_error_packets_total 6000
# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong
# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter
node_xfrm_in_tmpl_mismatch_packets_total 51
# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error
# TYPE node_xfrm_out_bundle_check_error_packets_total counter
node_xfrm_out_bundle_check_error_packets_total 555
# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error
# TYPE node_xfrm_out_bundle_gen_error_packets_total counter
node_xfrm_out_bundle_gen_error_packets_total 43321
# HELP node_xfrm_out_error_packets_total All errors which is not matched others
# TYPE node_xfrm_out_error_packets_total counter
node_xfrm_out_error_packets_total 1e+06
# HELP node_xfrm_out_no_states_packets_total No state is found
# TYPE node_xfrm_out_no_states_packets_total counter
node_xfrm_out_no_states_packets_total 869
# HELP node_xfrm_out_pol_block_packets_total Policy discards
# TYPE node_xfrm_out_pol_block_packets_total counter
node_xfrm_out_pol_block_packets_total 43456
# HELP node_xfrm_out_pol_dead_packets_total Policy is dead
# TYPE node_xfrm_out_pol_dead_packets_total counter
node_xfrm_out_pol_dead_packets_total 7656
# HELP node_xfrm_out_pol_error_packets_total Policy error
# TYPE node_xfrm_out_pol_error_packets_total counter
node_xfrm_out_pol_error_packets_total 1454
# HELP node_xfrm_out_state_expired_packets_total State is expired
# TYPE node_xfrm_out_state_expired_packets_total counter
node_xfrm_out_state_expired_packets_total 565
# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired
# TYPE node_xfrm_out_state_invalid_packets_total counter
node_xfrm_out_state_invalid_packets_total 28765
# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_out_state_mode_error_packets_total counter
node_xfrm_out_state_mode_error_packets_total 8
# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error
# TYPE node_xfrm_out_state_proto_error_packets_total counter
node_xfrm_out_state_proto_error_packets_total 4542
# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow
# TYPE node_xfrm_out_state_seq_error_packets_total counter
node_xfrm_out_state_seq_error_packets_total 543
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10
# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_2 untyped
testmetric1_2{foo="baz"} 20
# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_1 untyped
testmetric2_1{foo="bar"} 30
# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_2 untyped
testmetric2_2{foo="baz"} 40


@ -0,0 +1,209 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_memory_swap_size_bytes Memory information field swap_size_bytes.
# TYPE node_memory_swap_size_bytes gauge
node_memory_swap_size_bytes 6.442426368e+09
# HELP node_memory_swap_used_bytes Memory information field swap_used_bytes.
# TYPE node_memory_swap_used_bytes gauge
node_memory_swap_used_bytes 0
# HELP node_memory_swapped_in_pages_bytes_total Memory information field swapped_in_pages_bytes_total.
# TYPE node_memory_swapped_in_pages_bytes_total counter
node_memory_swapped_in_pages_bytes_total 0
# HELP node_memory_swapped_out_pages_bytes_total Memory information field swapped_out_pages_bytes_total.
# TYPE node_memory_swapped_out_pages_bytes_total counter
node_memory_swapped_out_pages_bytes_total 0
# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id.
# TYPE node_os_info gauge
node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1
# HELP node_os_version Metric containing the major.minor part of the OS version.
# TYPE node_os_version gauge
node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="boottime"} 1
node_scrape_collector_success{collector="cpu"} 0
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="meminfo"} 1
node_scrape_collector_success{collector="os"} 1
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="xfrm"} 1
# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime_seconds gauge
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
# HELP node_xfrm_acquire_error_packets_total State hasnt been fully acquired before use
# TYPE node_xfrm_acquire_error_packets_total counter
node_xfrm_acquire_error_packets_total 24532
# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed
# TYPE node_xfrm_fwd_hdr_error_packets_total counter
node_xfrm_fwd_hdr_error_packets_total 6654
# HELP node_xfrm_in_buffer_error_packets_total No buffer is left
# TYPE node_xfrm_in_buffer_error_packets_total counter
node_xfrm_in_buffer_error_packets_total 2
# HELP node_xfrm_in_error_packets_total All errors not matched by other
# TYPE node_xfrm_in_error_packets_total counter
node_xfrm_in_error_packets_total 1
# HELP node_xfrm_in_hdr_error_packets_total Header error
# TYPE node_xfrm_in_hdr_error_packets_total counter
node_xfrm_in_hdr_error_packets_total 4
# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found
# TYPE node_xfrm_in_no_pols_packets_total counter
node_xfrm_in_no_pols_packets_total 65432
# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong
# TYPE node_xfrm_in_no_states_packets_total counter
node_xfrm_in_no_states_packets_total 3
# HELP node_xfrm_in_pol_block_packets_total Policy discards
# TYPE node_xfrm_in_pol_block_packets_total counter
node_xfrm_in_pol_block_packets_total 100
# HELP node_xfrm_in_pol_error_packets_total Policy error
# TYPE node_xfrm_in_pol_error_packets_total counter
node_xfrm_in_pol_error_packets_total 10000
# HELP node_xfrm_in_state_expired_packets_total State is expired
# TYPE node_xfrm_in_state_expired_packets_total counter
node_xfrm_in_state_expired_packets_total 7
# HELP node_xfrm_in_state_invalid_packets_total State is invalid
# TYPE node_xfrm_in_state_invalid_packets_total counter
node_xfrm_in_state_invalid_packets_total 55555
# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch
# TYPE node_xfrm_in_state_mismatch_packets_total counter
node_xfrm_in_state_mismatch_packets_total 23451
# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_in_state_mode_error_packets_total counter
node_xfrm_in_state_mode_error_packets_total 100
# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong
# TYPE node_xfrm_in_state_proto_error_packets_total counter
node_xfrm_in_state_proto_error_packets_total 40
# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window
# TYPE node_xfrm_in_state_seq_error_packets_total counter
node_xfrm_in_state_seq_error_packets_total 6000
# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong
# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter
node_xfrm_in_tmpl_mismatch_packets_total 51
# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error
# TYPE node_xfrm_out_bundle_check_error_packets_total counter
node_xfrm_out_bundle_check_error_packets_total 555
# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error
# TYPE node_xfrm_out_bundle_gen_error_packets_total counter
node_xfrm_out_bundle_gen_error_packets_total 43321
# HELP node_xfrm_out_error_packets_total All errors which is not matched others
# TYPE node_xfrm_out_error_packets_total counter
node_xfrm_out_error_packets_total 1e+06
# HELP node_xfrm_out_no_states_packets_total No state is found
# TYPE node_xfrm_out_no_states_packets_total counter
node_xfrm_out_no_states_packets_total 869
# HELP node_xfrm_out_pol_block_packets_total Policy discards
# TYPE node_xfrm_out_pol_block_packets_total counter
node_xfrm_out_pol_block_packets_total 43456
# HELP node_xfrm_out_pol_dead_packets_total Policy is dead
# TYPE node_xfrm_out_pol_dead_packets_total counter
node_xfrm_out_pol_dead_packets_total 7656
# HELP node_xfrm_out_pol_error_packets_total Policy error
# TYPE node_xfrm_out_pol_error_packets_total counter
node_xfrm_out_pol_error_packets_total 1454
# HELP node_xfrm_out_state_expired_packets_total State is expired
# TYPE node_xfrm_out_state_expired_packets_total counter
node_xfrm_out_state_expired_packets_total 565
# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired
# TYPE node_xfrm_out_state_invalid_packets_total counter
node_xfrm_out_state_invalid_packets_total 28765
# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_out_state_mode_error_packets_total counter
node_xfrm_out_state_mode_error_packets_total 8
# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error
# TYPE node_xfrm_out_state_proto_error_packets_total counter
node_xfrm_out_state_proto_error_packets_total 4542
# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow
# TYPE node_xfrm_out_state_seq_error_packets_total counter
node_xfrm_out_state_seq_error_packets_total 543
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10
# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_2 untyped
testmetric1_2{foo="baz"} 20
# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_1 untyped
testmetric2_1{foo="bar"} 30
# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_2 untyped
testmetric2_2{foo="baz"} 40


@ -0,0 +1,276 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_buddyinfo_blocks Count of free blocks according to size.
# TYPE node_buddyinfo_blocks gauge
node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759
node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381
node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572
node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093
node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3
node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791
node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185
node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475
node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530
node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2
node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194
node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567
node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45
node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102
node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12
node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4
node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_memory_swap_size_bytes Memory information field swap_size_bytes.
# TYPE node_memory_swap_size_bytes gauge
node_memory_swap_size_bytes 6.693941248e+09
# HELP node_memory_swap_used_bytes Memory information field swap_used_bytes.
# TYPE node_memory_swap_used_bytes gauge
node_memory_swap_used_bytes 0
# HELP node_memory_swapped_in_pages_bytes_total Memory information field swapped_in_pages_bytes_total.
# TYPE node_memory_swapped_in_pages_bytes_total counter
node_memory_swapped_in_pages_bytes_total 0
# HELP node_memory_swapped_out_pages_bytes_total Memory information field swapped_out_pages_bytes_total.
# TYPE node_memory_swapped_out_pages_bytes_total counter
node_memory_swapped_out_pages_bytes_total 0
# HELP node_network_noproto_total Network device statistic noproto.
# TYPE node_network_noproto_total counter
node_network_noproto_total{device="lo0"} 0
node_network_noproto_total{device="pflog0"} 0
# HELP node_network_receive_drop_total Network device statistic receive_drop.
# TYPE node_network_receive_drop_total counter
node_network_receive_drop_total{device="lo0"} 0
node_network_receive_drop_total{device="pflog0"} 0
# HELP node_network_receive_errs_total Network device statistic receive_errs.
# TYPE node_network_receive_errs_total counter
node_network_receive_errs_total{device="lo0"} 0
node_network_receive_errs_total{device="pflog0"} 0
# HELP node_network_receive_packets_total Network device statistic receive_packets.
# TYPE node_network_receive_packets_total counter
# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes.
# TYPE node_network_transmit_bytes_total counter
# HELP node_network_transmit_colls_total Network device statistic transmit_colls.
# TYPE node_network_transmit_colls_total counter
node_network_transmit_colls_total{device="lo0"} 0
node_network_transmit_colls_total{device="pflog0"} 0
# HELP node_network_transmit_drop_total Network device statistic transmit_drop.
# TYPE node_network_transmit_drop_total counter
node_network_transmit_drop_total{device="lo0"} 0
node_network_transmit_drop_total{device="pflog0"} 0
# HELP node_network_transmit_errs_total Network device statistic transmit_errs.
# TYPE node_network_transmit_errs_total counter
node_network_transmit_errs_total{device="lo0"} 0
node_network_transmit_errs_total{device="pflog0"} 0
# HELP node_network_transmit_packets_total Network device statistic transmit_packets.
# TYPE node_network_transmit_packets_total counter
# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id.
# TYPE node_os_info gauge
node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1
# HELP node_os_version Metric containing the major.minor part of the OS version.
# TYPE node_os_version gauge
node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="boottime"} 1
node_scrape_collector_success{collector="buddyinfo"} 1
node_scrape_collector_success{collector="cpu"} 1
node_scrape_collector_success{collector="diskstats"} 1
node_scrape_collector_success{collector="interrupts"} 1
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="meminfo"} 1
node_scrape_collector_success{collector="netdev"} 1
node_scrape_collector_success{collector="os"} 1
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="xfrm"} 1
# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime_seconds gauge
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
# HELP node_xfrm_acquire_error_packets_total State hasnt been fully acquired before use
# TYPE node_xfrm_acquire_error_packets_total counter
node_xfrm_acquire_error_packets_total 24532
# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed
# TYPE node_xfrm_fwd_hdr_error_packets_total counter
node_xfrm_fwd_hdr_error_packets_total 6654
# HELP node_xfrm_in_buffer_error_packets_total No buffer is left
# TYPE node_xfrm_in_buffer_error_packets_total counter
node_xfrm_in_buffer_error_packets_total 2
# HELP node_xfrm_in_error_packets_total All errors not matched by other
# TYPE node_xfrm_in_error_packets_total counter
node_xfrm_in_error_packets_total 1
# HELP node_xfrm_in_hdr_error_packets_total Header error
# TYPE node_xfrm_in_hdr_error_packets_total counter
node_xfrm_in_hdr_error_packets_total 4
# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found
# TYPE node_xfrm_in_no_pols_packets_total counter
node_xfrm_in_no_pols_packets_total 65432
# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong
# TYPE node_xfrm_in_no_states_packets_total counter
node_xfrm_in_no_states_packets_total 3
# HELP node_xfrm_in_pol_block_packets_total Policy discards
# TYPE node_xfrm_in_pol_block_packets_total counter
node_xfrm_in_pol_block_packets_total 100
# HELP node_xfrm_in_pol_error_packets_total Policy error
# TYPE node_xfrm_in_pol_error_packets_total counter
node_xfrm_in_pol_error_packets_total 10000
# HELP node_xfrm_in_state_expired_packets_total State is expired
# TYPE node_xfrm_in_state_expired_packets_total counter
node_xfrm_in_state_expired_packets_total 7
# HELP node_xfrm_in_state_invalid_packets_total State is invalid
# TYPE node_xfrm_in_state_invalid_packets_total counter
node_xfrm_in_state_invalid_packets_total 55555
# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch
# TYPE node_xfrm_in_state_mismatch_packets_total counter
node_xfrm_in_state_mismatch_packets_total 23451
# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_in_state_mode_error_packets_total counter
node_xfrm_in_state_mode_error_packets_total 100
# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong
# TYPE node_xfrm_in_state_proto_error_packets_total counter
node_xfrm_in_state_proto_error_packets_total 40
# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window
# TYPE node_xfrm_in_state_seq_error_packets_total counter
node_xfrm_in_state_seq_error_packets_total 6000
# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong
# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter
node_xfrm_in_tmpl_mismatch_packets_total 51
# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error
# TYPE node_xfrm_out_bundle_check_error_packets_total counter
node_xfrm_out_bundle_check_error_packets_total 555
# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error
# TYPE node_xfrm_out_bundle_gen_error_packets_total counter
node_xfrm_out_bundle_gen_error_packets_total 43321
# HELP node_xfrm_out_error_packets_total All errors which is not matched others
# TYPE node_xfrm_out_error_packets_total counter
node_xfrm_out_error_packets_total 1e+06
# HELP node_xfrm_out_no_states_packets_total No state is found
# TYPE node_xfrm_out_no_states_packets_total counter
node_xfrm_out_no_states_packets_total 869
# HELP node_xfrm_out_pol_block_packets_total Policy discards
# TYPE node_xfrm_out_pol_block_packets_total counter
node_xfrm_out_pol_block_packets_total 43456
# HELP node_xfrm_out_pol_dead_packets_total Policy is dead
# TYPE node_xfrm_out_pol_dead_packets_total counter
node_xfrm_out_pol_dead_packets_total 7656
# HELP node_xfrm_out_pol_error_packets_total Policy error
# TYPE node_xfrm_out_pol_error_packets_total counter
node_xfrm_out_pol_error_packets_total 1454
# HELP node_xfrm_out_state_expired_packets_total State is expired
# TYPE node_xfrm_out_state_expired_packets_total counter
node_xfrm_out_state_expired_packets_total 565
# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired
# TYPE node_xfrm_out_state_invalid_packets_total counter
node_xfrm_out_state_invalid_packets_total 28765
# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_out_state_mode_error_packets_total counter
node_xfrm_out_state_mode_error_packets_total 8
# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error
# TYPE node_xfrm_out_state_proto_error_packets_total counter
node_xfrm_out_state_proto_error_packets_total 4542
# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow
# TYPE node_xfrm_out_state_seq_error_packets_total counter
node_xfrm_out_state_seq_error_packets_total 543
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10
# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_2 untyped
testmetric1_2{foo="baz"} 20
# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_1 untyped
testmetric2_1{foo="bar"} 30
# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_2 untyped
testmetric2_2{foo="baz"} 40


@ -0,0 +1,234 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_buddyinfo_blocks Count of free blocks according to size.
# TYPE node_buddyinfo_blocks gauge
node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759
node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381
node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572
node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093
node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3
node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791
node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185
node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475
node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530
node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2
node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194
node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567
node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45
node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102
node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12
node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4
node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id.
# TYPE node_os_info gauge
node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1
# HELP node_os_version Metric containing the major.minor part of the OS version.
# TYPE node_os_version gauge
node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="boottime"} 1
node_scrape_collector_success{collector="buddyinfo"} 1
node_scrape_collector_success{collector="cpu"} 0
node_scrape_collector_success{collector="cpufreq"} 1
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="os"} 1
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="xfrm"} 1
node_scrape_collector_success{collector="zfs"} 0
# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime_seconds gauge
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
# HELP node_xfrm_acquire_error_packets_total State hasn't been fully acquired before use
# TYPE node_xfrm_acquire_error_packets_total counter
node_xfrm_acquire_error_packets_total 24532
# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed
# TYPE node_xfrm_fwd_hdr_error_packets_total counter
node_xfrm_fwd_hdr_error_packets_total 6654
# HELP node_xfrm_in_buffer_error_packets_total No buffer is left
# TYPE node_xfrm_in_buffer_error_packets_total counter
node_xfrm_in_buffer_error_packets_total 2
# HELP node_xfrm_in_error_packets_total All errors not matched by other
# TYPE node_xfrm_in_error_packets_total counter
node_xfrm_in_error_packets_total 1
# HELP node_xfrm_in_hdr_error_packets_total Header error
# TYPE node_xfrm_in_hdr_error_packets_total counter
node_xfrm_in_hdr_error_packets_total 4
# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found
# TYPE node_xfrm_in_no_pols_packets_total counter
node_xfrm_in_no_pols_packets_total 65432
# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong
# TYPE node_xfrm_in_no_states_packets_total counter
node_xfrm_in_no_states_packets_total 3
# HELP node_xfrm_in_pol_block_packets_total Policy discards
# TYPE node_xfrm_in_pol_block_packets_total counter
node_xfrm_in_pol_block_packets_total 100
# HELP node_xfrm_in_pol_error_packets_total Policy error
# TYPE node_xfrm_in_pol_error_packets_total counter
node_xfrm_in_pol_error_packets_total 10000
# HELP node_xfrm_in_state_expired_packets_total State is expired
# TYPE node_xfrm_in_state_expired_packets_total counter
node_xfrm_in_state_expired_packets_total 7
# HELP node_xfrm_in_state_invalid_packets_total State is invalid
# TYPE node_xfrm_in_state_invalid_packets_total counter
node_xfrm_in_state_invalid_packets_total 55555
# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch
# TYPE node_xfrm_in_state_mismatch_packets_total counter
node_xfrm_in_state_mismatch_packets_total 23451
# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_in_state_mode_error_packets_total counter
node_xfrm_in_state_mode_error_packets_total 100
# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong
# TYPE node_xfrm_in_state_proto_error_packets_total counter
node_xfrm_in_state_proto_error_packets_total 40
# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window
# TYPE node_xfrm_in_state_seq_error_packets_total counter
node_xfrm_in_state_seq_error_packets_total 6000
# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong
# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter
node_xfrm_in_tmpl_mismatch_packets_total 51
# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error
# TYPE node_xfrm_out_bundle_check_error_packets_total counter
node_xfrm_out_bundle_check_error_packets_total 555
# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error
# TYPE node_xfrm_out_bundle_gen_error_packets_total counter
node_xfrm_out_bundle_gen_error_packets_total 43321
# HELP node_xfrm_out_error_packets_total All errors which is not matched others
# TYPE node_xfrm_out_error_packets_total counter
node_xfrm_out_error_packets_total 1e+06
# HELP node_xfrm_out_no_states_packets_total No state is found
# TYPE node_xfrm_out_no_states_packets_total counter
node_xfrm_out_no_states_packets_total 869
# HELP node_xfrm_out_pol_block_packets_total Policy discards
# TYPE node_xfrm_out_pol_block_packets_total counter
node_xfrm_out_pol_block_packets_total 43456
# HELP node_xfrm_out_pol_dead_packets_total Policy is dead
# TYPE node_xfrm_out_pol_dead_packets_total counter
node_xfrm_out_pol_dead_packets_total 7656
# HELP node_xfrm_out_pol_error_packets_total Policy error
# TYPE node_xfrm_out_pol_error_packets_total counter
node_xfrm_out_pol_error_packets_total 1454
# HELP node_xfrm_out_state_expired_packets_total State is expired
# TYPE node_xfrm_out_state_expired_packets_total counter
node_xfrm_out_state_expired_packets_total 565
# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired
# TYPE node_xfrm_out_state_invalid_packets_total counter
node_xfrm_out_state_invalid_packets_total 28765
# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_out_state_mode_error_packets_total counter
node_xfrm_out_state_mode_error_packets_total 8
# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error
# TYPE node_xfrm_out_state_proto_error_packets_total counter
node_xfrm_out_state_proto_error_packets_total 4542
# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow
# TYPE node_xfrm_out_state_seq_error_packets_total counter
node_xfrm_out_state_seq_error_packets_total 543
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10
# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_2 untyped
testmetric1_2{foo="baz"} 20
# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_1 untyped
testmetric2_1{foo="bar"} 30
# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_2 untyped
testmetric2_2{foo="baz"} 40

View file

@ -1,8 +1,8 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
@ -52,7 +52,7 @@
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
@ -510,21 +510,21 @@ node_disk_flush_requests_time_seconds_total{device="sdc"} 1.944
node_disk_flush_requests_total{device="sdc"} 1555
# HELP node_disk_info Info of /sys/block/<block_device>.
# TYPE node_disk_info gauge
node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1
node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1
node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1
node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1
node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1
node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",serial="AAAABBBBCCCC1",wwn=""} 1
node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="",wwn=""} 1
node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",rotational="0",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1
node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",rotational="1",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1
node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",rotational="0",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1
node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",rotational="0",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1
node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",rotational="0",serial="AAAABBBBCCCC1",wwn=""} 1
node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",rotational="0",serial="",wwn=""} 1
# HELP node_disk_io_now The number of I/Os currently in progress.
# TYPE node_disk_io_now gauge
node_disk_io_now{device="dm-0"} 0
@ -893,6 +893,10 @@ node_hwmon_fan_target_rpm{chip="nct6779",sensor="fan2"} 27000
# HELP node_hwmon_fan_tolerance Hardware monitor fan element tolerance
# TYPE node_hwmon_fan_tolerance gauge
node_hwmon_fan_tolerance{chip="nct6779",sensor="fan2"} 0
# HELP node_hwmon_freq_freq_mhz Hardware monitor for GPU frequency in MHz
# TYPE node_hwmon_freq_freq_mhz gauge
node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="mclk"} 300
node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="sclk"} 214
# HELP node_hwmon_in_alarm Hardware sensor alarm status (in)
# TYPE node_hwmon_in_alarm gauge
node_hwmon_in_alarm{chip="nct6779",sensor="in0"} 0
@ -1006,8 +1010,10 @@ node_hwmon_pwm_weight_temp_step_tol{chip="nct6779",sensor="pwm1"} 0
# TYPE node_hwmon_sensor_label gauge
node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp1"} 1
node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp2"} 1
node_hwmon_sensor_label{chip="platform_applesmc_768",label="Left side ",sensor="fan1"} 1
node_hwmon_sensor_label{chip="platform_applesmc_768",label="Right side ",sensor="fan2"} 1
node_hwmon_sensor_label{chip="hwmon4",label="mclk",sensor="freq2"} 1
node_hwmon_sensor_label{chip="hwmon4",label="sclk",sensor="freq1"} 1
node_hwmon_sensor_label{chip="platform_applesmc_768",label="Left side",sensor="fan1"} 1
node_hwmon_sensor_label{chip="platform_applesmc_768",label="Right side",sensor="fan2"} 1
node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 0",sensor="temp2"} 1
node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 1",sensor="temp3"} 1
node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 2",sensor="temp4"} 1
@ -1601,6 +1607,14 @@ node_md_blocks_synced{device="md6"} 1.6775552e+07
node_md_blocks_synced{device="md7"} 7.813735424e+09
node_md_blocks_synced{device="md8"} 1.6775552e+07
node_md_blocks_synced{device="md9"} 0
# HELP node_md_degraded Number of degraded disks on device.
# TYPE node_md_degraded gauge
node_md_degraded{device="md0"} 0
node_md_degraded{device="md1"} 0
node_md_degraded{device="md10"} 0
node_md_degraded{device="md4"} 0
node_md_degraded{device="md5"} 1
node_md_degraded{device="md6"} 1
# HELP node_md_disks Number of active/failed/spare disks of device.
# TYPE node_md_disks gauge
node_md_disks{device="md0",state="active"} 2
@ -1673,6 +1687,14 @@ node_md_disks_required{device="md6"} 2
node_md_disks_required{device="md7"} 4
node_md_disks_required{device="md8"} 2
node_md_disks_required{device="md9"} 4
# HELP node_md_raid_disks Number of raid disks on device.
# TYPE node_md_raid_disks gauge
node_md_raid_disks{device="md0"} 2
node_md_raid_disks{device="md1"} 2
node_md_raid_disks{device="md10"} 4
node_md_raid_disks{device="md4"} 3
node_md_raid_disks{device="md5"} 3
node_md_raid_disks{device="md6"} 4
# HELP node_md_state Indicates the state of md-device.
# TYPE node_md_state gauge
node_md_state{device="md0",state="active"} 1
@ -4337,108 +4359,139 @@ node_zfs_zil_zil_itx_needcopy_count 0
# TYPE node_zfs_zpool_dataset_nread untyped
node_zfs_zpool_dataset_nread{dataset="pool1",zpool="pool1"} 0
node_zfs_zpool_dataset_nread{dataset="pool1/dataset1",zpool="pool1"} 28
node_zfs_zpool_dataset_nread{dataset="pool3",zpool="pool3"} 0
node_zfs_zpool_dataset_nread{dataset="pool3/dataset with space",zpool="pool3"} 28
node_zfs_zpool_dataset_nread{dataset="poolz1",zpool="poolz1"} 0
node_zfs_zpool_dataset_nread{dataset="poolz1/dataset1",zpool="poolz1"} 28
# HELP node_zfs_zpool_dataset_nunlinked kstat.zfs.misc.objset.nunlinked
# TYPE node_zfs_zpool_dataset_nunlinked untyped
node_zfs_zpool_dataset_nunlinked{dataset="pool1",zpool="pool1"} 0
node_zfs_zpool_dataset_nunlinked{dataset="pool1/dataset1",zpool="pool1"} 3
node_zfs_zpool_dataset_nunlinked{dataset="pool3",zpool="pool3"} 0
node_zfs_zpool_dataset_nunlinked{dataset="pool3/dataset with space",zpool="pool3"} 3
node_zfs_zpool_dataset_nunlinked{dataset="poolz1",zpool="poolz1"} 0
node_zfs_zpool_dataset_nunlinked{dataset="poolz1/dataset1",zpool="poolz1"} 14
# HELP node_zfs_zpool_dataset_nunlinks kstat.zfs.misc.objset.nunlinks
# TYPE node_zfs_zpool_dataset_nunlinks untyped
node_zfs_zpool_dataset_nunlinks{dataset="pool1",zpool="pool1"} 0
node_zfs_zpool_dataset_nunlinks{dataset="pool1/dataset1",zpool="pool1"} 3
node_zfs_zpool_dataset_nunlinks{dataset="pool3",zpool="pool3"} 0
node_zfs_zpool_dataset_nunlinks{dataset="pool3/dataset with space",zpool="pool3"} 3
node_zfs_zpool_dataset_nunlinks{dataset="poolz1",zpool="poolz1"} 0
node_zfs_zpool_dataset_nunlinks{dataset="poolz1/dataset1",zpool="poolz1"} 14
# HELP node_zfs_zpool_dataset_nwritten kstat.zfs.misc.objset.nwritten
# TYPE node_zfs_zpool_dataset_nwritten untyped
node_zfs_zpool_dataset_nwritten{dataset="pool1",zpool="pool1"} 0
node_zfs_zpool_dataset_nwritten{dataset="pool1/dataset1",zpool="pool1"} 12302
node_zfs_zpool_dataset_nwritten{dataset="pool3",zpool="pool3"} 0
node_zfs_zpool_dataset_nwritten{dataset="pool3/dataset with space",zpool="pool3"} 12302
node_zfs_zpool_dataset_nwritten{dataset="poolz1",zpool="poolz1"} 0
node_zfs_zpool_dataset_nwritten{dataset="poolz1/dataset1",zpool="poolz1"} 32806
# HELP node_zfs_zpool_dataset_reads kstat.zfs.misc.objset.reads
# TYPE node_zfs_zpool_dataset_reads untyped
node_zfs_zpool_dataset_reads{dataset="pool1",zpool="pool1"} 0
node_zfs_zpool_dataset_reads{dataset="pool1/dataset1",zpool="pool1"} 2
node_zfs_zpool_dataset_reads{dataset="pool3",zpool="pool3"} 0
node_zfs_zpool_dataset_reads{dataset="pool3/dataset with space",zpool="pool3"} 2
node_zfs_zpool_dataset_reads{dataset="poolz1",zpool="poolz1"} 0
node_zfs_zpool_dataset_reads{dataset="poolz1/dataset1",zpool="poolz1"} 2
# HELP node_zfs_zpool_dataset_writes kstat.zfs.misc.objset.writes
# TYPE node_zfs_zpool_dataset_writes untyped
node_zfs_zpool_dataset_writes{dataset="pool1",zpool="pool1"} 0
node_zfs_zpool_dataset_writes{dataset="pool1/dataset1",zpool="pool1"} 4
node_zfs_zpool_dataset_writes{dataset="pool3",zpool="pool3"} 0
node_zfs_zpool_dataset_writes{dataset="pool3/dataset with space",zpool="pool3"} 4
node_zfs_zpool_dataset_writes{dataset="poolz1",zpool="poolz1"} 0
node_zfs_zpool_dataset_writes{dataset="poolz1/dataset1",zpool="poolz1"} 10
# HELP node_zfs_zpool_nread kstat.zfs.misc.io.nread
# TYPE node_zfs_zpool_nread untyped
node_zfs_zpool_nread{zpool="pool1"} 1.88416e+06
node_zfs_zpool_nread{zpool="pool3"} 1.88416e+06
node_zfs_zpool_nread{zpool="poolz1"} 2.82624e+06
# HELP node_zfs_zpool_nwritten kstat.zfs.misc.io.nwritten
# TYPE node_zfs_zpool_nwritten untyped
node_zfs_zpool_nwritten{zpool="pool1"} 3.206144e+06
node_zfs_zpool_nwritten{zpool="pool3"} 3.206144e+06
node_zfs_zpool_nwritten{zpool="poolz1"} 2.680501248e+09
# HELP node_zfs_zpool_rcnt kstat.zfs.misc.io.rcnt
# TYPE node_zfs_zpool_rcnt untyped
node_zfs_zpool_rcnt{zpool="pool1"} 0
node_zfs_zpool_rcnt{zpool="pool3"} 0
node_zfs_zpool_rcnt{zpool="poolz1"} 0
# HELP node_zfs_zpool_reads kstat.zfs.misc.io.reads
# TYPE node_zfs_zpool_reads untyped
node_zfs_zpool_reads{zpool="pool1"} 22
node_zfs_zpool_reads{zpool="pool3"} 22
node_zfs_zpool_reads{zpool="poolz1"} 33
# HELP node_zfs_zpool_rlentime kstat.zfs.misc.io.rlentime
# TYPE node_zfs_zpool_rlentime untyped
node_zfs_zpool_rlentime{zpool="pool1"} 1.04112268e+08
node_zfs_zpool_rlentime{zpool="pool3"} 1.04112268e+08
node_zfs_zpool_rlentime{zpool="poolz1"} 6.472105124093e+12
# HELP node_zfs_zpool_rtime kstat.zfs.misc.io.rtime
# TYPE node_zfs_zpool_rtime untyped
node_zfs_zpool_rtime{zpool="pool1"} 2.4168078e+07
node_zfs_zpool_rtime{zpool="pool3"} 2.4168078e+07
node_zfs_zpool_rtime{zpool="poolz1"} 9.82909164e+09
# HELP node_zfs_zpool_rupdate kstat.zfs.misc.io.rupdate
# TYPE node_zfs_zpool_rupdate untyped
node_zfs_zpool_rupdate{zpool="pool1"} 7.921048984922e+13
node_zfs_zpool_rupdate{zpool="pool3"} 7.921048984922e+13
node_zfs_zpool_rupdate{zpool="poolz1"} 1.10734831944501e+14
# HELP node_zfs_zpool_state kstat.zfs.misc.state
# TYPE node_zfs_zpool_state gauge
node_zfs_zpool_state{state="degraded",zpool="pool1"} 0
node_zfs_zpool_state{state="degraded",zpool="pool2"} 0
node_zfs_zpool_state{state="degraded",zpool="pool3"} 0
node_zfs_zpool_state{state="degraded",zpool="poolz1"} 1
node_zfs_zpool_state{state="faulted",zpool="pool1"} 0
node_zfs_zpool_state{state="faulted",zpool="pool2"} 0
node_zfs_zpool_state{state="faulted",zpool="pool3"} 0
node_zfs_zpool_state{state="faulted",zpool="poolz1"} 0
node_zfs_zpool_state{state="offline",zpool="pool1"} 0
node_zfs_zpool_state{state="offline",zpool="pool2"} 0
node_zfs_zpool_state{state="offline",zpool="pool3"} 0
node_zfs_zpool_state{state="offline",zpool="poolz1"} 0
node_zfs_zpool_state{state="online",zpool="pool1"} 1
node_zfs_zpool_state{state="online",zpool="pool2"} 0
node_zfs_zpool_state{state="online",zpool="pool3"} 1
node_zfs_zpool_state{state="online",zpool="poolz1"} 0
node_zfs_zpool_state{state="removed",zpool="pool1"} 0
node_zfs_zpool_state{state="removed",zpool="pool2"} 0
node_zfs_zpool_state{state="removed",zpool="pool3"} 0
node_zfs_zpool_state{state="removed",zpool="poolz1"} 0
node_zfs_zpool_state{state="suspended",zpool="pool1"} 0
node_zfs_zpool_state{state="suspended",zpool="pool2"} 1
node_zfs_zpool_state{state="suspended",zpool="pool3"} 0
node_zfs_zpool_state{state="suspended",zpool="poolz1"} 0
node_zfs_zpool_state{state="unavail",zpool="pool1"} 0
node_zfs_zpool_state{state="unavail",zpool="pool2"} 0
node_zfs_zpool_state{state="unavail",zpool="pool3"} 0
node_zfs_zpool_state{state="unavail",zpool="poolz1"} 0
# HELP node_zfs_zpool_wcnt kstat.zfs.misc.io.wcnt
# TYPE node_zfs_zpool_wcnt untyped
node_zfs_zpool_wcnt{zpool="pool1"} 0
node_zfs_zpool_wcnt{zpool="pool3"} 0
node_zfs_zpool_wcnt{zpool="poolz1"} 0
# HELP node_zfs_zpool_wlentime kstat.zfs.misc.io.wlentime
# TYPE node_zfs_zpool_wlentime untyped
node_zfs_zpool_wlentime{zpool="pool1"} 1.04112268e+08
node_zfs_zpool_wlentime{zpool="pool3"} 1.04112268e+08
node_zfs_zpool_wlentime{zpool="poolz1"} 6.472105124093e+12
# HELP node_zfs_zpool_writes kstat.zfs.misc.io.writes
# TYPE node_zfs_zpool_writes untyped
node_zfs_zpool_writes{zpool="pool1"} 132
node_zfs_zpool_writes{zpool="pool3"} 132
node_zfs_zpool_writes{zpool="poolz1"} 25294
# HELP node_zfs_zpool_wtime kstat.zfs.misc.io.wtime
# TYPE node_zfs_zpool_wtime untyped
node_zfs_zpool_wtime{zpool="pool1"} 7.155162e+06
node_zfs_zpool_wtime{zpool="pool3"} 7.155162e+06
node_zfs_zpool_wtime{zpool="poolz1"} 9.673715628e+09
# HELP node_zfs_zpool_wupdate kstat.zfs.misc.io.wupdate
# TYPE node_zfs_zpool_wupdate untyped
node_zfs_zpool_wupdate{zpool="pool1"} 7.9210489694949e+13
node_zfs_zpool_wupdate{zpool="pool3"} 7.9210489694949e+13
node_zfs_zpool_wupdate{zpool="poolz1"} 1.10734831833266e+14
# HELP node_zoneinfo_high_pages Zone watermark pages_high
# TYPE node_zoneinfo_high_pages gauge

View file

@ -4,6 +4,7 @@ NIC statistics:
rx_packets: 1260062
tx_errors: 0
rx_errors: 0
port.rx_dropped: 12028
rx_missed: 401
align_errors: 0
tx_single_collisions: 0

View file

@ -0,0 +1,3 @@
12 3 0x00 1 80 79205351707403 395818011156865
nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt
1884160 3206144 22 132 7155162 104112268 79210489694949 24168078 104112268 79210489849220 0 0
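This new fixture (one of the pool3 additions, judging by the matching node_zfs_zpool_*{zpool="pool3"} values above) is the tabular kstat "io" format the zfs collector reads: a numeric header line, a row of field names, and a single row of values. Those values surface above as the pool3 series, e.g. nread 1884160 rendered as 1.88416e+06 and writes as 132. A minimal, hypothetical Go sketch of parsing such a two-row table, not the collector's actual implementation:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseKstatIO pairs the field-name row of a ZFS kstat "io" table with its
// single row of numeric values.
func parseKstatIO(header, values string) (map[string]uint64, error) {
    names := strings.Fields(header)
    nums := strings.Fields(values)
    if len(names) != len(nums) {
        return nil, fmt.Errorf("field count mismatch: %d names vs %d values", len(names), len(nums))
    }
    out := make(map[string]uint64, len(names))
    for i, name := range names {
        v, err := strconv.ParseUint(nums[i], 10, 64)
        if err != nil {
            return nil, err
        }
        out[name] = v
    }
    return out, nil
}

func main() {
    stats, err := parseKstatIO(
        "nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt",
        "1884160 3206144 22 132 7155162 104112268 79210489694949 24168078 104112268 79210489849220 0 0",
    )
    if err != nil {
        panic(err)
    }
    fmt.Println(stats["nread"], stats["writes"]) // 1884160 132, exposed above for zpool="pool3"
}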

View file

@ -0,0 +1,9 @@
23 1 0x01 7 2160 221578688875 6665999035587
name type data
dataset_name 7 pool3
writes 4 0
nwritten 4 0
reads 4 0
nread 4 0
nunlinks 4 0
nunlinked 4 0

View file

@ -0,0 +1,9 @@
24 1 0x01 7 2160 221611904716 7145015038451
name type data
dataset_name 7 pool3/dataset with space
writes 4 4
nwritten 4 12302
reads 4 2
nread 4 28
nunlinks 4 3
nunlinked 4 3

View file

@ -0,0 +1 @@
ONLINE

View file

@ -2,6 +2,830 @@
Directory: sys
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/sda
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/sda/queue
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/add_random
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/chunk_sectors
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/dax
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/discard_granularity
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/discard_max_bytes
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/discard_max_hw_bytes
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/discard_zeroes_data
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/fua
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/hw_sector_size
Lines: 1
512
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/io_poll
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/io_poll_delay
Lines: 1
-1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/io_timeout
Lines: 1
30000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/sda/queue/iosched
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/iosched/back_seek_max
Lines: 1
16384
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/iosched/back_seek_penalty
Lines: 1
2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/iosched/fifo_expire_async
Lines: 1
250
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/iosched/fifo_expire_sync
Lines: 1
125
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/iosched/low_latency
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/iosched/max_budget
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/iosched/slice_idle
Lines: 1
8
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/iosched/slice_idle_us
Lines: 1
8000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/iosched/strict_guarantees
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/iosched/timeout_sync
Lines: 1
125
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/iostats
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/logical_block_size
Lines: 1
512
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/max_discard_segments
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/max_hw_sectors_kb
Lines: 1
32767
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/max_integrity_segments
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/max_sectors_kb
Lines: 1
1280
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/max_segment_size
Lines: 1
65536
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/max_segments
Lines: 1
168
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/minimum_io_size
Lines: 1
512
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/nomerges
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/nr_requests
Lines: 1
64
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/nr_zones
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/optimal_io_size
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/physical_block_size
Lines: 1
512
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/read_ahead_kb
Lines: 1
128
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/rotational
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/rq_affinity
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/scheduler
Lines: 1
mq-deadline kyber [bfq] none
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/wbt_lat_usec
Lines: 1
75000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/write_cache
Lines: 1
write back
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/write_same_max_bytes
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/write_zeroes_max_bytes
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/sda/queue/zoned
Lines: 1
none
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md0
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md0/md
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md0/md/array_state
Lines: 1
clean
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md0/md/chunk_size
Lines: 1
524288
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md0/md/dev-sdg
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md0/md/dev-sdg/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md0/md/dev-sdh
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md0/md/dev-sdh/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md0/md/level
Lines: 1
raid0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md0/md/metadata_version
Lines: 1
1.2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md0/md/raid_disks
Lines: 1
2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md0/md/rd0
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md0/md/rd0/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md0/md/rd1
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md0/md/rd1/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md0/md/uuid
Lines: 1
155f29ff-1716-4107-b362-52307ef86cac
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md1
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md1/md
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/array_state
Lines: 1
clean
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/chunk_size
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/degraded
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md1/md/dev-sdi
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/dev-sdi/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md1/md/dev-sdj
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/dev-sdj/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/level
Lines: 1
raid1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/metadata_version
Lines: 1
1.2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/raid_disks
Lines: 1
2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md1/md/rd0
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/rd0/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md1/md/rd1
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/rd1/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/sync_action
Lines: 1
idle
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/sync_completed
Lines: 1
none
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md1/md/uuid
Lines: 1
0fbf5f2c-add2-43c2-bd78-a4be3ab709ef
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md10
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md10/md
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/array_state
Lines: 1
clean
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/chunk_size
Lines: 1
524288
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/degraded
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md10/md/dev-sdu
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/dev-sdu/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md10/md/dev-sdv
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/dev-sdv/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md10/md/dev-sdw
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/dev-sdw/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md10/md/dev-sdx
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/dev-sdx/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/level
Lines: 1
raid10
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/metadata_version
Lines: 1
1.2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/raid_disks
Lines: 1
4
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md10/md/rd0
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/rd0/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md10/md/rd1
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/rd1/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md10/md/rd2
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/rd2/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md10/md/rd3
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/rd3/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/sync_action
Lines: 1
idle
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/sync_completed
Lines: 1
none
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md10/md/uuid
Lines: 1
0c15f7e7-b159-4b1f-a5cd-a79b5c04b6f5
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md4
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md4/md
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/array_state
Lines: 1
clean
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/chunk_size
Lines: 1
524288
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/degraded
Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md4/md/dev-sdk
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/dev-sdk/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md4/md/dev-sdl
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/dev-sdl/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md4/md/dev-sdm
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/dev-sdm/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/level
Lines: 1
raid4
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/metadata_version
Lines: 1
1.2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/raid_disks
Lines: 1
3
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md4/md/rd0
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/rd0/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md4/md/rd1
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/rd1/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md4/md/rd2
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/rd2/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/sync_action
Lines: 1
idle
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/sync_completed
Lines: 1
none
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md4/md/uuid
Lines: 1
67f415d5-2c0c-4b69-8e0d-7e20ef553457
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md5
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md5/md
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/array_state
Lines: 1
clean
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/chunk_size
Lines: 1
524288
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/degraded
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md5/md/dev-sdaa
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/dev-sdaa/state
Lines: 1
spare
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md5/md/dev-sdn
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/dev-sdn/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md5/md/dev-sdo
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/dev-sdo/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md5/md/dev-sdp
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/dev-sdp/state
Lines: 1
faulty
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/level
Lines: 1
raid5
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/metadata_version
Lines: 1
1.2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/raid_disks
Lines: 1
3
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md5/md/rd0
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/rd0/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md5/md/rd1
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/rd1/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md5/md/rd2
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/rd2/state
Lines: 1
faulty
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/sync_action
Lines: 1
idle
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/sync_completed
Lines: 1
none
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md5/md/uuid
Lines: 1
7615b98d-f2ba-4d99-bee8-6202d8e130b9
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md6
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md6/md
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/array_state
Lines: 1
active
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/chunk_size
Lines: 1
524288
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/degraded
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md6/md/dev-sdq
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/dev-sdq/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md6/md/dev-sdr
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/dev-sdr/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md6/md/dev-sds
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/dev-sds/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md6/md/dev-sdt
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/dev-sdt/state
Lines: 1
spare
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/level
Lines: 1
raid6
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/metadata_version
Lines: 1
1.2
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/raid_disks
Lines: 1
4
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md6/md/rd0
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/rd0/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md6/md/rd1
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/rd1/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md6/md/rd2
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/rd2/state
Lines: 1
in_sync
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/block/md6/md/rd3
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/rd3/state
Lines: 1
spare
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/sync_action
Lines: 1
recover
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/sync_completed
Lines: 1
1569888 / 2093056
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/block/md6/md/uuid
Lines: 1
5f529b25-6efd-46e4-99a2-31f6f597be6b
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/bus
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@ -437,6 +1261,26 @@ Lines: 1
100000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/hwmon/hwmon4/freq1_input
Lines: 1
214000000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/hwmon/hwmon4/freq1_label
Lines: 1
sclk
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/hwmon/hwmon4/freq2_input
Lines: 1
300000000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/hwmon/hwmon4/freq2_label
Lines: 1
mclk
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/hwmon/hwmon5
SymlinkTo: ../../devices/platform/bogus.0/hwmon/hwmon5/
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@ -1337,7 +2181,7 @@ Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/nvme/nvme0/model
Lines: 1
Samsung SSD 970 PRO 512GB
Samsung SSD 970 PRO 512GB
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/nvme/nvme0/serial
@ -2750,7 +3594,7 @@ Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/devices/platform/applesmc.768/fan1_label
Lines: 1
Left side
Left side
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/devices/platform/applesmc.768/fan1_manual
@ -2784,7 +3628,7 @@ Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/devices/platform/applesmc.768/fan2_label
Lines: 1
Right side
Right side
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/devices/platform/applesmc.768/fan2_manual

View file

@ -44,7 +44,7 @@ var (
    hwmonSensorTypes = []string{
        "vrm", "beep_enable", "update_interval", "in", "cpu", "fan",
        "pwm", "temp", "curr", "power", "energy", "humidity",
        "intrusion",
        "intrusion", "freq",
    }
)
@ -357,6 +357,15 @@ func (c *hwMonCollector) updateHwmon(ch chan<- prometheus.Metric, dir string) er
            continue
        }

        if sensorType == "freq" && element == "input" {
            if label, ok := sensorData["label"]; ok {
                sensorLabel := cleanMetricName(label)
                desc := prometheus.NewDesc(name+"_freq_mhz", "Hardware monitor for GPU frequency in MHz", hwmonLabelDesc, nil)
                ch <- prometheus.MustNewConstMetric(
                    desc, prometheus.GaugeValue, parsedValue/1000000.0, append(labels[:len(labels)-1], sensorLabel)...)
            }
            continue
        }

        // fallback, just dump the metric as is
        desc := prometheus.NewDesc(name, "Hardware monitor "+sensorType+" element "+element, hwmonLabelDesc, nil)
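A quick sanity check of the new branch against the hwmon4 fixtures added above: freqN_input files report Hz and the gauge is exposed in MHz, so 214000000 becomes the 214 shown for node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="sclk"}. A minimal, hypothetical Go sketch of just that conversion, not the collector's full code path (which also cleans the label and rewrites the sensor name):

package main

import "fmt"

// freqMHz mirrors the arithmetic in the new "freq" branch above:
// sysfs freqN_input values are in Hz, the exposed gauge is in MHz.
func freqMHz(inputHz float64) float64 {
    return inputHz / 1000000.0
}

func main() {
    // Values taken from the sys/class/hwmon/hwmon4 fixtures added in this change.
    fmt.Println(freqMHz(214000000)) // 214 -> node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="sclk"}
    fmt.Println(freqMHz(300000000)) // 300 -> node_hwmon_freq_freq_mhz{chip="hwmon4",sensor="mclk"}
}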

View file

@ -18,9 +18,10 @@
package collector

import (
    "log/slog"

    "github.com/alecthomas/kingpin/v2"
    "github.com/prometheus/client_golang/prometheus"
    "log/slog"
)

type interruptsCollector struct {

View file

@ -21,6 +21,8 @@ import (
    "strconv"
    "unsafe"

    "github.com/prometheus/node_exporter/collector/utils"

    "github.com/prometheus/client_golang/prometheus"
    "golang.org/x/sys/unix"
)
@ -49,7 +51,7 @@ func intr(idx _C_int) (itr interrupt, err error) {
        return
    }
    dev := *(*[128]byte)(unsafe.Pointer(&buf[0]))
    itr.device = string(dev[:])
    itr.device = utils.SafeBytesToString(dev[:])

    mib[2] = KERN_INTRCNT_VECTOR
    buf, err = sysctl(mib[:])
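The switch away from string(dev[:]) matters because dev is a fixed 128-byte buffer filled by sysctl, so the plain conversion keeps every trailing NUL byte in the device label. Below is a minimal sketch of the behaviour a helper like utils.SafeBytesToString presumably provides, namely truncating at the first NUL; this is an assumption about the helper, not its actual source:

package main

import (
    "bytes"
    "fmt"
)

// safeBytesToString is a hypothetical stand-in for utils.SafeBytesToString:
// it trims a fixed-size, NUL-padded C buffer down to the printable prefix.
func safeBytesToString(b []byte) string {
    if i := bytes.IndexByte(b, 0); i >= 0 {
        return string(b[:i])
    }
    return string(b)
}

func main() {
    var dev [128]byte
    copy(dev[:], "ix0") // hypothetical device name in place of what the KERN_INTRCNT sysctl returns
    fmt.Printf("%q\n", string(dev[:]))            // "ix0" followed by 125 NUL bytes
    fmt.Printf("%q\n", safeBytesToString(dev[:])) // "ix0"
}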

View file

@ -22,6 +22,8 @@ import (
    "log/slog"
    "os"

    "github.com/prometheus/procfs/sysfs"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/procfs"
)
@ -98,17 +100,30 @@ var (
        []string{"device"},
        nil,
    )

    mdraidDisks = prometheus.NewDesc(
        prometheus.BuildFQName(namespace, "md", "raid_disks"),
        "Number of raid disks on device.",
        []string{"device"},
        nil,
    )

    mdraidDegradedDisksDesc = prometheus.NewDesc(
        prometheus.BuildFQName(namespace, "md", "degraded"),
        "Number of degraded disks on device.",
        []string{"device"},
        nil,
    )
)
func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error {
    fs, err := procfs.NewFS(*procPath)
    procFS, err := procfs.NewFS(*procPath)
    if err != nil {
        return fmt.Errorf("failed to open procfs: %w", err)
    }

    mdStats, err := fs.MDStat()
    mdStats, err := procFS.MDStat()
    if err != nil {
        if errors.Is(err, os.ErrNotExist) {
            c.logger.Debug("Not collecting mdstat, file does not exist", "file", *procPath)
@ -201,5 +216,34 @@ func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error {
        )
    }

    sysFS, err := sysfs.NewFS(*sysPath)
    if err != nil {
        return fmt.Errorf("failed to open sysfs: %w", err)
    }

    mdraids, err := sysFS.Mdraids()
    if err != nil {
        if errors.Is(err, os.ErrNotExist) {
            c.logger.Debug("Not collecting mdraids, file does not exist", "file", *sysPath)
            return ErrNoData
        }
        return fmt.Errorf("error parsing mdraids: %w", err)
    }

    for _, mdraid := range mdraids {
        ch <- prometheus.MustNewConstMetric(
            mdraidDisks,
            prometheus.GaugeValue,
            float64(mdraid.Disks),
            mdraid.Device,
        )
        ch <- prometheus.MustNewConstMetric(
            mdraidDegradedDisksDesc,
            prometheus.GaugeValue,
            float64(mdraid.DegradedDisks),
            mdraid.Device,
        )
    }

    return nil
}
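For a sense of what the new sysfs path yields, here is a small standalone sketch (error handling shortened) that points the same prometheus/procfs calls used above at the fixture tree the test added below relies on, and prints the per-device values backing node_md_raid_disks and node_md_degraded:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs/sysfs"
)

func main() {
    // Same fixture tree the new test points *sysPath at.
    fs, err := sysfs.NewFS("fixtures/sys")
    if err != nil {
        log.Fatal(err)
    }
    mdraids, err := fs.Mdraids()
    if err != nil {
        log.Fatal(err)
    }
    for _, md := range mdraids {
        // e.g. md5: raid_disks=3 degraded=1 (one faulty disk plus a spare in the fixtures)
        fmt.Printf("%s: raid_disks=%d degraded=%d\n", md.Device, md.Disks, md.DegradedDisks)
    }
}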

View file

@ -0,0 +1,294 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nomdadm
// +build !nomdadm
package collector
import (
"log/slog"
"os"
"strings"
"testing"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
)
type testMdadmCollector struct {
mc Collector
}
func (c testMdadmCollector) Collect(ch chan<- prometheus.Metric) {
c.mc.Update(ch)
}
func (c testMdadmCollector) Describe(ch chan<- *prometheus.Desc) {
prometheus.DescribeByCollect(c, ch)
}
func NewTestMdadmCollector(logger *slog.Logger) (prometheus.Collector, error) {
mc, err := NewMdadmCollector(logger)
if err != nil {
return testMdadmCollector{}, err
}
return &testMdadmCollector{mc}, nil
}
func TestMdadmStats(t *testing.T) {
*sysPath = "fixtures/sys"
*procPath = "fixtures/proc"
testcase := `# HELP node_md_blocks Total number of blocks on device.
# TYPE node_md_blocks gauge
node_md_blocks{device="md0"} 248896
node_md_blocks{device="md00"} 4.186624e+06
node_md_blocks{device="md10"} 3.14159265e+08
node_md_blocks{device="md101"} 322560
node_md_blocks{device="md11"} 4.190208e+06
node_md_blocks{device="md12"} 3.886394368e+09
node_md_blocks{device="md120"} 2.095104e+06
node_md_blocks{device="md126"} 1.855870976e+09
node_md_blocks{device="md127"} 3.12319552e+08
node_md_blocks{device="md201"} 1.993728e+06
node_md_blocks{device="md219"} 7932
node_md_blocks{device="md3"} 5.853468288e+09
node_md_blocks{device="md4"} 4.883648e+06
node_md_blocks{device="md6"} 1.95310144e+08
node_md_blocks{device="md7"} 7.813735424e+09
node_md_blocks{device="md8"} 1.95310144e+08
node_md_blocks{device="md9"} 523968
# HELP node_md_blocks_synced Number of blocks synced on device.
# TYPE node_md_blocks_synced gauge
node_md_blocks_synced{device="md0"} 248896
node_md_blocks_synced{device="md00"} 4.186624e+06
node_md_blocks_synced{device="md10"} 3.14159265e+08
node_md_blocks_synced{device="md101"} 322560
node_md_blocks_synced{device="md11"} 0
node_md_blocks_synced{device="md12"} 3.886394368e+09
node_md_blocks_synced{device="md120"} 2.095104e+06
node_md_blocks_synced{device="md126"} 1.855870976e+09
node_md_blocks_synced{device="md127"} 3.12319552e+08
node_md_blocks_synced{device="md201"} 114176
node_md_blocks_synced{device="md219"} 7932
node_md_blocks_synced{device="md3"} 5.853468288e+09
node_md_blocks_synced{device="md4"} 4.883648e+06
node_md_blocks_synced{device="md6"} 1.6775552e+07
node_md_blocks_synced{device="md7"} 7.813735424e+09
node_md_blocks_synced{device="md8"} 1.6775552e+07
node_md_blocks_synced{device="md9"} 0
# HELP node_md_degraded Number of degraded disks on device.
# TYPE node_md_degraded gauge
node_md_degraded{device="md0"} 0
node_md_degraded{device="md1"} 0
node_md_degraded{device="md10"} 0
node_md_degraded{device="md4"} 0
node_md_degraded{device="md5"} 1
node_md_degraded{device="md6"} 1
# HELP node_md_disks Number of active/failed/spare disks of device.
# TYPE node_md_disks gauge
node_md_disks{device="md0",state="active"} 2
node_md_disks{device="md0",state="failed"} 0
node_md_disks{device="md0",state="spare"} 0
node_md_disks{device="md00",state="active"} 1
node_md_disks{device="md00",state="failed"} 0
node_md_disks{device="md00",state="spare"} 0
node_md_disks{device="md10",state="active"} 2
node_md_disks{device="md10",state="failed"} 0
node_md_disks{device="md10",state="spare"} 0
node_md_disks{device="md101",state="active"} 3
node_md_disks{device="md101",state="failed"} 0
node_md_disks{device="md101",state="spare"} 0
node_md_disks{device="md11",state="active"} 2
node_md_disks{device="md11",state="failed"} 1
node_md_disks{device="md11",state="spare"} 2
node_md_disks{device="md12",state="active"} 2
node_md_disks{device="md12",state="failed"} 0
node_md_disks{device="md12",state="spare"} 0
node_md_disks{device="md120",state="active"} 2
node_md_disks{device="md120",state="failed"} 0
node_md_disks{device="md120",state="spare"} 0
node_md_disks{device="md126",state="active"} 2
node_md_disks{device="md126",state="failed"} 0
node_md_disks{device="md126",state="spare"} 0
node_md_disks{device="md127",state="active"} 2
node_md_disks{device="md127",state="failed"} 0
node_md_disks{device="md127",state="spare"} 0
node_md_disks{device="md201",state="active"} 2
node_md_disks{device="md201",state="failed"} 0
node_md_disks{device="md201",state="spare"} 0
node_md_disks{device="md219",state="active"} 0
node_md_disks{device="md219",state="failed"} 0
node_md_disks{device="md219",state="spare"} 3
node_md_disks{device="md3",state="active"} 8
node_md_disks{device="md3",state="failed"} 0
node_md_disks{device="md3",state="spare"} 2
node_md_disks{device="md4",state="active"} 0
node_md_disks{device="md4",state="failed"} 1
node_md_disks{device="md4",state="spare"} 1
node_md_disks{device="md6",state="active"} 1
node_md_disks{device="md6",state="failed"} 1
node_md_disks{device="md6",state="spare"} 1
node_md_disks{device="md7",state="active"} 3
node_md_disks{device="md7",state="failed"} 1
node_md_disks{device="md7",state="spare"} 0
node_md_disks{device="md8",state="active"} 2
node_md_disks{device="md8",state="failed"} 0
node_md_disks{device="md8",state="spare"} 2
node_md_disks{device="md9",state="active"} 4
node_md_disks{device="md9",state="failed"} 2
node_md_disks{device="md9",state="spare"} 1
# HELP node_md_disks_required Total number of disks of device.
# TYPE node_md_disks_required gauge
node_md_disks_required{device="md0"} 2
node_md_disks_required{device="md00"} 1
node_md_disks_required{device="md10"} 2
node_md_disks_required{device="md101"} 3
node_md_disks_required{device="md11"} 2
node_md_disks_required{device="md12"} 2
node_md_disks_required{device="md120"} 2
node_md_disks_required{device="md126"} 2
node_md_disks_required{device="md127"} 2
node_md_disks_required{device="md201"} 2
node_md_disks_required{device="md219"} 0
node_md_disks_required{device="md3"} 8
node_md_disks_required{device="md4"} 0
node_md_disks_required{device="md6"} 2
node_md_disks_required{device="md7"} 4
node_md_disks_required{device="md8"} 2
node_md_disks_required{device="md9"} 4
# HELP node_md_raid_disks Number of raid disks on device.
# TYPE node_md_raid_disks gauge
node_md_raid_disks{device="md0"} 2
node_md_raid_disks{device="md1"} 2
node_md_raid_disks{device="md10"} 4
node_md_raid_disks{device="md4"} 3
node_md_raid_disks{device="md5"} 3
node_md_raid_disks{device="md6"} 4
# HELP node_md_state Indicates the state of md-device.
# TYPE node_md_state gauge
node_md_state{device="md0",state="active"} 1
node_md_state{device="md0",state="check"} 0
node_md_state{device="md0",state="inactive"} 0
node_md_state{device="md0",state="recovering"} 0
node_md_state{device="md0",state="resync"} 0
node_md_state{device="md00",state="active"} 1
node_md_state{device="md00",state="check"} 0
node_md_state{device="md00",state="inactive"} 0
node_md_state{device="md00",state="recovering"} 0
node_md_state{device="md00",state="resync"} 0
node_md_state{device="md10",state="active"} 1
node_md_state{device="md10",state="check"} 0
node_md_state{device="md10",state="inactive"} 0
node_md_state{device="md10",state="recovering"} 0
node_md_state{device="md10",state="resync"} 0
node_md_state{device="md101",state="active"} 1
node_md_state{device="md101",state="check"} 0
node_md_state{device="md101",state="inactive"} 0
node_md_state{device="md101",state="recovering"} 0
node_md_state{device="md101",state="resync"} 0
node_md_state{device="md11",state="active"} 0
node_md_state{device="md11",state="check"} 0
node_md_state{device="md11",state="inactive"} 0
node_md_state{device="md11",state="recovering"} 0
node_md_state{device="md11",state="resync"} 1
node_md_state{device="md12",state="active"} 1
node_md_state{device="md12",state="check"} 0
node_md_state{device="md12",state="inactive"} 0
node_md_state{device="md12",state="recovering"} 0
node_md_state{device="md12",state="resync"} 0
node_md_state{device="md120",state="active"} 1
node_md_state{device="md120",state="check"} 0
node_md_state{device="md120",state="inactive"} 0
node_md_state{device="md120",state="recovering"} 0
node_md_state{device="md120",state="resync"} 0
node_md_state{device="md126",state="active"} 1
node_md_state{device="md126",state="check"} 0
node_md_state{device="md126",state="inactive"} 0
node_md_state{device="md126",state="recovering"} 0
node_md_state{device="md126",state="resync"} 0
node_md_state{device="md127",state="active"} 1
node_md_state{device="md127",state="check"} 0
node_md_state{device="md127",state="inactive"} 0
node_md_state{device="md127",state="recovering"} 0
node_md_state{device="md127",state="resync"} 0
node_md_state{device="md201",state="active"} 0
node_md_state{device="md201",state="check"} 1
node_md_state{device="md201",state="inactive"} 0
node_md_state{device="md201",state="recovering"} 0
node_md_state{device="md201",state="resync"} 0
node_md_state{device="md219",state="active"} 0
node_md_state{device="md219",state="check"} 0
node_md_state{device="md219",state="inactive"} 1
node_md_state{device="md219",state="recovering"} 0
node_md_state{device="md219",state="resync"} 0
node_md_state{device="md3",state="active"} 1
node_md_state{device="md3",state="check"} 0
node_md_state{device="md3",state="inactive"} 0
node_md_state{device="md3",state="recovering"} 0
node_md_state{device="md3",state="resync"} 0
node_md_state{device="md4",state="active"} 0
node_md_state{device="md4",state="check"} 0
node_md_state{device="md4",state="inactive"} 1
node_md_state{device="md4",state="recovering"} 0
node_md_state{device="md4",state="resync"} 0
node_md_state{device="md6",state="active"} 0
node_md_state{device="md6",state="check"} 0
node_md_state{device="md6",state="inactive"} 0
node_md_state{device="md6",state="recovering"} 1
node_md_state{device="md6",state="resync"} 0
node_md_state{device="md7",state="active"} 1
node_md_state{device="md7",state="check"} 0
node_md_state{device="md7",state="inactive"} 0
node_md_state{device="md7",state="recovering"} 0
node_md_state{device="md7",state="resync"} 0
node_md_state{device="md8",state="active"} 0
node_md_state{device="md8",state="check"} 0
node_md_state{device="md8",state="inactive"} 0
node_md_state{device="md8",state="recovering"} 0
node_md_state{device="md8",state="resync"} 1
node_md_state{device="md9",state="active"} 0
node_md_state{device="md9",state="check"} 0
node_md_state{device="md9",state="inactive"} 0
node_md_state{device="md9",state="recovering"} 0
node_md_state{device="md9",state="resync"} 1
`
logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
Level: slog.LevelError,
AddSource: true,
}))
collector, err := NewMdadmCollector(logger)
if err != nil {
panic(err)
}
c, err := NewTestMdadmCollector(logger)
if err != nil {
t.Fatal(err)
}
reg := prometheus.NewRegistry()
reg.MustRegister(c)
sink := make(chan prometheus.Metric)
go func() {
err := collector.Update(sink)
if err != nil {
panic(err)
}
close(sink)
}()
err = testutil.GatherAndCompare(reg, strings.NewReader(testcase))
if err != nil {
t.Fatal(err)
}
}

View file

@ -40,8 +40,12 @@ func (c *meminfoCollector) getMemInfo() (map[string]float64, error) {
}
return map[string]float64{
"total_bytes": float64(stats.RealTotal * 4096),
"free_bytes": float64(stats.RealFree * 4096),
"available_bytes": float64(stats.RealAvailable * 4096),
"total_bytes": float64(stats.RealTotal * 4096),
"free_bytes": float64(stats.RealFree * 4096),
"available_bytes": float64(stats.RealAvailable * 4096),
"process_bytes": float64(stats.RealProcess * 4096),
"paging_space_total_bytes": float64(stats.PgSpTotal * 4096),
"paging_space_free_bytes": float64(stats.PgSpFree * 4096),
"page_scans_total": float64(stats.Scans),
}, nil
}

View file

@ -32,16 +32,20 @@ func getNetDevStats(filter *deviceFilter, logger *slog.Logger) (netDevStats, err
for _, stat := range stats {
netDev[stat.Name] = map[string]uint64{
"receive_packets": uint64(stat.RxPackets),
"transmit_packets": uint64(stat.TxPackets),
"receive_bytes": uint64(stat.RxBytes),
"transmit_bytes": uint64(stat.TxBytes),
"receive_errors": uint64(stat.RxErrors),
"transmit_errors": uint64(stat.TxErrors),
"receive_dropped": uint64(stat.RxPacketsDropped),
"transmit_dropped": uint64(stat.TxPacketsDropped),
"receive_multicast": uint64(stat.RxMulticastPackets),
"transmit_multicast": uint64(stat.TxMulticastPackets),
"receive_bytes": uint64(stat.RxBytes),
"receive_dropped": uint64(stat.RxPacketsDropped),
"receive_errors": uint64(stat.RxErrors),
"receive_multicast": uint64(stat.RxMulticastPackets),
"receive_packets": uint64(stat.RxPackets),
"receive_collision_errors": uint64(stat.RxCollisionErrors),
"transmit_bytes": uint64(stat.TxBytes),
"transmit_dropped": uint64(stat.TxPacketsDropped),
"transmit_errors": uint64(stat.TxErrors),
"transmit_multicast": uint64(stat.TxMulticastPackets),
"transmit_packets": uint64(stat.TxPackets),
"transmit_queue_overflow": uint64(stat.TxQueueOverflow),
"transmit_collision_single_errors": uint64(stat.TxSingleCollisionCount),
"transmit_collision_multiple_errors": uint64(stat.TxMultipleCollisionCount),
}
}

View file

@ -22,6 +22,7 @@ import (
"fmt"
"log/slog"
"net"
"unsafe"
"golang.org/x/sys/unix"
)
@ -71,51 +72,107 @@ func getIfaceData(index int) (*ifMsghdr2, error) {
return nil, err
}
err = binary.Read(bytes.NewReader(rawData), binary.LittleEndian, &data)
if err != nil {
return &data, err
}
/*
As of macOS Ventura 13.2.1, there's a kernel bug which truncates traffic values at the 4GiB mark.
This is a workaround to fetch the interface traffic metrics using a sysctl call.
Apple wants to prevent fingerprinting by 3rd-party apps and might fix this bug in the future, which would break this implementation.
*/
mib := []int32{
unix.CTL_NET,
unix.AF_LINK,
0, // NETLINK_GENERIC: functions not specific to a type of iface
2, // IFMIB_IFDATA: per-interface data table
int32(index),
1, // IFDATA_GENERAL: generic stats for all kinds of ifaces
}
var mibData ifMibData
size := unsafe.Sizeof(mibData)
if _, _, errno := unix.Syscall6(
unix.SYS___SYSCTL,
uintptr(unsafe.Pointer(&mib[0])),
uintptr(len(mib)),
uintptr(unsafe.Pointer(&mibData)),
uintptr(unsafe.Pointer(&size)),
uintptr(unsafe.Pointer(nil)),
0,
); errno != 0 {
return &data, err
}
var ifdata ifData64
err = binary.Read(bytes.NewReader(mibData.Data[:]), binary.LittleEndian, &ifdata)
if err != nil {
return &data, err
}
data.Data.Ibytes = ifdata.Ibytes
data.Data.Obytes = ifdata.Obytes
return &data, err
}
// https://github.com/apple-oss-distributions/xnu/blob/main/bsd/net/if.h#L220-L232
type ifMsghdr2 struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
_ [2]byte
SndLen int32
SndMaxlen int32
SndDrops int32
Timer int32
Data ifData64
Msglen uint16 // to skip over non-understood messages
Version uint8 // future binary compatibility
Type uint8 // message type
Addrs int32 // like rtm_addrs
Flags int32 // value of if_flags
Index uint16 // index for associated ifp
_ [2]byte // padding for alignment
SndLen int32 // instantaneous length of send queue
SndMaxlen int32 // maximum length of send queue
SndDrops int32 // number of drops in send queue
Timer int32 // time until if_watchdog called
Data ifData64 // statistics and other data
}
// https://github.com/apple/darwin-xnu/blob/main/bsd/net/if_var.h#L199-L231
// https://github.com/apple-oss-distributions/xnu/blob/main/bsd/net/if_var.h#L207-L235
type ifData64 struct {
Type uint8
Typelen uint8
Physical uint8
Addrlen uint8
Hdrlen uint8
Recvquota uint8
Xmitquota uint8
Unused1 uint8
Mtu uint32
Metric uint32
Baudrate uint64
Ipackets uint64
Ierrors uint64
Opackets uint64
Oerrors uint64
Collisions uint64
Ibytes uint64
Obytes uint64
Imcasts uint64
Omcasts uint64
Iqdrops uint64
Noproto uint64
Recvtiming uint32
Xmittiming uint32
Lastchange unix.Timeval32
Type uint8 // ethernet, tokenring, etc
Typelen uint8 // Length of frame type id
Physical uint8 // e.g., AUI, Thinnet, 10base-T, etc
Addrlen uint8 // media address length
Hdrlen uint8 // media header length
Recvquota uint8 // polling quota for receive intrs
Xmitquota uint8 // polling quota for xmit intrs
Unused1 uint8 // for future use
Mtu uint32 // maximum transmission unit
Metric uint32 // routing metric (external only)
Baudrate uint64 // linespeed
// volatile statistics
Ipackets uint64 // packets received on interface
Ierrors uint64 // input errors on interface
Opackets uint64 // packets sent on interface
Oerrors uint64 // output errors on interface
Collisions uint64 // collisions on csma interfaces
Ibytes uint64 // total number of octets received
Obytes uint64 // total number of octets sent
Imcasts uint64 // packets received via multicast
Omcasts uint64 // packets sent via multicast
Iqdrops uint64 // dropped on input, this interface
Noproto uint64 // destined for unsupported protocol
Recvtiming uint32 // usec spent receiving when timing
Xmittiming uint32 // usec spent xmitting when timing
Lastchange unix.Timeval32 // time of last administrative change
}
// https://github.com/apple-oss-distributions/xnu/blob/main/bsd/net/if_mib.h#L65-L74
type ifMibData struct {
Name [16]byte // name of interface
PCount uint32 // number of promiscuous listeners
Flags uint32 // interface flags
SendLength uint32 // instantaneous length of send queue
MaxSendLength uint32 // maximum length of send queue
SendDrops uint32 // number of drops in send queue
_ [4]uint32 // for future expansion
Data [128]byte // generic information and statistics
}
func getNetDevLabels() (map[string]map[string]string, error) {

View file

@ -0,0 +1,86 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nonetinterface
// +build !nonetinterface
package collector
import (
"log/slog"
"github.com/power-devops/perfstat"
"github.com/prometheus/client_golang/prometheus"
)
type netinterfaceCollector struct {
logger *slog.Logger
collisions *prometheus.Desc
ibytes *prometheus.Desc
ipackets *prometheus.Desc
obytes *prometheus.Desc
opackets *prometheus.Desc
}
const (
netinterfaceSubsystem = "netinterface"
)
func init() {
registerCollector("netinterface", defaultEnabled, NewNetinterfaceCollector)
}
func NewNetinterfaceCollector(logger *slog.Logger) (Collector, error) {
labels := []string{"interface"}
return &netinterfaceCollector{
logger: logger,
collisions: prometheus.NewDesc(
prometheus.BuildFQName(namespace, netinterfaceSubsystem, "collisions_total"),
"Total number of CSMA collisions on the interface.", labels, nil,
),
ibytes: prometheus.NewDesc(
prometheus.BuildFQName(namespace, netinterfaceSubsystem, "receive_bytes_total"),
"Total number of bytes received on the interface.", labels, nil,
),
ipackets: prometheus.NewDesc(
prometheus.BuildFQName(namespace, netinterfaceSubsystem, "receive_packets_total"),
"Total number of packets received on the interface.", labels, nil,
),
obytes: prometheus.NewDesc(
prometheus.BuildFQName(namespace, netinterfaceSubsystem, "transmit_bytes_total"),
"Total number of bytes transmitted on the interface.", labels, nil,
),
opackets: prometheus.NewDesc(
prometheus.BuildFQName(namespace, netinterfaceSubsystem, "transmit_packets_total"),
"Total number of packets transmitted on the interface.", labels, nil,
),
}, nil
}
func (c *netinterfaceCollector) Update(ch chan<- prometheus.Metric) error {
stats, err := perfstat.NetIfaceStat()
if err != nil {
return err
}
for _, stat := range stats {
iface := stat.Name
ch <- prometheus.MustNewConstMetric(c.collisions, prometheus.CounterValue, float64(stat.Collisions), iface)
ch <- prometheus.MustNewConstMetric(c.ibytes, prometheus.CounterValue, float64(stat.IBytes), iface)
ch <- prometheus.MustNewConstMetric(c.ipackets, prometheus.CounterValue, float64(stat.IPackets), iface)
ch <- prometheus.MustNewConstMetric(c.obytes, prometheus.CounterValue, float64(stat.OBytes), iface)
ch <- prometheus.MustNewConstMetric(c.opackets, prometheus.CounterValue, float64(stat.OPackets), iface)
}
return nil
}

View file

@ -0,0 +1,108 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build freebsd
// +build freebsd
package collector
import (
"errors"
"fmt"
"log/slog"
"unsafe"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
)
/*
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
*/
import "C"
var (
bsdNetstatTcpSendPacketsTotal = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "netstat", "tcp_transmit_packets_total"),
"TCP packets sent",
nil, nil,
)
bsdNetstatTcpRecvPacketsTotal = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "netstat", "tcp_receive_packets_total"),
"TCP packets received",
nil, nil,
)
)
type netStatCollector struct {
netStatMetric *prometheus.Desc
}
func init() {
registerCollector("netstat", defaultEnabled, NewNetStatCollector)
}
func NewNetStatCollector(logger *slog.Logger) (Collector, error) {
return &netStatCollector{}, nil
}
func (c *netStatCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- c.netStatMetric
}
func (c *netStatCollector) Collect(ch chan<- prometheus.Metric) {
_ = c.Update(ch)
}
func getData(queryString string) ([]byte, error) {
data, err := unix.SysctlRaw(queryString)
if err != nil {
fmt.Println("Error:", err)
return nil, err
}
if len(data) < int(unsafe.Sizeof(C.struct_tcpstat{})) {
return nil, errors.New("Data Size mismatch")
}
return data, nil
}
func (c *netStatCollector) Update(ch chan<- prometheus.Metric) error {
tcpData, err := getData("net.inet.tcp.stats")
if err != nil {
return err
}
tcpStats := *(*C.struct_tcpstat)(unsafe.Pointer(&tcpData[0]))
ch <- prometheus.MustNewConstMetric(
bsdNetstatTcpSendPacketsTotal,
prometheus.CounterValue,
float64(tcpStats.tcps_sndtotal),
)
ch <- prometheus.MustNewConstMetric(
bsdNetstatTcpRecvPacketsTotal,
prometheus.CounterValue,
float64(tcpStats.tcps_rcvtotal),
)
return nil
}

View file

@ -0,0 +1,77 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build freebsd
// +build freebsd
package collector
import (
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
"testing"
"unsafe"
)
func TestNetStatCollectorDescribe(t *testing.T) {
ch := make(chan *prometheus.Desc, 1)
collector := &netStatCollector{
netStatMetric: prometheus.NewDesc("dummy_metric", "dummy", nil, nil),
}
collector.Describe(ch)
desc := <-ch
if want, got := "dummy_metric", desc.String(); want != got {
t.Errorf("want %s, got %s", want, got)
}
}
func TestGetData(t *testing.T) {
data, err := getData("net.inet.tcp.stats")
if err != nil {
t.Fatal("unexpected error:", err)
}
if got, want := len(data), int(unsafe.Sizeof(unix.TCPStats{})); got < want {
t.Errorf("data length too small: want >= %d, got %d", want, got)
}
}
func TestNetStatCollectorUpdate(t *testing.T) {
ch := make(chan prometheus.Metric, len(metrics))
collector := &netStatCollector{
netStatMetric: prometheus.NewDesc("netstat_metric", "NetStat Metric", nil, nil),
}
err := collector.Update(ch)
if err != nil {
t.Fatal("unexpected error:", err)
}
if got, want := len(ch), len(metrics); got != want {
t.Errorf("metric count mismatch: want %d, got %d", want, got)
}
for range metrics {
<-ch
}
}
func TestNewNetStatCollector(t *testing.T) {
collector, err := NewNetStatCollector(nil)
if err != nil {
t.Fatal("unexpected error:", err)
}
if collector == nil {
t.Fatal("collector is nil, want non-nil")
}
}

collector/partition_aix.go (new file, 118 lines)
View file

@ -0,0 +1,118 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nopartition
// +build !nopartition
package collector
import (
"log/slog"
"github.com/power-devops/perfstat"
"github.com/prometheus/client_golang/prometheus"
)
type partitionCollector struct {
logger *slog.Logger
entitledCapacity *prometheus.Desc
memoryMax *prometheus.Desc
memoryOnline *prometheus.Desc
cpuOnline *prometheus.Desc
cpuSys *prometheus.Desc
cpuPool *prometheus.Desc
powerSaveMode *prometheus.Desc
smtThreads *prometheus.Desc
}
const (
partitionCollectorSubsystem = "partition"
)
func init() {
registerCollector("partition", defaultEnabled, NewPartitionCollector)
}
func NewPartitionCollector(logger *slog.Logger) (Collector, error) {
return &partitionCollector{
logger: logger,
entitledCapacity: prometheus.NewDesc(
prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "entitled_capacity"),
"Entitled processor capacity of the partition in CPU units (e.g. 1.0 = one core).",
nil, nil,
),
memoryMax: prometheus.NewDesc(
prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "memory_max"),
"Maximum memory of the partition in bytes.",
nil, nil,
),
memoryOnline: prometheus.NewDesc(
prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "memory_online"),
"Online memory of the partition in bytes.",
nil, nil,
),
cpuOnline: prometheus.NewDesc(
prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "cpus_online"),
"Number of online CPUs in the partition.",
nil, nil,
),
cpuSys: prometheus.NewDesc(
prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "cpus_sys"),
"Number of physical CPUs in the system.",
nil, nil,
),
cpuPool: prometheus.NewDesc(
prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "cpus_pool"),
"Number of physical CPUs in the pool.",
nil, nil,
),
powerSaveMode: prometheus.NewDesc(
prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "power_save_mode"),
"Power save mode of the partition (1 for enabled, 0 for disabled).",
nil, nil,
),
smtThreads: prometheus.NewDesc(
prometheus.BuildFQName(namespace, partitionCollectorSubsystem, "smt_threads"),
"Number of SMT threads per core.",
nil, nil,
),
}, nil
}
func (c *partitionCollector) Update(ch chan<- prometheus.Metric) error {
stats, err := perfstat.PartitionStat()
if err != nil {
return err
}
powerSaveMode := 0.0
if stats.Conf.PowerSave {
powerSaveMode = 1.0
}
ch <- prometheus.MustNewConstMetric(c.entitledCapacity, prometheus.GaugeValue, float64(stats.EntCapacity)/100.0)
ch <- prometheus.MustNewConstMetric(c.memoryMax, prometheus.GaugeValue, float64(stats.Mem.Max)*1024*1024)
ch <- prometheus.MustNewConstMetric(c.memoryOnline, prometheus.GaugeValue, float64(stats.Mem.Online)*1024*1024)
ch <- prometheus.MustNewConstMetric(c.cpuOnline, prometheus.GaugeValue, float64(stats.VCpus.Online))
ch <- prometheus.MustNewConstMetric(c.cpuSys, prometheus.GaugeValue, float64(stats.NumProcessors.Online))
ch <- prometheus.MustNewConstMetric(c.cpuPool, prometheus.GaugeValue, float64(stats.ActiveCpusInPool))
ch <- prometheus.MustNewConstMetric(c.powerSaveMode, prometheus.GaugeValue, powerSaveMode)
ch <- prometheus.MustNewConstMetric(c.smtThreads, prometheus.GaugeValue, float64(stats.SmtThreads))
return nil
}

View file

@ -27,8 +27,15 @@ import (
"github.com/prometheus/procfs"
)
const (
psiResourceCPU = "cpu"
psiResourceIO = "io"
psiResourceMemory = "memory"
psiResourceIRQ = "irq"
)
var (
psiResources = []string{"cpu", "io", "memory", "irq"}
psiResources = []string{psiResourceCPU, psiResourceIO, psiResourceMemory, psiResourceIRQ}
)
type pressureStatsCollector struct {
@ -93,13 +100,18 @@ func NewPressureStatsCollector(logger *slog.Logger) (Collector, error) {
// Update calls procfs.NewPSIStatsForResource for the different resources and updates the values
func (c *pressureStatsCollector) Update(ch chan<- prometheus.Metric) error {
foundResources := 0
for _, res := range psiResources {
c.logger.Debug("collecting statistics for resource", "resource", res)
vals, err := c.fs.PSIStatsForResource(res)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
c.logger.Debug("pressure information is unavailable, you need a Linux kernel >= 4.20 and/or CONFIG_PSI enabled for your kernel")
return ErrNoData
if errors.Is(err, os.ErrNotExist) && res != psiResourceIRQ {
c.logger.Debug("pressure information is unavailable, you need a Linux kernel >= 4.20 and/or CONFIG_PSI enabled for your kernel", "resource", res)
continue
}
if errors.Is(err, os.ErrNotExist) && res == psiResourceIRQ {
c.logger.Debug("IRQ pressure information is unavailable, you need a Linux kernel >= 6.1 and/or CONFIG_PSI enabled for your kernel", "resource", res)
continue
}
if errors.Is(err, syscall.ENOTSUP) {
c.logger.Debug("pressure information is disabled, add psi=1 kernel command line to enable it")
@ -109,28 +121,35 @@ func (c *pressureStatsCollector) Update(ch chan<- prometheus.Metric) error {
}
// IRQ pressure does not have 'some' data.
// See https://github.com/torvalds/linux/blob/v6.9/include/linux/psi_types.h#L65
if vals.Some == nil && res != "irq" {
if vals.Some == nil && res != psiResourceIRQ {
c.logger.Debug("pressure information returned no 'some' data")
return ErrNoData
}
if vals.Full == nil && res != "cpu" {
if vals.Full == nil && res != psiResourceCPU {
c.logger.Debug("pressure information returned no 'full' data")
return ErrNoData
}
switch res {
case "cpu":
case psiResourceCPU:
ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0)
case "io":
case psiResourceIO:
ch <- prometheus.MustNewConstMetric(c.io, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0)
ch <- prometheus.MustNewConstMetric(c.ioFull, prometheus.CounterValue, float64(vals.Full.Total)/1000.0/1000.0)
case "memory":
case psiResourceMemory:
ch <- prometheus.MustNewConstMetric(c.mem, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0)
ch <- prometheus.MustNewConstMetric(c.memFull, prometheus.CounterValue, float64(vals.Full.Total)/1000.0/1000.0)
case "irq":
case psiResourceIRQ:
ch <- prometheus.MustNewConstMetric(c.irqFull, prometheus.CounterValue, float64(vals.Full.Total)/1000.0/1000.0)
default:
c.logger.Debug("did not account for resource", "resource", res)
continue
}
foundResources++
}
if foundResources == 0 {
c.logger.Debug("pressure information is unavailable, you need a Linux kernel >= 4.20 and/or CONFIG_PSI enabled for your kernel")
return ErrNoData
}
return nil

View file

@ -106,7 +106,7 @@ func (c *processCollector) Update(ch chan<- prometheus.Metric) error {
pidM, err := readUintFromFile(procFilePath("sys/kernel/pid_max"))
if err != nil {
return fmt.Errorf("unable to retrieve limit number of maximum pids alloved: %w", err)
return fmt.Errorf("unable to retrieve limit number of maximum pids allowed: %w", err)
}
ch <- prometheus.MustNewConstMetric(c.pidUsed, prometheus.GaugeValue, float64(pids))
ch <- prometheus.MustNewConstMetric(c.pidMax, prometheus.GaugeValue, float64(pidM))

View file

@ -48,7 +48,7 @@ func TestReadProcessStatus(t *testing.T) {
}
maxPid, err := readUintFromFile(procFilePath("sys/kernel/pid_max"))
if err != nil {
t.Fatalf("Unable to retrieve limit number of maximum pids alloved %v\n", err)
t.Fatalf("Unable to retrieve limit number of maximum pids allowed %v\n", err)
}
if uint64(pids) > maxPid || pids == 0 {
t.Fatalf("Total running pids cannot be greater than %d or equals to 0", maxPid)

View file

@ -17,10 +17,11 @@
package collector
import (
"log/slog"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/go-runit/runit"
"github.com/prometheus/client_golang/prometheus"
"log/slog"
)
var runitServiceDir = kingpin.Flag("collector.runit.servicedir", "Path to runit service directory.").Default("/etc/service").String()

View file

@ -17,9 +17,10 @@
package collector
import (
"log/slog"
"github.com/opencontainers/selinux/go-selinux"
"github.com/prometheus/client_golang/prometheus"
"log/slog"
)
type selinuxCollector struct {

View file

@ -18,9 +18,10 @@ package collector
import (
"fmt"
"log/slog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
"log/slog"
)
type softirqsCollector struct {

View file

@ -74,6 +74,7 @@ type systemdCollector struct {
socketCurrentConnectionsDesc *prometheus.Desc
socketRefusedConnectionsDesc *prometheus.Desc
systemdVersionDesc *prometheus.Desc
virtualizationDesc *prometheus.Desc
// Use regexps for more flexibility than device_filter.go allows
systemdUnitIncludePattern *regexp.Regexp
systemdUnitExcludePattern *regexp.Regexp
@ -132,6 +133,9 @@ func NewSystemdCollector(logger *slog.Logger) (Collector, error) {
systemdVersionDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "version"),
"Detected systemd version", []string{"version"}, nil)
virtualizationDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "virtualization_info"),
"Detected virtualization technology", []string{"virtualization_type"}, nil)
if *oldSystemdUnitExclude != "" {
if !systemdUnitExcludeSet {
@ -167,6 +171,7 @@ func NewSystemdCollector(logger *slog.Logger) (Collector, error) {
socketCurrentConnectionsDesc: socketCurrentConnectionsDesc,
socketRefusedConnectionsDesc: socketRefusedConnectionsDesc,
systemdVersionDesc: systemdVersionDesc,
virtualizationDesc: virtualizationDesc,
systemdUnitIncludePattern: systemdUnitIncludePattern,
systemdUnitExcludePattern: systemdUnitExcludePattern,
logger: logger,
@ -194,6 +199,14 @@ func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error {
systemdVersionFull,
)
systemdVirtualization := c.getSystemdVirtualization(conn)
ch <- prometheus.MustNewConstMetric(
c.virtualizationDesc,
prometheus.GaugeValue,
1.0,
systemdVirtualization,
)
allUnits, err := c.getAllUnits(conn)
if err != nil {
return fmt.Errorf("couldn't get units: %w", err)
@ -505,3 +518,19 @@ func (c *systemdCollector) getSystemdVersion(conn *dbus.Conn) (float64, string)
}
return v, version
}
func (c *systemdCollector) getSystemdVirtualization(conn *dbus.Conn) string {
virt, err := conn.GetManagerProperty("Virtualization")
if err != nil {
c.logger.Debug("Could not get Virtualization property", "err", err)
return "unknown"
}
virtStr := strings.Trim(virt, `"`)
if virtStr == "" {
// If no virtualization type is returned, assume it's bare metal.
return "none"
}
return virtStr
}

View file

@ -253,10 +253,16 @@ func (c *textFileCollector) Update(ch chan<- prometheus.Metric) error {
}
}
mfHelp := make(map[string]*string)
for _, mf := range parsedFamilies {
if mf.Help == nil {
if help, ok := mfHelp[*mf.Name]; ok {
mf.Help = help
continue
}
help := fmt.Sprintf("Metric read from %s", strings.Join(metricsNamesToFiles[*mf.Name], ", "))
mf.Help = &help
mfHelp[*mf.Name] = &help
}
}

View file

@ -50,6 +50,8 @@ import (
"log/slog"
"unsafe"
"github.com/prometheus/node_exporter/collector/utils"
"github.com/prometheus/client_golang/prometheus"
)
@ -176,7 +178,7 @@ func mappingCFStringToString(s C.CFStringRef) string {
buf := make([]byte, maxBufLen)
var usedBufLen C.CFIndex
_ = C.CFStringGetBytes(s, C.CFRange{0, length}, C.kCFStringEncodingUTF8, C.UInt8(0), C.false, (*C.UInt8)(&buf[0]), maxBufLen, &usedBufLen)
return string(buf[:usedBufLen])
return utils.SafeBytesToString(buf[:usedBufLen])
}
func mappingCFNumberLongToInt(n C.CFNumberRef) int {

View file

@ -18,8 +18,9 @@
package collector
import (
"github.com/prometheus/client_golang/prometheus"
"log/slog"
"github.com/prometheus/client_golang/prometheus"
)
var unameDesc = prometheus.NewDesc(
@ -49,11 +50,11 @@ type uname struct {
}
func init() {
registerCollector("uname", defaultEnabled, newUnameCollector)
registerCollector("uname", defaultEnabled, NewUnameCollector)
}
// NewUnameCollector returns new unameCollector.
func newUnameCollector(logger *slog.Logger) (Collector, error) {
func NewUnameCollector(logger *slog.Logger) (Collector, error) {
return &unameCollector{logger}, nil
}

View file

@ -13,6 +13,11 @@
package utils
import (
"bytes"
"strings"
)
func SafeDereference[T any](s ...*T) []T {
var resolved []T
for _, v := range s {
@ -25,3 +30,18 @@ func SafeDereference[T any](s ...*T) []T {
}
return resolved
}
// SafeBytesToString takes a slice of bytes and sanitizes it for Prometheus label
// values.
// * Terminate the string at the first null byte.
// * Convert any invalid UTF-8 to "�".
func SafeBytesToString(b []byte) string {
var s string
zeroIndex := bytes.IndexByte(b, 0)
if zeroIndex == -1 {
s = string(b)
} else {
s = string(b[:zeroIndex])
}
return strings.ToValidUTF8(s, "�")
}

View file

@ -0,0 +1,30 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"testing"
)
func TestSafeBytesToString(t *testing.T) {
foo := []byte("foo\x00")
if want, got := SafeBytesToString(foo), "foo"; want != got {
t.Errorf("Expected: %s, Got: %s", want, got)
}
foo = []byte{115, 97, 110, 101, 253, 190, 214}
if want, got := SafeBytesToString(foo), "sane<6E>"; want != got {
t.Errorf("Expected: %s, Got: %s", want, got)
}
}

View file

@ -301,7 +301,8 @@ func (c *zfsCollector) parsePoolObjsetFile(reader io.Reader, zpoolPath string, h
parseLine := false
var zpoolName, datasetName string
for scanner.Scan() {
parts := strings.Fields(scanner.Text())
line := scanner.Text()
parts := strings.Fields(line)
if !parseLine && len(parts) == 3 && parts[0] == "name" && parts[1] == "type" && parts[2] == "data" {
parseLine = true
@ -315,7 +316,7 @@ func (c *zfsCollector) parsePoolObjsetFile(reader io.Reader, zpoolPath string, h
zpoolPathElements := strings.Split(zpoolPath, "/")
pathLen := len(zpoolPathElements)
zpoolName = zpoolPathElements[pathLen-2]
datasetName = parts[2]
datasetName = line[strings.Index(line, parts[2]):]
continue
}

View file

@ -315,6 +315,55 @@ func TestZpoolParsing(t *testing.T) {
}
}
func TestZpoolObjsetParsingWithSpace(t *testing.T) {
tests := []struct {
path string
expectedDataset string
}{
{
path: "fixtures/proc/spl/kstat/zfs/pool1/objset-1",
expectedDataset: "pool1",
},
{
path: "fixtures/proc/spl/kstat/zfs/pool1/objset-2",
expectedDataset: "pool1/dataset1",
},
{
path: "fixtures/proc/spl/kstat/zfs/pool3/objset-1",
expectedDataset: "pool3",
},
{
path: "fixtures/proc/spl/kstat/zfs/pool3/objset-2",
expectedDataset: "pool3/dataset with space",
},
}
c := zfsCollector{}
var handlerCalled bool
for _, test := range tests {
file, err := os.Open(test.path)
if err != nil {
t.Fatal(err)
}
handlerCalled = false
err = c.parsePoolObjsetFile(file, test.path, func(poolName string, datasetName string, s zfsSysctl, v uint64) {
handlerCalled = true
if test.expectedDataset != datasetName {
t.Fatalf("Incorrectly parsed dataset name: expected: '%s', got: '%s'", test.expectedDataset, datasetName)
}
})
file.Close()
if err != nil {
t.Fatal(err)
}
if !handlerCalled {
t.Fatalf("Zpool parsing handler was not called for '%s'", test.path)
}
}
}
func TestZpoolObjsetParsing(t *testing.T) {
zpoolPaths, err := filepath.Glob("fixtures/proc/spl/kstat/zfs/*/objset-*")
if err != nil {

View file

@ -10,7 +10,7 @@
(
node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceFillingUpWarningThreshold)d
and
predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[%(fsSpaceFillingUpPredictionWindow)s], 24*60*60) < 0
predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[%(fsSpaceFillingUpPredictionWindow)s], %(nodeWarningWindowHours)s*60*60) < 0
and
node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
)
@ -20,7 +20,7 @@
severity: 'warning',
},
annotations: {
summary: 'Filesystem is predicted to run out of space within the next 24 hours.',
summary: 'Filesystem is predicted to run out of space within the next %(nodeWarningWindowHours)s hours.' % $._config,
description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up.',
},
},
@ -30,7 +30,7 @@
(
node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceFillingUpCriticalThreshold)d
and
predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 4*60*60) < 0
predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], %(nodeCriticalWindowHours)s*60*60) < 0
and
node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
)
@ -40,7 +40,7 @@
severity: '%(nodeCriticalSeverity)s' % $._config,
},
annotations: {
summary: 'Filesystem is predicted to run out of space within the next 4 hours.',
summary: 'Filesystem is predicted to run out of space within the next %(nodeCriticalWindowHours)s hours.' % $._config,
description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast.',
},
},
@ -86,7 +86,7 @@
(
node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 40
and
predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 24*60*60) < 0
predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], %(nodeWarningWindowHours)s*60*60) < 0
and
node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
)
@ -96,7 +96,7 @@
severity: 'warning',
},
annotations: {
summary: 'Filesystem is predicted to run out of inodes within the next 24 hours.',
summary: 'Filesystem is predicted to run out of inodes within the next %(nodeWarningWindowHours)s hours.' % $._config,
description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up.',
},
},
@ -106,7 +106,7 @@
(
node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 20
and
predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 4*60*60) < 0
predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], %(nodeCriticalWindowHours)s*60*60) < 0
and
node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
)
@ -116,7 +116,7 @@
severity: '%(nodeCriticalSeverity)s' % $._config,
},
annotations: {
summary: 'Filesystem is predicted to run out of inodes within the next 4 hours.',
summary: 'Filesystem is predicted to run out of inodes within the next %(nodeCriticalWindowHours)s hours.' % $._config,
description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast.',
},
},
@ -191,7 +191,7 @@
||| % $._config,
annotations: {
summary: 'Number of conntrack are getting close to the limit.',
description: '{{ $value | humanizePercentage }} of conntrack entries are used.',
description: '{{ $labels.instance }} {{ $value | humanizePercentage }} of conntrack entries are used.',
},
labels: {
severity: 'warning',
@ -312,7 +312,7 @@
{
alert: 'NodeCPUHighUsage',
expr: |||
sum without(mode) (avg without (cpu) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode!="idle"}[2m]))) * 100 > %(cpuHighUsageThreshold)d
sum without(mode) (avg without (cpu) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode!~"idle|iowait"}[2m]))) * 100 > %(cpuHighUsageThreshold)d
||| % $._config,
'for': '15m',
labels: {
@ -407,6 +407,20 @@
description: 'Systemd service {{ $labels.name }} has entered failed state at {{ $labels.instance }}',
},
},
{
alert: 'NodeSystemdServiceCrashlooping',
expr: |||
increase(node_systemd_service_restart_total{%(nodeExporterSelector)s}[5m]) > 2
||| % $._config,
'for': '15m',
labels: {
severity: 'warning',
},
annotations: {
summary: 'Systemd service keeps restarting, possibly crash looping.',
description: 'Systemd service {{ $labels.name }} has been restarted too many times at {{ $labels.instance }} in the last 15 minutes. Please check if the service is crash looping.',
},
},
{
alert: 'NodeBondingDegraded',
expr: |||

View file

@ -50,6 +50,16 @@
// 'NodeSystemSaturation' alert.
systemSaturationPerCoreThreshold: 2,
// Some of the alerts use predict_linear() to fire ahead of time, to
// prevent unrecoverable situations (e.g. running out of disk space).
// However, the node may have automatic processes (such as cronjobs) that
// resolve the condition within a certain time window, and that window may
// not align with the default prediction window of these alerts, causing
// them to flap. Reducing the prediction window gives those processes more
// time to resolve the situation before the alert fires (an example override
// is sketched after this hunk).
nodeWarningWindowHours: '24',
nodeCriticalWindowHours: '4',
// Available disk space (%) thresholds on which to trigger the
// 'NodeFilesystemSpaceFillingUp' alerts. These alerts fire if the disk
// usage grows in a way that it is predicted to run out in 4h or 1d
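The two window settings introduced above are ordinary `_config` fields, so a consumer of the node-mixin can override them the same way as the existing thresholds. A minimal sketch, assuming the mixin is vendored via jsonnet-bundler under the usual import path; the '12' and '2' values are purely illustrative, not defaults:

local nodeMixin = import 'github.com/prometheus/node_exporter/docs/node-mixin/mixin.libsonnet';

nodeMixin {
  _config+:: {
    // Warn only when exhaustion is predicted within 12 hours (default: '24').
    nodeWarningWindowHours: '12',
    // Go critical only when exhaustion is predicted within 2 hours (default: '4').
    nodeCriticalWindowHours: '2',
  },
}

The values stay strings because the alert expressions interpolate them textually into predict_linear(), e.g. %(nodeWarningWindowHours)s*60*60.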

View file

@ -1,469 +1,474 @@
local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet';
local dashboard = grafana.dashboard;
local row = grafana.row;
local prometheus = grafana.prometheus;
local template = grafana.template;
local graphPanel = grafana.graphPanel;
local variable = dashboard.variable;
local row = grafana.panel.row;
local prometheus = grafana.query.prometheus;
local timeSeriesPanel = grafana.panel.timeSeries;
local tsOptions = timeSeriesPanel.options;
local tsStandardOptions = timeSeriesPanel.standardOptions;
local tsQueryOptions = timeSeriesPanel.queryOptions;
local tsCustom = timeSeriesPanel.fieldConfig.defaults.custom;
local tsLegend = tsOptions.legend;
local c = import '../config.libsonnet';
local datasourceTemplate = {
current: {
text: 'default',
value: 'default',
},
hide: 0,
label: 'Data Source',
name: 'datasource',
options: [],
query: 'prometheus',
refresh: 1,
regex: '',
type: 'datasource',
};
local datasource = variable.datasource.new(
'datasource', 'prometheus'
);
local tsCommonPanelOptions =
variable.query.withDatasourceFromVariable(datasource)
+ tsCustom.stacking.withMode('normal')
+ tsCustom.withFillOpacity(100)
+ tsCustom.withShowPoints('never')
+ tsLegend.withShowLegend(false)
+ tsOptions.tooltip.withMode('multi')
+ tsOptions.tooltip.withSort('desc');
local CPUUtilisation =
graphPanel.new(
timeSeriesPanel.new(
'CPU Utilisation',
datasource='$datasource',
span=6,
format='percentunit',
stack=true,
fill=10,
legend_show=false,
) { tooltip+: { sort: 2 } };
)
+ tsCommonPanelOptions
+ tsStandardOptions.withUnit('percentunit');
local CPUSaturation =
// TODO: Is this a useful panel? At least there should be some explanation of
// how the load average relates to the "CPU saturation" in the title.
graphPanel.new(
timeSeriesPanel.new(
'CPU Saturation (Load1 per CPU)',
datasource='$datasource',
span=6,
format='percentunit',
stack=true,
fill=10,
legend_show=false,
) { tooltip+: { sort: 2 } };
)
+ tsCommonPanelOptions
+ tsStandardOptions.withUnit('percentunit');
local memoryUtilisation =
graphPanel.new(
timeSeriesPanel.new(
'Memory Utilisation',
datasource='$datasource',
span=6,
format='percentunit',
stack=true,
fill=10,
legend_show=false,
) { tooltip+: { sort: 2 } };
)
+ tsCommonPanelOptions
+ tsStandardOptions.withUnit('percentunit');
local memorySaturation =
graphPanel.new(
timeSeriesPanel.new(
'Memory Saturation (Major Page Faults)',
datasource='$datasource',
span=6,
format='rds',
stack=true,
fill=10,
legend_show=false,
) { tooltip+: { sort: 2 } };
)
+ tsCommonPanelOptions
+ tsStandardOptions.withUnit('rds');
local networkOverrides = tsStandardOptions.withOverrides(
[
tsStandardOptions.override.byRegexp.new('/Transmit/')
+ tsStandardOptions.override.byRegexp.withPropertiesFromOptions(
tsCustom.withTransform('negative-Y')
),
]
);
local networkUtilisation =
graphPanel.new(
timeSeriesPanel.new(
'Network Utilisation (Bytes Receive/Transmit)',
datasource='$datasource',
span=6,
format='Bps',
stack=true,
fill=10,
legend_show=false,
)
.addSeriesOverride({ alias: '/Receive/', stack: 'A' })
.addSeriesOverride({ alias: '/Transmit/', stack: 'B', transform: 'negative-Y' })
{ tooltip+: { sort: 2 } };
+ tsCommonPanelOptions
+ tsStandardOptions.withUnit('Bps')
+ networkOverrides;
local networkSaturation =
graphPanel.new(
timeSeriesPanel.new(
'Network Saturation (Drops Receive/Transmit)',
datasource='$datasource',
span=6,
format='Bps',
stack=true,
fill=10,
legend_show=false,
)
.addSeriesOverride({ alias: '/ Receive/', stack: 'A' })
.addSeriesOverride({ alias: '/ Transmit/', stack: 'B', transform: 'negative-Y' })
{ tooltip+: { sort: 2 } };
+ tsCommonPanelOptions
+ tsStandardOptions.withUnit('Bps')
+ networkOverrides;
local diskIOUtilisation =
graphPanel.new(
timeSeriesPanel.new(
'Disk IO Utilisation',
datasource='$datasource',
span=6,
format='percentunit',
stack=true,
fill=10,
legend_show=false,
) { tooltip+: { sort: 2 } };
)
+ tsCommonPanelOptions
+ tsStandardOptions.withUnit('percentunit');
local diskIOSaturation =
graphPanel.new(
timeSeriesPanel.new(
'Disk IO Saturation',
datasource='$datasource',
span=6,
format='percentunit',
stack=true,
fill=10,
legend_show=false,
) { tooltip+: { sort: 2 } };
)
+ tsCommonPanelOptions
+ tsStandardOptions.withUnit('percentunit');
local diskSpaceUtilisation =
graphPanel.new(
timeSeriesPanel.new(
'Disk Space Utilisation',
datasource='$datasource',
span=12,
format='percentunit',
stack=true,
fill=10,
legend_show=false,
) { tooltip+: { sort: 2 } };
)
+ tsCommonPanelOptions
+ tsStandardOptions.withUnit('percentunit');
{
_clusterTemplate:: template.new(
name='cluster',
datasource='$datasource',
query='label_values(node_time_seconds, %s)' % $._config.clusterLabel,
current='',
hide=if $._config.showMultiCluster then '' else '2',
refresh=2,
includeAll=false,
sort=1
),
_clusterVariable::
variable.query.new('cluster')
+ variable.query.withDatasourceFromVariable(datasource)
+ variable.query.queryTypes.withLabelValues(
$._config.clusterLabel,
'node_time_seconds',
)
+ (if $._config.showMultiCluster then variable.query.generalOptions.showOnDashboard.withLabelAndValue() else variable.query.generalOptions.showOnDashboard.withNothing())
+ variable.query.refresh.onTime()
+ variable.query.selectionOptions.withIncludeAll(false)
+ variable.query.withSort(asc=true),
grafanaDashboards+:: {
'node-rsrc-use.json':
dashboard.new(
'%sUSE Method / Node' % $._config.dashboardNamePrefix,
time_from='now-1h',
tags=($._config.dashboardTags),
timezone='utc',
refresh='30s',
graphTooltip='shared_crosshair',
uid=std.md5('node-rsrc-use.json')
)
.addTemplate(datasourceTemplate)
.addTemplate($._clusterTemplate)
.addTemplate(
template.new(
+ dashboard.time.withFrom('now-1h')
+ dashboard.withTags($._config.dashboardTags)
+ dashboard.withTimezone('utc')
+ dashboard.withRefresh('30s')
+ dashboard.graphTooltip.withSharedCrosshair()
+ dashboard.withUid(std.md5('node-rsrc-use.json'))
+ dashboard.withVariables([
datasource,
$._clusterVariable,
variable.query.new('instance')
+ variable.query.withDatasourceFromVariable(datasource)
+ variable.query.queryTypes.withLabelValues(
'instance',
'$datasource',
'label_values(node_exporter_build_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}, instance)' % $._config,
refresh='time',
sort=1
)
)
.addRow(
row.new('CPU')
.addPanel(CPUUtilisation.addTarget(prometheus.target('instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Utilisation')))
.addPanel(CPUSaturation.addTarget(prometheus.target('instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Saturation')))
)
.addRow(
row.new('Memory')
.addPanel(memoryUtilisation.addTarget(prometheus.target('instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Utilisation')))
.addPanel(memorySaturation.addTarget(prometheus.target('instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Major page Faults')))
)
.addRow(
row.new('Network')
.addPanel(
networkUtilisation
.addTarget(prometheus.target('instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Receive'))
.addTarget(prometheus.target('instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Transmit'))
)
.addPanel(
networkSaturation
.addTarget(prometheus.target('instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Receive'))
.addTarget(prometheus.target('instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Transmit'))
)
)
.addRow(
row.new('Disk IO')
.addPanel(diskIOUtilisation.addTarget(prometheus.target('instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{device}}')))
.addPanel(diskIOSaturation.addTarget(prometheus.target('instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{device}}')))
)
.addRow(
row.new('Disk Space')
.addPanel(
diskSpaceUtilisation.addTarget(prometheus.target(
|||
sort_desc(1 -
(
max without (mountpoint, fstype) (node_filesystem_avail_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"})
/
max without (mountpoint, fstype) (node_filesystem_size_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"})
) != 0
)
||| % $._config, legendFormat='{{device}}'
))
'node_exporter_build_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}' % $._config,
)
+ variable.query.refresh.onTime()
+ variable.query.withSort(asc=true),
])
+ dashboard.withPanels(
grafana.util.grid.makeGrid([
row.new('CPU')
+ row.withPanels([
CPUUtilisation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Utilisation')]),
CPUSaturation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Saturation')]),
]),
row.new('Memory')
+ row.withPanels([
memoryUtilisation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Utilisation')]),
memorySaturation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Major page Faults')]),
]),
row.new('Network')
+ row.withPanels([
networkUtilisation + tsQueryOptions.withTargets([
prometheus.new('$datasource', 'instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Receive'),
prometheus.new('$datasource', 'instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Transmit'),
]),
networkSaturation + tsQueryOptions.withTargets([
prometheus.new('$datasource', 'instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Receive'),
prometheus.new('$datasource', 'instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('Transmit'),
]),
]),
row.new('Disk IO')
+ row.withPanels([
diskIOUtilisation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('{{device}}')]),
diskIOSaturation + tsQueryOptions.withTargets([prometheus.new('$datasource', 'instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config) + prometheus.withLegendFormat('{{device}}')]),
]),
], panelWidth=12, panelHeight=7)
+ grafana.util.grid.makeGrid([
row.new('Disk Space')
+ row.withPanels([
diskSpaceUtilisation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
sort_desc(1 -
(
max without (mountpoint, fstype) (node_filesystem_avail_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"})
/
max without (mountpoint, fstype) (node_filesystem_size_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"})
) != 0
)
||| % $._config
) + prometheus.withLegendFormat('{{device}}'),
]),
]),
], panelWidth=24, panelHeight=7, startY=34),
),
'node-cluster-rsrc-use.json':
dashboard.new(
'%sUSE Method / Cluster' % $._config.dashboardNamePrefix,
time_from='now-1h',
tags=($._config.dashboardTags),
timezone='utc',
refresh='30s',
graphTooltip='shared_crosshair',
uid=std.md5('node-cluster-rsrc-use.json')
)
.addTemplate(datasourceTemplate)
.addTemplate($._clusterTemplate)
.addRow(
row.new('CPU')
.addPanel(
CPUUtilisation
.addTarget(prometheus.target(
|||
((
instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
*
instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
) != 0 )
/ scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
||| % $._config, legendFormat='{{ instance }}'
))
)
.addPanel(
CPUSaturation
.addTarget(prometheus.target(
|||
(
instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
/ scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
) != 0
||| % $._config, legendFormat='{{instance}}'
))
)
)
.addRow(
row.new('Memory')
.addPanel(
memoryUtilisation
.addTarget(prometheus.target(
|||
(
instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
/ scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
) != 0
||| % $._config, legendFormat='{{instance}}',
))
)
.addPanel(memorySaturation.addTarget(prometheus.target('instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}' % $._config, legendFormat='{{instance}}')))
)
.addRow(
row.new('Network')
.addPanel(
networkUtilisation
.addTarget(prometheus.target('instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Receive'))
.addTarget(prometheus.target('instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Transmit'))
)
.addPanel(
networkSaturation
.addTarget(prometheus.target('instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Receive'))
.addTarget(prometheus.target('instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Transmit'))
)
)
.addRow(
row.new('Disk IO')
.addPanel(
diskIOUtilisation
.addTarget(prometheus.target(
|||
(
instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
/ scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
) != 0
||| % $._config, legendFormat='{{instance}} {{device}}'
))
)
.addPanel(
diskIOSaturation
.addTarget(prometheus.target(
|||
(
+ dashboard.time.withFrom('now-1h')
+ dashboard.withTags($._config.dashboardTags)
+ dashboard.withTimezone('utc')
+ dashboard.withRefresh('30s')
+ dashboard.graphTooltip.withSharedCrosshair()
+ dashboard.withUid(std.md5('node-cluster-rsrc-use.json'))
+ dashboard.withVariables([
datasource,
$._clusterVariable,
])
+ dashboard.withPanels(
grafana.util.grid.makeGrid([
row.new('CPU')
+ row.withPanels([
CPUUtilisation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
((
instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
*
instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
) != 0 )
/ scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
||| % $._config
) + prometheus.withLegendFormat('{{ instance }}'),
]),
CPUSaturation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
(
instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
/ scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
) != 0
||| % $._config
) + prometheus.withLegendFormat('{{ instance }}'),
]),
]),
row.new('Memory')
+ row.withPanels([
memoryUtilisation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
(
instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
/ scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
) != 0
||| % $._config
) + prometheus.withLegendFormat('{{ instance }}'),
]),
memorySaturation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
'instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}' % $._config
) + prometheus.withLegendFormat('{{ instance }}'),
]),
]),
row.new('Network')
+ row.withPanels([
networkUtilisation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
'instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config
) + prometheus.withLegendFormat('{{ instance }} Receive'),
prometheus.new(
'$datasource',
'instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config
) + prometheus.withLegendFormat('{{ instance }} Transmit'),
]),
networkSaturation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
'instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config
) + prometheus.withLegendFormat('{{ instance }} Receive'),
prometheus.new(
'$datasource',
'instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config
) + prometheus.withLegendFormat('{{ instance }} Transmit'),
]),
]),
row.new('Disk IO')
+ row.withPanels([
diskIOUtilisation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
/ scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
||| % $._config
) + prometheus.withLegendFormat('{{ instance }} {{device}}'),
]),
diskIOSaturation + tsQueryOptions.withTargets([prometheus.new(
'$datasource',
|||
instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
/ scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
) != 0
||| % $._config, legendFormat='{{instance}} {{device}}'
))
)
)
.addRow(
row.new('Disk Space')
.addPanel(
diskSpaceUtilisation
.addTarget(prometheus.target(
|||
sum without (device) (
max without (fstype, mountpoint) ((
node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"}
-
node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"}
) != 0)
)
/ scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"})))
||| % $._config, legendFormat='{{instance}}'
))
)
||| % $._config
) + prometheus.withLegendFormat('{{ instance }} {{device}}')]),
]),
], panelWidth=12, panelHeight=7)
+ grafana.util.grid.makeGrid([
row.new('Disk Space')
+ row.withPanels([
diskSpaceUtilisation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
sum without (device) (
max without (fstype, mountpoint) ((
node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"}
-
node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"}
) != 0)
)
/ scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"})))
||| % $._config
) + prometheus.withLegendFormat('{{ instance }}'),
]),
]),
], panelWidth=24, panelHeight=7, startY=34),
),
} +
if $._config.showMultiCluster then {
'node-multicluster-rsrc-use.json':
dashboard.new(
'%sUSE Method / Multi-cluster' % $._config.dashboardNamePrefix,
time_from='now-1h',
tags=($._config.dashboardTags),
timezone='utc',
refresh='30s',
graphTooltip='shared_crosshair',
uid=std.md5('node-multicluster-rsrc-use.json')
)
.addTemplate(datasourceTemplate)
.addRow(
row.new('CPU')
.addPanel(
CPUUtilisation
.addTarget(prometheus.target(
|||
sum(
((
instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s}
*
instance:node_num_cpu:sum{%(nodeExporterSelector)s}
) != 0)
/ scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s}))
) by (%(clusterLabel)s)
||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
))
)
.addPanel(
CPUSaturation
.addTarget(prometheus.target(
|||
sum((
instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s}
/ scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s}))
) != 0) by (%(clusterLabel)s)
||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
))
)
)
.addRow(
row.new('Memory')
.addPanel(
memoryUtilisation
.addTarget(prometheus.target(
|||
sum((
instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s}
/ scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s}))
) != 0) by (%(clusterLabel)s)
||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
))
)
.addPanel(
memorySaturation
.addTarget(prometheus.target(
|||
sum((
instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s}
) != 0) by (%(clusterLabel)s)
||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
))
)
)
.addRow(
row.new('Network')
.addPanel(
networkUtilisation
.addTarget(prometheus.target(
|||
sum((
instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
) != 0) by (%(clusterLabel)s)
||| % $._config, legendFormat='{{%(clusterLabel)s}} Receive' % $._config
))
.addTarget(prometheus.target(
|||
sum((
instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
) != 0) by (%(clusterLabel)s)
||| % $._config, legendFormat='{{%(clusterLabel)s}} Transmit' % $._config
))
)
.addPanel(
networkSaturation
.addTarget(prometheus.target(
|||
sum((
instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
) != 0) by (%(clusterLabel)s)
||| % $._config, legendFormat='{{%(clusterLabel)s}} Receive' % $._config
))
.addTarget(prometheus.target(
|||
sum((
instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
) != 0) by (%(clusterLabel)s)
||| % $._config, legendFormat='{{%(clusterLabel)s}} Transmit' % $._config
))
)
)
.addRow(
row.new('Disk IO')
.addPanel(
diskIOUtilisation
.addTarget(prometheus.target(
|||
sum((
instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}
/ scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}))
) != 0) by (%(clusterLabel)s, device)
||| % $._config, legendFormat='{{%(clusterLabel)s}} {{device}}' % $._config
))
)
.addPanel(
diskIOSaturation
.addTarget(prometheus.target(
|||
sum((
instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}
/ scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}))
) != 0) by (%(clusterLabel)s, device)
||| % $._config, legendFormat='{{%(clusterLabel)s}} {{device}}' % $._config
))
)
)
.addRow(
row.new('Disk Space')
.addPanel(
diskSpaceUtilisation
.addTarget(prometheus.target(
|||
sum (
sum without (device) (
max without (fstype, mountpoint, instance, pod) ((
node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s} - node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s}
) != 0)
)
/ scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s})))
) by (%(clusterLabel)s)
||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
))
)
+ dashboard.time.withFrom('now-1h')
+ dashboard.withTags($._config.dashboardTags)
+ dashboard.withTimezone('utc')
+ dashboard.withRefresh('30s')
+ dashboard.graphTooltip.withSharedCrosshair()
+ dashboard.withUid(std.md5('node-multicluster-rsrc-use.json'))
+ dashboard.withVariables([
datasource,
])
+ dashboard.withPanels(
grafana.util.grid.makeGrid([
row.new('CPU')
+ row.withPanels([
CPUUtilisation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
sum(
((
instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s}
*
instance:node_num_cpu:sum{%(nodeExporterSelector)s}
) != 0)
/ scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s}))
) by (%(clusterLabel)s)
||| % $._config
) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'),
]),
CPUSaturation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
sum((
instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s}
/ scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s}))
) != 0) by (%(clusterLabel)s)
||| % $._config
) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'),
]),
]),
row.new('Memory')
+ row.withPanels([
memoryUtilisation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
sum((
instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s}
/ scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s}))
) != 0) by (%(clusterLabel)s)
||| % $._config
) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'),
]),
memorySaturation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
sum((
instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s}
) != 0) by (%(clusterLabel)s)
|||
% $._config
) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'),
]),
]),
row.new('Network')
+ row.withPanels([
networkUtilisation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
sum((
instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
) != 0) by (%(clusterLabel)s)
||| % $._config
) + prometheus.withLegendFormat('{{%(clusterLabel)s}} Receive'),
prometheus.new(
'$datasource',
|||
sum((
instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
) != 0) by (%(clusterLabel)s)
||| % $._config
) + prometheus.withLegendFormat('{{%(clusterLabel)s}} Transmit'),
]),
networkSaturation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
sum((
instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
) != 0) by (%(clusterLabel)s)
||| % $._config
) + prometheus.withLegendFormat('{{%(clusterLabel)s}} Receive'),
prometheus.new(
'$datasource',
|||
sum((
instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
) != 0) by (%(clusterLabel)s)
||| % $._config
) + prometheus.withLegendFormat('{{%(clusterLabel)s}} Transmit'),
]),
]),
row.new('Disk IO')
+ row.withPanels([
diskIOUtilisation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
sum((
instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}
/ scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}))
) != 0) by (%(clusterLabel)s, device)
||| % $._config
) + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{device}}'),
]),
diskIOSaturation + tsQueryOptions.withTargets([prometheus.new(
'$datasource',
|||
sum((
instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}
/ scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}))
) != 0) by (%(clusterLabel)s, device)
||| % $._config
) + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{device}}')]),
]),
], panelWidth=12, panelHeight=7)
+ grafana.util.grid.makeGrid([
row.new('Disk Space')
+ row.withPanels([
diskSpaceUtilisation + tsQueryOptions.withTargets([
prometheus.new(
'$datasource',
|||
sum (
sum without (device) (
max without (fstype, mountpoint, instance, pod) ((
node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s} - node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s}
) != 0)
)
/ scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s})))
) by (%(clusterLabel)s)
||| % $._config
) + prometheus.withLegendFormat('{{%(clusterLabel)s}}'),
]),
]),
], panelWidth=24, panelHeight=7, startY=34),
),
} else {},
}
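(Aside, not part of the diff: with the grafonnet dependency vendored under vendor/, the dashboards defined above could be rendered to JSON roughly as follows; the mixin entry point and output directory are assumptions, not taken from this commit.)

    mkdir -p dashboards_out
    jsonnet -J vendor -m dashboards_out \
      -e '(import "mixin.libsonnet").grafanaDashboards'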

View file

@ -4,20 +4,11 @@
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet-lib.git",
"subdir": "grafonnet"
"remote": "https://github.com/grafana/grafonnet.git",
"subdir": "gen/grafonnet-latest"
}
},
"version": "master"
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet-lib.git",
"subdir": "grafonnet-7.0"
}
},
"version": "master"
"version": "main"
}
],
"legacyImports": false

File diff suppressed because it is too large

View file

@ -2,6 +2,38 @@
set -euf -o pipefail
# Allow setting GOHOSTOS for debugging purposes.
GOHOSTOS=${GOHOSTOS:-$(go env GOHOSTOS)}
# Allow setting arch for debugging purposes.
arch=${arch:-$(uname -m)}
maybe_flag_search_scope() {
local collector=$1
os_aux_os=""
if [[ $GOHOSTOS =~ ^(freebsd|openbsd|netbsd|solaris|dragonfly)$ ]]; then
os_aux_os=" ${collector}_bsd.go"
fi
echo "${collector}_common.go ${collector}.go ${collector}_${GOHOSTOS}.go ${collector}_${GOHOSTOS}_${arch}.go${os_aux_os}"
}
supported_collectors() {
local collectors=$1
local supported=""
for collector in ${collectors}; do
for filename in $(maybe_flag_search_scope "${collector}"); do
file="collector/${filename}"
if ./tools/tools match ${file} > /dev/null 2>&1; then
if grep -h -E -o -- "registerCollector\(" ${file} > /dev/null 2>&1; then
supported="${supported} ${collector}"
fi
break
fi
done
done
echo "${supported}" | tr ' ' '\n' | sort | uniq
}
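# Illustration (hypothetical host, not part of the original script): with GOHOSTOS=linux
# and arch=x86_64, `maybe_flag_search_scope cpu` expands to
#   "cpu_common.go cpu.go cpu_linux.go cpu_linux_x86_64.go"
# and supported_collectors keeps "cpu" only if one of those files matches the build
# context via ./tools/tools match and contains a registerCollector() call.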
enabled_collectors=$(cat << COLLECTORS
arp
bcache
@ -57,6 +89,8 @@ enabled_collectors=$(cat << COLLECTORS
zoneinfo
COLLECTORS
)
supported_enabled_collectors=$(supported_collectors "${enabled_collectors}")
disabled_collectors=$(cat << COLLECTORS
selinux
filesystem
@ -64,6 +98,8 @@ disabled_collectors=$(cat << COLLECTORS
uname
COLLECTORS
)
supported_disabled_collectors=$(supported_collectors "${disabled_collectors}")
cd "$(dirname $0)"
port="$((10000 + (RANDOM % 10000)))"
@ -71,11 +107,9 @@ tmpdir=$(mktemp -d /tmp/node_exporter_e2e_test.XXXXXX)
skip_re="^(go_|node_exporter_build_info|node_scrape_collector_duration_seconds|process_|node_textfile_mtime_seconds|node_time_(zone|seconds)|node_network_(receive|transmit)_(bytes|packets)_total)"
arch="$(uname -m)"
case "${arch}" in
aarch64|ppc64le) fixture='collector/fixtures/e2e-64k-page-output.txt' ;;
*) fixture='collector/fixtures/e2e-output.txt' ;;
aarch64|ppc64le) fixture_metrics='collector/fixtures/e2e-64k-page-output.txt' ;;
*) fixture_metrics='collector/fixtures/e2e-output.txt' ;;
esac
# Only test CPU info collection on x86_64.
@ -109,7 +143,7 @@ do
*)
echo "Usage: $0 [-k] [-u] [-v]"
echo " -k: keep temporary files and leave node_exporter running"
echo " -u: update fixture"
echo " -u: update fixture_metrics"
echo " -v: verbose output"
exit 1
;;
@ -122,37 +156,126 @@ then
exit 1
fi
collector_flags=$(cat << FLAGS
${cpu_info_collector}
--collector.arp.device-exclude=nope
--collector.bcache.priorityStats
--collector.cpu.info.bugs-include=${cpu_info_bugs}
--collector.cpu.info.flags-include=${cpu_info_flags}
--collector.hwmon.chip-include=(applesmc|coretemp|hwmon4|nct6779)
--collector.netclass.ignore-invalid-speed
--collector.netclass.ignored-devices=(dmz|int)
--collector.netdev.device-include=lo
--collector.qdisc.device-include=(wlan0|eth0)
--collector.qdisc.fixtures=collector/fixtures/qdisc/
--collector.stat.softirq
--collector.sysctl.include-info=kernel.seccomp.actions_avail
--collector.sysctl.include=fs.file-nr
--collector.sysctl.include=fs.file-nr:total,current,max
--collector.sysctl.include=kernel.threads-max
--collector.textfile.directory=collector/fixtures/textfile/two_metric_files/
--collector.wifi.fixtures=collector/fixtures/wifi
--no-collector.arp.netlink
FLAGS
)
# Handle supported --[no-]collector.<name> flags. These are not hardcoded; they are registered automatically by each collector via registerCollector().
_filtered_collector_flags=""
for flag in ${collector_flags}; do
collector=$(echo "${flag}" | cut -d"." -f2)
# If the flag is associated with an enabled-by-default collector, include it.
enabled_by_default=0
for filename in $(maybe_flag_search_scope "${collector}") ; do
file="collector/${filename}"
if grep -h -E -o -- "registerCollector\(.*, defaultEnabled" ${file} > /dev/null 2>&1; then
_filtered_collector_flags="${_filtered_collector_flags} ${flag}"
enabled_by_default=1
break
fi
done
if [ ${enabled_by_default} -eq 1 ]; then
continue
fi
# If the flag is associated with an enabled-list collector, include it.
if echo "${supported_enabled_collectors} ${supported_disabled_collectors}" | grep -q -w "${collector}"; then
_filtered_collector_flags="${_filtered_collector_flags} ${flag}"
fi
done
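# Illustration (hypothetical): a flag such as --collector.foo.bar=baz survives this first
# pass only if collector "foo" is registered as defaultEnabled for the current build
# context, or appears in the supported enabled/disabled collector lists above; anything
# else is reported under IGNORED FLAGS further below.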
# Handle supported --[no-]collector.<name>.<option> flags. These are hardcoded and matched by the expression below.
filtered_collector_flags=""
# Check the flags of all supported collectors down to their sub-collector options (everything beyond the second ".").
for flag in ${_filtered_collector_flags}; do
# Iterate through all possible files where the flag may be defined.
flag_collector="$(echo "${flag}" | cut -d"." -f2)"
for filename in $(maybe_flag_search_scope "${flag_collector}") ; do
file="collector/${filename}"
# Move to next iteration if the current file is not included under the build context.
if ! ./tools/tools match "$file" > /dev/null 2>&1; then
continue
fi
# Flag has the format: --[no-]collector.<name>.<option>.
if [ -n "$(echo ${flag} | cut -d"." -f3)" ]; then
# Check if the flag is used in the file.
trimmed_flag=$(echo "${flag}" | tr -d "\"' " | cut -d"=" -f1 | cut -c 3-)
if [[ $trimmed_flag =~ ^no- ]]; then
trimmed_flag=$(echo $trimmed_flag | cut -c 4-)
fi
if grep -h -E -o -- "kingpin.Flag\(\"${trimmed_flag}" ${file} > /dev/null 2>&1; then
filtered_collector_flags="${filtered_collector_flags} ${flag}"
else
continue
fi
# Flag has the format: --[no-]collector.<name>.
else
# Flag is supported by the host.
filtered_collector_flags="${filtered_collector_flags} ${flag}"
fi
done
done
# Check for ignored flags.
ignored_flags=""
for flag in ${collector_flags}; do
flag=$(echo "${flag}" | tr -d " ")
if ! echo "${filtered_collector_flags}" | grep -q -F -- "${flag}" > /dev/null 2>&1; then
ignored_flags="${ignored_flags} ${flag}"
fi
done
echo "ENABLED COLLECTORS======="
echo "${supported_enabled_collectors:1}" | tr ' ' '\n' | sort
echo "========================="
echo "DISABLED COLLECTORS======"
echo "${supported_disabled_collectors:1}" | tr ' ' '\n' | sort
echo "========================="
echo "IGNORED FLAGS============"
echo "${ignored_flags:1}"| tr ' ' '\n' | sort | uniq
echo "========================="
./node_exporter \
--path.rootfs="collector/fixtures" \
--path.procfs="collector/fixtures/proc" \
--path.sysfs="collector/fixtures/sys" \
--path.udev.data="collector/fixtures/udev/data" \
$(for c in ${enabled_collectors}; do echo --collector.${c} ; done) \
$(for c in ${disabled_collectors}; do echo --no-collector.${c} ; done) \
--collector.textfile.directory="collector/fixtures/textfile/two_metric_files/" \
--collector.wifi.fixtures="collector/fixtures/wifi" \
--collector.qdisc.fixtures="collector/fixtures/qdisc/" \
--collector.qdisc.device-include="(wlan0|eth0)" \
--collector.arp.device-exclude="nope" \
--no-collector.arp.netlink \
--collector.hwmon.chip-include="(applesmc|coretemp|hwmon4|nct6779)" \
--collector.netclass.ignored-devices="(dmz|int)" \
--collector.netclass.ignore-invalid-speed \
--collector.netdev.device-include="lo" \
--collector.bcache.priorityStats \
"${cpu_info_collector}" \
--collector.cpu.info.bugs-include="${cpu_info_bugs}" \
--collector.cpu.info.flags-include="${cpu_info_flags}" \
--collector.stat.softirq \
--collector.sysctl.include="kernel.threads-max" \
--collector.sysctl.include="fs.file-nr" \
--collector.sysctl.include="fs.file-nr:total,current,max" \
--collector.sysctl.include-info="kernel.seccomp.actions_avail" \
$(for c in ${supported_enabled_collectors}; do echo --collector.${c} ; done) \
$(for c in ${supported_disabled_collectors}; do echo --no-collector.${c} ; done) \
${filtered_collector_flags} \
--web.listen-address "127.0.0.1:${port}" \
--log.level="debug" > "${tmpdir}/node_exporter.log" 2>&1 &
echo $! > "${tmpdir}/node_exporter.pid"
generated_metrics="${tmpdir}/e2e-output.txt"
for os in freebsd openbsd netbsd solaris dragonfly darwin; do
if [ "${GOHOSTOS}" = "${os}" ]; then
generated_metrics="${tmpdir}/e2e-output-${GOHOSTOS}.txt"
fixture_metrics="${fixture_metrics::-4}-${GOHOSTOS}.txt"
fi
done
finish() {
if [ $? -ne 0 -o ${verbose} -ne 0 ]
then
@ -165,7 +288,7 @@ EOF
if [ ${update} -ne 0 ]
then
cp "${tmpdir}/e2e-output.txt" "${fixture}"
cp "${generated_metrics}" "${fixture_metrics}"
fi
if [ ${keep} -eq 0 ]
@ -195,8 +318,64 @@ get() {
sleep 1
get "127.0.0.1:${port}/metrics" | grep -E -v "${skip_re}" > "${tmpdir}/e2e-output.txt"
get "127.0.0.1:${port}/metrics" | grep --text -E -v "${skip_re}" > "${generated_metrics}"
# The following ignore-list applies only to the VMs used to run E2E tests on platforms for which containerized environments are not available.
# Because those tests run in VMs rather than containers, some metrics are non-deterministic and produce samples that their containerized counterparts do not, e.g. node_network_receive_bytes_total.
non_deterministic_metrics=$(cat << METRICS
node_boot_time_seconds
node_cpu_frequency_hertz
node_cpu_frequency_max_hertz
node_cpu_seconds_total
node_disk_io_time_seconds_total
node_disk_read_bytes_total
node_disk_read_sectors_total
node_disk_read_time_seconds_total
node_disk_reads_completed_total
node_disk_write_time_seconds_total
node_disk_writes_completed_total
node_disk_written_bytes_total
node_disk_written_sectors_total
node_exec_context_switches_total
node_exec_device_interrupts_total
node_exec_forks_total
node_exec_software_interrupts_total
node_exec_system_calls_total
node_exec_traps_total
node_interrupts_total
node_load1
node_load15
node_load5
node_memory_active_bytes
node_memory_buffer_bytes
node_memory_cache_bytes
node_memory_compressed_bytes
node_memory_free_bytes
node_memory_inactive_bytes
node_memory_internal_bytes
node_memory_laundry_bytes
node_memory_purgeable_bytes
node_memory_size_bytes
node_memory_swapped_in_bytes_total
node_memory_swapped_out_bytes_total
node_memory_wired_bytes
node_netstat_tcp_receive_packets_total
node_netstat_tcp_transmit_packets_total
node_network_receive_bytes_total
node_network_receive_multicast_total
node_network_transmit_multicast_total
METRICS
)
# Remove non-deterministic metrics from the generated metrics file on platforms whose E2E workflows run in VMs.
for os in freebsd openbsd netbsd solaris dragonfly darwin; do
if [ "${GOHOSTOS}" = "${os}" ]; then
for metric in ${non_deterministic_metrics}; do
sed -i "/${metric}/d" "${generated_metrics}"
done
fi
done
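# Illustration (hypothetical sample line): on GOHOSTOS=freebsd an entry such as
#   node_load1 4.16
# would be dropped here before the generated output is diffed against the fixture.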
diff -u \
"${fixture}" \
"${tmpdir}/e2e-output.txt"
"${fixture_metrics}" \
"${generated_metrics}"

36
go.mod
View file

@ -1,6 +1,6 @@
module github.com/prometheus/node_exporter
go 1.22.0
go 1.23.0
require (
github.com/alecthomas/kingpin/v2 v2.4.0
@ -18,18 +18,18 @@ require (
github.com/mattn/go-xmlrpc v0.0.3
github.com/mdlayher/ethtool v0.2.0
github.com/mdlayher/netlink v1.7.2
github.com/mdlayher/wifi v0.2.0
github.com/opencontainers/selinux v1.11.0
github.com/mdlayher/wifi v0.5.0
github.com/opencontainers/selinux v1.11.1
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55
github.com/prometheus-community/go-runit v0.1.0
github.com/prometheus/client_golang v1.20.3
github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.59.1
github.com/prometheus/exporter-toolkit v0.13.0
github.com/prometheus/procfs v0.15.1
github.com/safchain/ethtool v0.4.1
github.com/prometheus/client_golang v1.21.1
github.com/prometheus/client_model v0.6.2
github.com/prometheus/common v0.64.0
github.com/prometheus/exporter-toolkit v0.14.0
github.com/prometheus/procfs v0.16.1
github.com/safchain/ethtool v0.5.10
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0
golang.org/x/sys v0.25.0
golang.org/x/sys v0.33.0
howett.net/plist v1.0.1
)
@ -38,9 +38,9 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/dennwc/ioctl v1.0.0 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mdlayher/genetlink v1.3.2 // indirect
github.com/mdlayher/socket v0.4.1 // indirect
@ -51,11 +51,11 @@ require (
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
golang.org/x/crypto v0.26.0 // indirect
golang.org/x/net v0.28.0 // indirect
golang.org/x/oauth2 v0.22.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/text v0.17.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
golang.org/x/crypto v0.38.0 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.14.0 // indirect
golang.org/x/text v0.25.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
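(Aside: version bumps of this kind are normally produced with the standard Go module tooling; the module shown is one example taken from this diff, and the exact commands used here are an assumption.)

    go get github.com/prometheus/client_golang@v1.21.1
    go mod tidy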

74
go.sum
View file

@ -24,8 +24,8 @@ github.com/ema/qdisc v1.0.0/go.mod h1:FhIc0fLYi7f+lK5maMsesDqwYojIOh3VfRs8EVd5YJ
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/hashicorp/go-envparse v0.1.0 h1:bE++6bhIsNCPLvgDZkYqo3nA+/PFI51pkrHdmPSDFPY=
github.com/hashicorp/go-envparse v0.1.0/go.mod h1:OHheN1GoygLlAkTlXLXvAdnXdZxy8JUweQ1rAXx1xnc=
github.com/hodgesds/perf-utils v0.7.0 h1:7KlHGMuig4FRH5fNw68PV6xLmgTe7jKs9hgAcEAbioU=
@ -39,8 +39,8 @@ github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2E
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jsimonetti/rtnetlink/v2 v2.0.2 h1:ZKlbCujrIpp4/u3V2Ka0oxlf4BCkt6ojkvpy3nZoCBY=
github.com/jsimonetti/rtnetlink/v2 v2.0.2/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@ -61,66 +61,66 @@ github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U
github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
github.com/mdlayher/wifi v0.2.0 h1:vwbVyu5MWTiFNvOmWdvIx9veBlMVnEasZ90PhUi1DYU=
github.com/mdlayher/wifi v0.2.0/go.mod h1:yOfWhVZ4FFJxeHzAxDzt87Om9EkqqcCiY9Gi5gfSXwI=
github.com/mdlayher/wifi v0.5.0 h1:TGZIcrhL6h3710amshpEJnMzLs74MrZOF+8qbm8Gx/I=
github.com/mdlayher/wifi v0.5.0/go.mod h1:yfQs+5zr1eOIfdsWDcZonWdznnt/Iiz0/4772cfZuHk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus-community/go-runit v0.1.0 h1:uTWEj/Fn2RoLdfg/etSqwzgYNOYPrARx1BHUN052tGA=
github.com/prometheus-community/go-runit v0.1.0/go.mod h1:AvJ9Jo3gAFu2lbM4+qfjdpq30FfiLDJZKbQ015u08IQ=
github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4=
github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0=
github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0=
github.com/prometheus/exporter-toolkit v0.13.0 h1:lmA0Q+8IaXgmFRKw09RldZmZdnvu9wwcDLIXGmTPw1c=
github.com/prometheus/exporter-toolkit v0.13.0/go.mod h1:2uop99EZl80KdXhv/MxVI2181fMcwlsumFOqBecGkG0=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/safchain/ethtool v0.4.1 h1:S6mEleTADqgynileXoiapt/nKnatyR6bmIHoF+h2ADo=
github.com/safchain/ethtool v0.4.1/go.mod h1:XLLnZmy4OCRTkksP/UiMjij96YmIsBfmBQcs7H6tA48=
github.com/safchain/ethtool v0.5.10 h1:Im294gZtuf4pSGJRAOGKaASNi3wMeFaGaWuSaomedpc=
github.com/safchain/ethtool v0.5.10/go.mod h1:w9jh2Lx7YBR4UwzLkzCmWl85UY0W2uZdd7/DckVE5+c=
github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973 h1:GfSdC6wKfTGcgCS7BtzF5694Amne1pGCSTY252WhlEY=
github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk=
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

79
tools/main.go Normal file
View file

@ -0,0 +1,79 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"go/build"
"os"
"path/filepath"
"runtime"
)
func main() {
printHelpAndDie := func() {
fmt.Println(`
Usage: tools [command]`)
os.Exit(1)
}
if len(os.Args) < 2 {
printHelpAndDie()
}
// Sub-commands.
matchCmd := flag.NewFlagSet("match", flag.ExitOnError)
switch os.Args[1] {
case "match":
err := matchCmd.Parse(os.Args[2:])
if err != nil {
fmt.Println("Error parsing flags:", err)
os.Exit(1)
}
if matchCmd.NArg() != 1 {
fmt.Println("Usage: match [file]")
os.Exit(1)
}
file := matchCmd.Arg(0)
// For debugging purposes, allow overriding these.
goos, found := os.LookupEnv("GOHOSTOS")
if !found {
goos = runtime.GOOS
}
goarch, found := os.LookupEnv("GOARCH")
if !found {
goarch = runtime.GOARCH
}
ctx := build.Context{
GOOS: goos,
GOARCH: goarch,
}
abs, err := filepath.Abs(file)
if err != nil {
panic(err)
}
match, err := ctx.MatchFile(filepath.Dir(abs), filepath.Base(abs))
if err != nil {
fmt.Println("Error:", err)
os.Exit(1)
}
if match {
os.Exit(0)
}
os.Exit(1)
default:
printHelpAndDie()
}
}
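(Aside: the end-to-end script above calls this helper as ./tools/tools match <file>; a minimal sketch of building and exercising it, with the example file path assumed rather than taken from this diff.)

    go build -o tools/tools ./tools
    # Exit status 0 means the file is included in the build for the (possibly overridden) GOOS/GOARCH.
    GOHOSTOS=freebsd ./tools/tools match collector/cpu_freebsd.go && echo included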