Compare commits

No commits in common. "master" and "v1.4.0" have entirely different histories.

244 changed files with 4333 additions and 15366 deletions

.circleci/config.yml

@ -1,16 +1,16 @@
--- ---
version: 2.1 version: 2.1
orbs: orbs:
prometheus: prometheus/prometheus@0.17.1 prometheus: prometheus/prometheus@0.16.0
executors: executors:
# Whenever the Go version is updated here, .promu.yml and .promu-cgo.yml # Whenever the Go version is updated here, .promu.yml and .promu-cgo.yml
# should also be updated. # should also be updated.
golang: golang:
docker: docker:
- image: cimg/go:1.24 - image: cimg/go:1.19
arm: arm:
docker: machine:
- image: cimg/go:1.24 image: ubuntu-2004:current
resource_class: arm.medium resource_class: arm.medium
jobs: jobs:
@ -25,9 +25,16 @@ jobs:
test-arm: test-arm:
executor: arm executor: arm
steps: steps:
- prometheus/setup_environment - checkout
- run: uname -a - run: uname -a
- run: make test-e2e - run: make test-e2e
codespell:
docker:
- image: circleci/python
steps:
- checkout
- run: sudo pip install codespell
- run: codespell --skip=".git,./vendor,ttar,go.mod,go.sum,*pem,./collector/fixtures" -I scripts/codespell_ignore.txt
test_mixins: test_mixins:
executor: golang executor: golang
steps: steps:
@ -42,23 +49,13 @@ jobs:
- run: git diff --exit-code - run: git diff --exit-code
build: build:
machine: machine:
image: ubuntu-2404:current image: ubuntu-2004:202101-01
parallelism: 3 parallelism: 3
steps: steps:
- prometheus/setup_environment - prometheus/setup_environment
- run: docker run --privileged linuxkit/binfmt:af88a591f9cc896a52ce596b9cf7ca26a061ef97 - run: docker run --privileged linuxkit/binfmt:v0.8
- run: promu crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX - run: promu crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX
- run: promu --config .promu-cgo.yml crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX - run: promu --config .promu-cgo.yml crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX
# sign the darwin build so it doesn't get SIGKILLed on start, see: https://github.com/prometheus/node_exporter/issues/2539
- run:
command: |
if [[ -f "$(pwd)/.build/darwin-arm64/node_exporter" ]]; then
promu codesign "$(pwd)/.build/darwin-arm64/node_exporter"
fi
if [[ -f "$(pwd)/.build/darwin-amd64/node_exporter" ]]; then
promu codesign "$(pwd)/.build/darwin-amd64/node_exporter"
fi
- persist_to_workspace: - persist_to_workspace:
root: . root: .
paths: paths:
@ -68,9 +65,9 @@ jobs:
destination: /build destination: /build
test_docker: test_docker:
machine: machine:
image: ubuntu-2404:current image: ubuntu-2204:2022.04.2
environment: environment:
DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.24-base DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.18-base
REPO_PATH: github.com/prometheus/node_exporter REPO_PATH: github.com/prometheus/node_exporter
steps: steps:
- prometheus/setup_environment - prometheus/setup_environment
@ -108,6 +105,10 @@ workflows:
filters: filters:
tags: tags:
only: /.*/ only: /.*/
- codespell:
filters:
tags:
only: /.*/
- test_docker: - test_docker:
requires: requires:
- test - test

.github/ISSUE_TEMPLATE/bug_report.md

@ -6,7 +6,7 @@
Before filing a bug report, note that running node_exporter in Docker is Before filing a bug report, note that running node_exporter in Docker is
not recommended, for the reasons detailed in the README: not recommended, for the reasons detailed in the README:
https://github.com/prometheus/node_exporter#docker https://github.com/prometheus/node_exporter#using-docker
Finally, also note that node_exporter is focused on *NIX kernels, and the Finally, also note that node_exporter is focused on *NIX kernels, and the
WMI exporter should be used instead on Windows. WMI exporter should be used instead on Windows.
@ -23,8 +23,6 @@
### node_exporter command line flags ### node_exporter command line flags
<!-- Please list all of the command line flags --> <!-- Please list all of the command line flags -->
### node_exporter log output
### Are you running node_exporter in Docker? ### Are you running node_exporter in Docker?
<!-- Please note the warning above. --> <!-- Please note the warning above. -->

.github/workflows/bsd.yml

@ -1,313 +0,0 @@
name: bsd
on:
push:
branches:
- master
pull_request:
branches:
- master
permissions:
contents: read
env:
GNU_TAR_VERSION: "1.35"
GO_VERSION_DRAGONFLY: "1.24.1"
GO_VERSION_FREEBSD: "123"
GO_VERSION_NETBSD: "1.24.1"
GO_VERSION_OPENBSD: "1.23.1"
GO_VERSION_SOLARIS: "1.24.1"
# To spin up one of the VMs below, see the "Debug Shell" section here: https://github.com/vmactions
jobs:
test_freebsd:
name: Run end-to-end tests on FreeBSD
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: test-e2e
uses: vmactions/freebsd-vm@8873d98fd1413b5977cb2f7348fe329775159892 # v1.1.9
with:
copyback: false
envs: 'GO_VERSION_FREEBSD GNU_TAR_VERSION'
usesh: true
prepare: |
pkg update -f
pkg install -y \
bash \
git \
gmake \
gnugrep \
go${GO_VERSION_FREEBSD} \
gsed \
gtar \
python \
wget
run: |
echo "::group::Setup prerequisites"
set -eu
mkdir bin
ln -s $(which go${GO_VERSION_FREEBSD}) $(pwd)/bin/go
ln -s $(which ggrep) $(pwd)/bin/grep
ln -s $(which gmake) $(pwd)/bin/make
ln -s $(which gsed) $(pwd)/bin/sed
ln -s $(which gtar) $(pwd)/bin/tar
export PATH=$(pwd)/bin:$PATH
echo "::endgroup::"
echo "::group::Print environment information"
uname -a
echo "GOOS: $(go env GOOS)"
echo "GOARCH: $(go env GOARCH)"
echo "::endgroup::"
echo "::group::Run End-to-End Tests"
git config --global --add safe.directory $(pwd)
gmake test-e2e
echo "::endgroup::"
test_openbsd:
name: Run end-to-end tests on OpenBSD
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: test-e2e
uses: vmactions/openbsd-vm@7ac70b6de6f33efc74a90c1964afa3bcf0ee4401 # v1.1.6
with:
copyback: false
envs: 'GO_VERSION_OPENBSD GNU_TAR_VERSION'
usesh: true
prepare: |
pkg_add -u
pkg_add \
bash \
ggrep \
git \
gmake \
go-${GO_VERSION_OPENBSD} \
gsed \
gtar-${GNU_TAR_VERSION}p0-static \
python \
wget
run: |
echo "::group::Setup prerequisites"
set -eu
mkdir bin
ln -s $(which ggrep) $(pwd)/bin/grep
ln -s $(which gmake) $(pwd)/bin/make
ln -s $(which gsed) $(pwd)/bin/sed
ln -s $(which gtar) $(pwd)/bin/tar
export PATH=$(pwd)/bin:$PATH
echo "::endgroup::"
echo "::group::Print environment information"
uname -a
echo "GOOS: $(go env GOOS)"
echo "GOARCH: $(go env GOARCH)"
echo "::endgroup::"
echo "::group::Run End-to-End Tests"
git config --global --add safe.directory $(pwd)
make test-e2e
echo "::endgroup::"
test_netbsd:
name: Run end-to-end tests on NetBSD
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: test-e2e
uses: vmactions/netbsd-vm@46a58bbf03682b4cb24142b97fa315ae52bed573 # v1.1.8
with:
copyback: false
envs: 'GO_VERSION_NETBSD GNU_TAR_VERSION'
usesh: true
prepare: |
/usr/sbin/pkg_add -u
/usr/sbin/pkg_add \
git \
gmake \
grep \
gsed \
gtar-base-${GNU_TAR_VERSION}\
python312 \
wget
run: |
echo "::group::Setup prerequisites"
set -eu
mkdir bin
GOGZ="go${GO_VERSION_NETBSD}.netbsd-amd64.tar.gz"
wget https://go.dev/dl/${GOGZ}
gtar xzf ${GOGZ}
ln -s $(pwd)/go/bin/go $(pwd)/bin/go
ln -s $(which ggrep) $(pwd)/bin/grep
ln -s $(which gmake) $(pwd)/bin/make
ln -s $(which gsed) $(pwd)/bin/sed
ln -s $(which gtar) $(pwd)/bin/tar
export PATH=$(pwd)/bin:$PATH
echo "::endgroup::"
echo "::group::Print environment information"
uname -a
echo "GOOS: $(go env GOOS)"
echo "GOARCH: $(go env GOARCH)"
echo "::endgroup::"
echo "::group::Run End-to-End Tests"
git config --global --add safe.directory $(pwd)
make test-e2e
echo "::endgroup::"
test_dragonfly:
name: Run end-to-end tests on DragonFly
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: test-e2e
uses: vmactions/dragonflybsd-vm@e3c420e8a2362c2496fca6e76a291abd46f5d8e7 # v1.1.0
with:
copyback: false
envs: 'GO_VERSION_DRAGONFLY'
usesh: true
prepare: |
pkg update && pkg upgrade -y
pkg install -y \
bash \
git \
gmake \
gnugrep \
gsed \
gtar \
python3 \
wget
run: |
echo "::group::Setup prerequisites"
set -eu
mkdir bin
GOGZ="go${GO_VERSION_DRAGONFLY}.dragonfly-amd64.tar.gz"
wget https://go.dev/dl/${GOGZ}
gtar xzf ${GOGZ}
ln -s $(pwd)/go/bin/go $(pwd)/bin/go
ln -s $(which ggrep) $(pwd)/bin/grep
ln -s $(which gmake) $(pwd)/bin/make
ln -s $(which gsed) $(pwd)/bin/sed
ln -s $(which gtar) $(pwd)/bin/tar
ln -s $(which python3) $(pwd)/bin/python
export PATH=$(pwd)/bin:$PATH
echo "::endgroup::"
echo "::group::Print environment information"
uname -a
echo "GOOS: $(go env GOOS)"
echo "GOARCH: $(go env GOARCH)"
echo "::endgroup::"
echo "::group::Run End-to-End Tests"
git config --global --add safe.directory $(pwd)
gmake test-e2e
echo "::endgroup::"
test_solaris:
name: Run end-to-end tests on Solaris
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: test-e2e
uses: vmactions/solaris-vm@cc8f82fa1a7cc746153ec3f71bf11f311f16e225 # v1.1.1
with:
copyback: false
envs: 'GO_VERSION_SOLARIS'
usesh: true
prepare: |
pkg update
pkg install \
bash \
curl \
gcc \
git \
gnu-grep \
gnu-make \
gnu-sed \
gnu-tar
run: |
echo "::group::Setup prerequisites"
set -eu
mkdir bin
GOGZ="go${GO_VERSION_SOLARIS}.solaris-amd64.tar.gz"
wget https://go.dev/dl/${GOGZ}
gtar xzf ${GOGZ}
ln -s $(pwd)/go/bin/go $(pwd)/bin/go
ln -s $(which ggrep) $(pwd)/bin/grep
ln -s $(which gmake) $(pwd)/bin/make
ln -s $(which gsed) $(pwd)/bin/sed
ln -s $(which gtar) $(pwd)/bin/tar
export PATH=$(pwd)/bin:$PATH
echo ">> building promu as it is not shipped for Solaris"
git clone https://github.com/prometheus/promu.git
cd promu
go build .
cd -
mkdir -p $(go env GOPATH)/bin
ln -s $(pwd)/promu/promu $(go env GOPATH)/bin/promu
export PATH=$(go env GOPATH)/bin:$PATH
echo "::endgroup::"
echo "::group::Print environment information"
uname -a
echo "GOOS: $(go env GOOS)"
echo "GOARCH: $(go env GOARCH)"
echo "::endgroup::"
echo "::group::Run End-to-End Tests"
git config --global --add safe.directory $(pwd)
make test-e2e
echo "::endgroup::"
test_macos:
name: Run end-to-end tests on macOS
runs-on: macos-latest
steps:
- name: Checkout the repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install dependencies
run: |
brew install \
bash \
curl \
git \
grep \
make \
gnu-sed \
gnu-tar \
go \
python3
- name: test-e2e
run: |
echo "::group::Setup prerequisites"
set -eu
mkdir bin
ln -s $(which ggrep) $(pwd)/bin/grep
ln -s $(which gmake) $(pwd)/bin/make
ln -s $(which gsed) $(pwd)/bin/sed
ln -s $(which gtar) $(pwd)/bin/tar
export PATH=$(pwd)/bin:$PATH
echo "::endgroup::"
echo "::group::Print environment information"
uname -a
echo "GOOS: $(go env GOOS)"
echo "GOARCH: $(go env GOARCH)"
echo "::endgroup::"
echo "::group::Run End-to-End Tests"
git config --global --add safe.directory $(pwd)
make test-e2e
echo "::endgroup::"

.github/workflows/container_description.yml

@ -1,57 +0,0 @@
---
name: Push README to Docker Hub
on:
push:
paths:
- "README.md"
- "README-containers.md"
- ".github/workflows/container_description.yml"
branches: [ main, master ]
permissions:
contents: read
jobs:
PushDockerHubReadme:
runs-on: ubuntu-latest
name: Push README to Docker Hub
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set docker hub repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to Dockerhub
uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
env:
DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }}
DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }}
with:
destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
provider: dockerhub
short_description: ${{ env.DOCKER_REPO_NAME }}
# Empty string results in README-containers.md being pushed if it
# exists. Otherwise, README.md is pushed.
readme_file: ''
PushQuayIoReadme:
runs-on: ubuntu-latest
name: Push README to quay.io
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set quay.io org name
run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
- name: Set quay.io repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to quay.io
uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
env:
DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }}
with:
destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
provider: quay
# Empty string results in README-containers.md being pushed if it
# exists. Otherwise, README.md is pushed.
readme_file: ''

.github/workflows/golangci-lint.yml

@ -1,5 +1,3 @@
---
# This action is synced from https://github.com/prometheus/prometheus
name: golangci-lint name: golangci-lint
on: on:
push: push:
@ -12,28 +10,21 @@ on:
- ".golangci.yml" - ".golangci.yml"
pull_request: pull_request:
permissions: # added using https://github.com/step-security/secure-repo
contents: read
jobs: jobs:
golangci: golangci:
permissions:
contents: read # for actions/checkout to fetch code
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
name: lint name: lint
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 uses: actions/checkout@v3
- name: Install Go - name: install Go
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 uses: actions/setup-go@v2
with: with:
go-version: 1.24.x go-version: 1.18.x
- name: Install snmp_exporter/generator dependencies - name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter' if: github.repository == 'prometheus/snmp_exporter'
- name: Lint - name: Lint
uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84 # v6.5.2 uses: golangci/golangci-lint-action@v3.2.0
with: with:
args: --verbose version: v1.45.2
version: v1.64.6

.gitignore

@ -27,8 +27,6 @@ dependencies-stamp
/.release /.release
/.tarballs /.tarballs
tools/tools
# Intellij # Intellij
/.idea /.idea
@ -36,6 +34,5 @@ tools/tools
# Test files extracted from ttar # Test files extracted from ttar
collector/fixtures/sys/ collector/fixtures/sys/
collector/fixtures/udev/
/vendor /vendor

.golangci.yml

@ -1,9 +1,15 @@
linters: linters:
enable: enable:
- depguard
- goimports
- misspell
- revive - revive
disable:
# Disable soon to deprecated[1] linters that lead to false
# positives when build tags disable certain files[2]
# 1: https://github.com/golangci/golangci-lint/issues/1841
# 2: https://github.com/prometheus/node_exporter/issues/1545
- deadcode
- unused
- structcheck
- varcheck
issues: issues:
exclude-rules: exclude-rules:
@ -12,21 +18,5 @@ issues:
- errcheck - errcheck
linters-settings: linters-settings:
depguard:
rules:
no_exec_policy:
files:
- "!$test"
deny:
- pkg: "os/exec"
desc: "Using os/exec to run sub processes it not allowed by policy"
errcheck: errcheck:
exclude-functions: exclude: scripts/errcheck_excludes.txt
# Used in HTTP handlers, any error is handled by the server itself.
- (net/http.ResponseWriter).Write
revive:
rules:
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter
- name: unused-parameter
severity: warning
disabled: true

.promu-cgo.yml

@ -1,13 +1,14 @@
go: go:
# Whenever the Go version is updated here, .circle/config.yml and # Whenever the Go version is updated here, .circle/config.yml and
# .promu.yml should also be updated. # .promu.yml should also be updated.
version: 1.24 version: 1.19
cgo: true cgo: true
repository: repository:
path: github.com/prometheus/node_exporter path: github.com/prometheus/node_exporter
build: build:
binaries: binaries:
- name: node_exporter - name: node_exporter
flags: -a -tags 'netgo osusergo static_build'
ldflags: | ldflags: |
-X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.Version={{.Version}}
-X github.com/prometheus/common/version.Revision={{.Revision}} -X github.com/prometheus/common/version.Revision={{.Revision}}

.promu.yml

@ -1,12 +1,13 @@
go: go:
# Whenever the Go version is updated here, .circle/config.yml and # Whenever the Go version is updated here, .circle/config.yml and
# .promu-cgo.yml should also be updated. # .promu-cgo.yml should also be updated.
version: 1.24 version: 1.19
repository: repository:
path: github.com/prometheus/node_exporter path: github.com/prometheus/node_exporter
build: build:
binaries: binaries:
- name: node_exporter - name: node_exporter
flags: -a -tags 'netgo osusergo static_build'
ldflags: | ldflags: |
-X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.Version={{.Version}}
-X github.com/prometheus/common/version.Revision={{.Revision}} -X github.com/prometheus/common/version.Revision={{.Revision}}

.yamllint

@ -1,7 +1,5 @@
--- ---
extends: default extends: default
ignore: |
**/node_modules
rules: rules:
braces: braces:
@ -22,4 +20,9 @@ rules:
config/testdata/section_key_dup.bad.yml config/testdata/section_key_dup.bad.yml
line-length: disable line-length: disable
truthy: truthy:
check-keys: false ignore: |
.github/workflows/codeql-analysis.yml
.github/workflows/funcbench.yml
.github/workflows/fuzzing.yml
.github/workflows/prombench.yml
.github/workflows/golangci-lint.yml

CHANGELOG.md

@ -5,143 +5,6 @@
* [ENHANCEMENT] * [ENHANCEMENT]
* [BUGFIX] * [BUGFIX]
## 1.9.0 / 2025-02-17
* [CHANGE] meminfo: Convert linux implementation to use procfs lib #3049
* [CHANGE] Update logging to use Go log/slog #3097
* [FEATURE] filesystem: Add `node_filesystem_mount_info` metric #2970
* [FEATURE] btrfs: Add metrics for commit statistics #3010
* [FEATURE] interrupts: Add collector include/exclude filtering #3028
* [FEATURE] interrupts: Add "exclude zeros" filtering #3028
* [FEATURE] slabinfo: Add filters for slab name. #3041
* [FEATURE] pressure: add IRQ PSI metrics #3048
* [FEATURE] hwmon: Add include and exclude filter for sensors #3072
* [FEATURE] filesystem: Add NetBSD support #3082
* [FEATURE] netdev: Add ifAlias label #3087
* [FEATURE] hwmon: Add Support for GPU Clock Frequencies #3093
* [FEATURE] Add `exclude[]` URL parameter #3116
* [FEATURE] Add AIX support #3136
* [FEATURE] filesystem: Add fs-types/mount-points include flags #3171
* [FEATURE] netstat: Add collector for tcp packet counters for FreeBSD. #3177
* [ENHANCEMENT] ethtool: Add logging for filtering flags #2979
* [ENHANCEMENT] netstat: Add TCPRcvQDrop to default metrics #3021
* [ENHANCEMENT] diskstats: Add block device rotational #3022
* [ENHANCEMENT] cpu: Support CPU online status #3032
* [ENHANCEMENT] arp: optimize interface name resolution #3133
* [ENHANCEMENT] textfile: Allow specifiying multiple directory globs #3135
* [ENHANCEMENT] filesystem: Add reporting of purgeable space on MacOS #3206
* [ENHANCEMENT] ethtool: Skip full scan of NetClass directories #3239
* [BUGFIX] zfs: Prevent `procfs` integer underflow #2961
* [BUGFIX] pressure: Fix collection on systems that do not expose a full CPU stat #3054
* [BUGFIX] cpu: Fix FreeBSD 32-bit host support and plug memory leak #3083
* [BUGFIX] hwmon: Add safety check to hwmon read #3134
* [BUGFIX] zfs: Allow space in dataset name #3186
## 1.8.1 / 2024-05-16
* [BUGFIX] Fix CPU seconds on Solaris #2963
* [BUGFIX] Sign Darwin/MacOS binaries #3008
* [BUGFIX] Fix pressure collector nil reference #3016
## 1.8.0 / 2024-04-24
* [CHANGE] exec_bsd: Fix labels for `vm.stats.sys.v_syscall` sysctl #2895
* [CHANGE] diskstats: Ignore zram devices on linux systems #2898
* [CHANGE] textfile: Avoid inconsistent help-texts #2962
* [CHANGE] os: Removed caching of modtime/filename of os-release file #2987
* [FEATURE] xfrm: Add new collector #2866
* [FEATURE] watchdog: Add new collector #2880
* [ENHANCEMENT] cpu_vulnerabilities: Add mitigation information label #2806
* [ENHANCEMENT] nfsd: Handle new `wdeleg_getattr` attribute #2810
* [ENHANCEMENT] netstat: Add TCPOFOQueue to default netstat metrics #2867
* [ENHANCEMENT] filesystem: surface device errors #2923
* [ENHANCEMENT] os: Add support end parsing #2982
* [ENHANCEMENT] zfs: Log mib when sysctl read fails on FreeBSD #2975
* [ENHANCEMENT] fibre_channel: update procfs to take into account optional attributes #2933
* [BUGFIX] cpu: Fix debug log in cpu collector #2857
* [BUGFIX] hwmon: Fix hwmon nil ptr #2873
* [BUGFIX] hwmon: Fix hwmon error capture #2915
* [BUGFIX] zfs: Revert "Add ZFS freebsd per dataset stats #2925
* [BUGFIX] ethtool: Sanitize ethtool metric name keys #2940
* [BUGFIX] fix: data race of NetClassCollector metrics initialization #2995
## 1.7.0 / 2023-11-11
* [FEATURE] Add ZFS freebsd per dataset stats #2753
* [FEATURE] Add cpu vulnerabilities reporting from sysfs #2721
* [ENHANCEMENT] Parallelize stat calls in Linux filesystem collector #1772
* [ENHANCEMENT] Add missing linkspeeds to ethtool collector 2711
* [ENHANCEMENT] Add CPU MHz as the value for `node_cpu_info` metric #2778
* [ENHANCEMENT] Improve qdisc collector performance #2779
* [ENHANCEMENT] Add include and exclude filter for hwmon collector #2699
* [ENHANCEMENT] Optionally fetch ARP stats via rtnetlink instead of procfs #2777
* [BUFFIX] Fix ZFS arcstats on FreeBSD 14.0+ 2754
* [BUGFIX] Fallback to 32-bit stats in netdev #2757
* [BUGFIX] Close btrfs.FS handle after use #2780
* [BUGFIX] Move RO status before error return #2807
* [BUFFIX] Fix `promhttp_metric_handler_errors_total` being always active #2808
* [BUGFIX] Fix nfsd v4 index miss #2824
## 1.6.1 / 2023-06-17
Rebuild with latest Go compiler bugfix release.
## 1.6.0 / 2023-05-27
* [CHANGE] Fix cpustat when some cpus are offline #2318
* [CHANGE] Remove metrics of offline CPUs in CPU collector #2605
* [CHANGE] Deprecate ntp collector #2603
* [CHANGE] Remove bcache `cache_readaheads_totals` metrics #2583
* [CHANGE] Deprecate supervisord collector #2685
* [FEATURE] Enable uname collector on NetBSD #2559
* [FEATURE] NetBSD support for the meminfo collector #2570
* [FEATURE] NetBSD support for CPU collector #2626
* [FEATURE] Add FreeBSD collector for netisr subsystem #2668
* [FEATURE] Add softirqs collector #2669
* [ENHANCEMENT] Add suspended as a `node_zfs_zpool_state` #2449
* [ENHANCEMENT] Add administrative state of Linux network interfaces #2515
* [ENHANCEMENT] Log current value of GOMAXPROCS #2537
* [ENHANCEMENT] Add profiler options for perf collector #2542
* [ENHANCEMENT] Allow root path as metrics path #2590
* [ENHANCEMENT] Add cpu frequency governor metrics #2569
* [ENHANCEMENT] Add new landing page #2622
* [ENHANCEMENT] Reduce privileges needed for btrfs device stats #2634
* [ENHANCEMENT] Add ZFS `memory_available_bytes` #2687
* [ENHANCEMENT] Use `SCSI_IDENT_SERIAL` as serial in diskstats #2612
* [ENHANCEMENT] Read missing from netlink netclass attributes from sysfs #2669
* [BUGFIX] perf: fixes for automatically detecting the correct tracefs mountpoints #2553
* [BUGFIX] Fix `thermal_zone` collector noise #2554
* [BUGFIX] Fix a problem fetching the user wire count on FreeBSD #2584
* [BUGFIX] interrupts: Fix fields on linux aarch64 #2631
* [BUGFIX] Remove metrics of offline CPUs in CPU collector #2605
* [BUGFIX] Fix OpenBSD filesystem collector string parsing #2637
* [BUGFIX] Fix bad reporting of `node_cpu_seconds_total` in OpenBSD #2663
## 1.5.0 / 2022-11-29
NOTE: This changes the Go runtime "GOMAXPROCS" to 1. This is done to limit the
concurrency of the exporter to 1 CPU thread at a time in order to avoid a
race condition problem in the Linux kernel (#2500) and parallel IO issues
on nodes with high numbers of CPUs/CPU threads (#1880).
NOTE: A command line arg has been changed from `--web.config` to `--web.config.file`.
* [CHANGE] Default GOMAXPROCS to 1 #2530
* [FEATURE] Add multiple listeners and systemd socket listener activation #2393
* [ENHANCEMENT] Add RTNL version of netclass collector #2492, #2528
* [BUGFIX] Fix diskstats exclude flags #2487
* [BUGFIX] Bump go/x/crypt and go/x/net #2488
* [BUGFIX] Fix hwmon label sanitizer #2504
* [BUGFIX] Use native endianness when encoding InetDiagMsg #2508
* [BUGFIX] Fix btrfs device stats always being zero #2516
* [BUGFIX] Security: Update exporter-toolkit (CVE-2022-46146) #2531
## 1.4.1 / 2022-11-29
* [BUGFIX] Fix diskstats exclude flags #2487
* [BUGFIX] Security: Update go/x/crypto and go/x/net (CVE-2022-27191 CVE-2022-27664) #2488
* [BUGFIX] Security: Update exporter-toolkit (CVE-2022-46146) #2531
## 1.4.0 / 2022-09-24 ## 1.4.0 / 2022-09-24
* [CHANGE] Merge metrics descriptions in textfile collector #2475 * [CHANGE] Merge metrics descriptions in textfile collector #2475
@ -211,9 +74,9 @@ NOTE: In order to support globs in the textfile collector path, filenames expose
* [ENHANCEMENT] Add flag to disable guest CPU metrics #2123 * [ENHANCEMENT] Add flag to disable guest CPU metrics #2123
* [ENHANCEMENT] Add DMI collector #2131 * [ENHANCEMENT] Add DMI collector #2131
* [ENHANCEMENT] Add threads metrics to processes collector #2164 * [ENHANCEMENT] Add threads metrics to processes collector #2164
* [ENHANCEMENT] Reduce timer GC delays in the Linux filesystem collector #2169 * [ENHANCMMENT] Reduce timer GC delays in the Linux filesystem collector #2169
* [ENHANCEMENT] Add TCPTimeouts to netstat default filter #2189 * [ENHANCMMENT] Add TCPTimeouts to netstat default filter #2189
* [ENHANCEMENT] Use SysctlTimeval for boottime collector on BSD #2208 * [ENHANCMMENT] Use SysctlTimeval for boottime collector on BSD #2208
* [BUGFIX] ethtool: Sanitize metric names #2093 * [BUGFIX] ethtool: Sanitize metric names #2093
* [BUGFIX] Fix ethtool collector for multiple interfaces #2126 * [BUGFIX] Fix ethtool collector for multiple interfaces #2126
* [BUGFIX] Fix possible panic on macOS #2133 * [BUGFIX] Fix possible panic on macOS #2133
@ -543,7 +406,7 @@ Other breaking changes:
**Breaking changes** **Breaking changes**
This release contains major breaking changes to flag handling. This release contains major breaking changes to flag handling.
* The flag library has been changed, all flags now require double-dashes. (`-foo` becomes `--foo`). * The flag library has been changed, all flags now require double-dashs. (`-foo` becomes `--foo`).
* The collector selection flag has been replaced by individual boolean flags. * The collector selection flag has been replaced by individual boolean flags.
* The `-collector.procfs` and `-collector.sysfs` flags have been renamed to `--path.procfs` and `--path.sysfs` respectively. * The `-collector.procfs` and `-collector.sysfs` flags have been renamed to `--path.procfs` and `--path.sysfs` respectively.
@ -559,7 +422,7 @@ Windows support is now removed, the [wmi_exporter](https://github.com/martinlind
* [CHANGE] Switch to kingpin flags #639 * [CHANGE] Switch to kingpin flags #639
* [CHANGE] Replace --collectors.enabled with per-collector flags #640 * [CHANGE] Replace --collectors.enabled with per-collector flags #640
* [FEATURE] Add ARP collector for Linux #540 * [FEATURE] Add ARP collector for Linux #540
* [FEATURE] Add XFS collector for Linux #568, #575 * [FEATURE] Add XFS colector for Linux #568, #575
* [FEATURE] Add qdisc collector for Linux #580 * [FEATURE] Add qdisc collector for Linux #580
* [FEATURE] Add cpufreq stats for Linux #548 * [FEATURE] Add cpufreq stats for Linux #548
* [FEATURE] Add diskstats for Darwin #593 * [FEATURE] Add diskstats for Darwin #593

Makefile

@ -88,12 +88,12 @@ $(eval $(call goarch_pair,mips64el,mipsel))
all:: vet checkmetrics checkrules common-all $(cross-test) $(test-e2e) all:: vet checkmetrics checkrules common-all $(cross-test) $(test-e2e)
.PHONY: test .PHONY: test
test: collector/fixtures/sys/.unpacked collector/fixtures/udev/.unpacked test: collector/fixtures/sys/.unpacked
@echo ">> running tests" @echo ">> running tests"
$(GO) test -short $(test-flags) $(pkgs) $(GO) test -short $(test-flags) $(pkgs)
.PHONY: test-32bit .PHONY: test-32bit
test-32bit: collector/fixtures/sys/.unpacked collector/fixtures/udev/.unpacked test-32bit: collector/fixtures/sys/.unpacked
@echo ">> running tests in 32-bit mode" @echo ">> running tests in 32-bit mode"
@env GOARCH=$(GOARCH_CROSS) $(GO) test $(pkgs) @env GOARCH=$(GOARCH_CROSS) $(GO) test $(pkgs)
@ -110,16 +110,9 @@ skip-test-32bit:
update_fixtures: update_fixtures:
rm -vf collector/fixtures/sys/.unpacked rm -vf collector/fixtures/sys/.unpacked
./ttar -C collector/fixtures -c -f collector/fixtures/sys.ttar sys ./ttar -C collector/fixtures -c -f collector/fixtures/sys.ttar sys
rm -vf collector/fixtures/udev/.unpacked
./ttar -C collector/fixtures -c -f collector/fixtures/udev.ttar udev
.PHONY: tools
tools:
@rm ./tools/tools >/dev/null 2>&1 || true
@$(GO) build -o tools ./tools/...
.PHONY: test-e2e .PHONY: test-e2e
test-e2e: build collector/fixtures/sys/.unpacked collector/fixtures/udev/.unpacked tools test-e2e: build collector/fixtures/sys/.unpacked
@echo ">> running end-to-end tests" @echo ">> running end-to-end tests"
./end-to-end-test.sh ./end-to-end-test.sh

Makefile.common

@ -49,23 +49,23 @@ endif
GOTEST := $(GO) test GOTEST := $(GO) test
GOTEST_DIR := GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),) ifneq ($(CIRCLE_JOB),)
ifneq ($(shell command -v gotestsum 2> /dev/null),) ifneq ($(shell which gotestsum),)
GOTEST_DIR := test-results GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif endif
endif endif
PROMU_VERSION ?= 0.17.0 PROMU_VERSION ?= 0.13.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT := SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT := GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?= GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.64.6 GOLANGCI_LINT_VERSION ?= v1.49.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different. # windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
# If we're in CI and there is an Actions file, that means the linter # If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here. # is being run in Actions, so we don't need to run it here.
ifneq (,$(SKIP_GOLANGCI_LINT)) ifneq (,$(SKIP_GOLANGCI_LINT))
@ -91,8 +91,6 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64 # Only supported on amd64
@ -169,20 +167,16 @@ common-vet:
common-lint: $(GOLANGCI_LINT) common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT ifdef GOLANGCI_LINT
@echo ">> running golangci-lint" @echo ">> running golangci-lint"
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
# Otherwise staticcheck might fail randomly for some reason not yet explained.
$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif endif
.PHONY: common-lint-fix
common-lint-fix: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint fix"
$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
.PHONY: common-yamllint .PHONY: common-yamllint
common-yamllint: common-yamllint:
@echo ">> running yamllint on all YAML files in the repository" @echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell command -v yamllint 2> /dev/null)) ifeq (, $(shell which yamllint))
@echo "yamllint not installed so skipping" @echo "yamllint not installed so skipping"
else else
yamllint . yamllint .
@ -208,14 +202,10 @@ common-tarball: promu
@echo ">> building release tarball" @echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
.PHONY: common-docker-repo-name
common-docker-repo-name:
@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
.PHONY: common-docker $(BUILD_DOCKER_ARCHS) .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%: $(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \ -f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \ --build-arg ARCH="$*" \
--build-arg OS="linux" \ --build-arg OS="linux" \
@ -224,19 +214,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest .PHONY: common-docker-manifest
common-docker-manifest: common-docker-manifest:
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
.PHONY: promu .PHONY: promu
promu: $(PROMU) promu: $(PROMU)
@ -275,9 +265,3 @@ $(1)_precheck:
exit 1; \ exit 1; \
fi fi
endef endef
govulncheck: install-govulncheck
govulncheck ./...
install-govulncheck:
command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest

README.md

@ -1,8 +1,7 @@
# Node exporter # Node exporter
[![CircleCI](https://circleci.com/gh/prometheus/node_exporter/tree/master.svg?style=shield)][circleci] [![CircleCI](https://circleci.com/gh/prometheus/node_exporter/tree/master.svg?style=shield)][circleci]
![bsd workflow](https://github.com/prometheus/node_exporter/actions/workflows/bsd.yml/badge.svg) [![Buildkite status](https://badge.buildkite.com/94a0c1fb00b1f46883219c256efe9ce01d63b6505f3a942f9b.svg)](https://buildkite.com/prometheus/node-exporter)
![golangci-lint workflow](https://github.com/prometheus/node_exporter/actions/workflows/golangci-lint.yml/badge.svg)
[![Docker Repository on Quay](https://quay.io/repository/prometheus/node-exporter/status)][quay] [![Docker Repository on Quay](https://quay.io/repository/prometheus/node-exporter/status)][quay]
[![Docker Pulls](https://img.shields.io/docker/pulls/prom/node-exporter.svg?maxAge=604800)][hub] [![Docker Pulls](https://img.shields.io/docker/pulls/prom/node-exporter.svg?maxAge=604800)][hub]
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/node_exporter)][goreportcard] [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/node_exporter)][goreportcard]
@ -23,14 +22,18 @@ The `node_exporter` listens on HTTP port 9100 by default. See the `--help` outpu
### Ansible ### Ansible
For automated installs with [Ansible](https://www.ansible.com/), there is the [Prometheus Community role](https://github.com/prometheus-community/ansible). For automated installs with [Ansible](https://www.ansible.com/), there is the [Cloud Alchemy role](https://github.com/cloudalchemy/ansible-node-exporter).
### RHEL/CentOS/Fedora
There is a [community-supplied COPR repository](https://copr.fedorainfracloud.org/coprs/ibotty/prometheus-exporters/) which closely follows upstream releases.
### Docker ### Docker
The `node_exporter` is designed to monitor the host system. Deploying in containers requires The `node_exporter` is designed to monitor the host system. It's not recommended
extra care in order to avoid monitoring the container itself. to deploy it as a Docker container because it requires access to the host system.
For situations where containerized deployment is needed, some extra flags must be used to allow For situations where Docker deployment is needed, some extra flags must be used to allow
the `node_exporter` access to the host namespaces. the `node_exporter` access to the host namespaces.
Be aware that any non-root mount points you want to monitor will need to be bind-mounted Be aware that any non-root mount points you want to monitor will need to be bind-mounted
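A minimal docker-compose sketch of such a containerized deployment, assuming the usual host-namespace settings and a read-only bind mount of the host root (image tag, paths, and options are illustrative, not taken from this diff):

```yaml
# Sketch only: runs node_exporter with the host PID/network namespaces and the
# host filesystem mounted read-only at /host, remapped via --path.rootfs.
services:
  node_exporter:
    image: quay.io/prometheus/node-exporter:latest
    command:
      - '--path.rootfs=/host'
    network_mode: host   # share the host network namespace
    pid: host            # share the host PID namespace
    restart: unless-stopped
    volumes:
      - '/:/host:ro,rslave'   # bind-mount the host root read-only
```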
@ -80,37 +83,6 @@ Collectors are enabled by providing a `--collector.<name>` flag.
Collectors that are enabled by default can be disabled by providing a `--no-collector.<name>` flag. Collectors that are enabled by default can be disabled by providing a `--no-collector.<name>` flag.
To enable only some specific collector(s), use `--collector.disable-defaults --collector.<name> ...`. To enable only some specific collector(s), use `--collector.disable-defaults --collector.<name> ...`.
### Include & Exclude flags
A few collectors can be configured to include or exclude certain patterns using dedicated flags. The exclude flags are used to indicate "all except", while the include flags are used to say "none except". Note that these flags are mutually exclusive on collectors that support both.
Example:
```txt
--collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
```
List:
Collector | Scope | Include Flag | Exclude Flag
--- | --- | --- | ---
arp | device | --collector.arp.device-include | --collector.arp.device-exclude
cpu | bugs | --collector.cpu.info.bugs-include | N/A
cpu | flags | --collector.cpu.info.flags-include | N/A
diskstats | device | --collector.diskstats.device-include | --collector.diskstats.device-exclude
ethtool | device | --collector.ethtool.device-include | --collector.ethtool.device-exclude
ethtool | metrics | --collector.ethtool.metrics-include | N/A
filesystem | fs-types | --collector.filesystem.fs-types-include | --collector.filesystem.fs-types-exclude
filesystem | mount-points | --collector.filesystem.mount-points-include | --collector.filesystem.mount-points-exclude
hwmon | chip | --collector.hwmon.chip-include | --collector.hwmon.chip-exclude
hwmon | sensor | --collector.hwmon.sensor-include | --collector.hwmon.sensor-exclude
interrupts | name | --collector.interrupts.name-include | --collector.interrupts.name-exclude
netdev | device | --collector.netdev.device-include | --collector.netdev.device-exclude
qdisk | device | --collector.qdisk.device-include | --collector.qdisk.device-exclude
slabinfo | slab-names | --collector.slabinfo.slabs-include | --collector.slabinfo.slabs-exclude
sysctl | all | --collector.sysctl.include | N/A
systemd | unit | --collector.systemd.unit-include | --collector.systemd.unit-exclude
### Enabled by default ### Enabled by default
Name | Description | OS Name | Description | OS
@ -139,7 +111,6 @@ mdadm | Exposes statistics about devices in `/proc/mdstat` (does nothing if no `
meminfo | Exposes memory statistics. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD meminfo | Exposes memory statistics. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD
netclass | Exposes network interface info from `/sys/class/net/` | Linux netclass | Exposes network interface info from `/sys/class/net/` | Linux
netdev | Exposes network interface statistics such as bytes transferred. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD netdev | Exposes network interface statistics such as bytes transferred. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD
netisr | Exposes netisr statistics | FreeBSD
netstat | Exposes network statistics from `/proc/net/netstat`. This is the same information as `netstat -s`. | Linux netstat | Exposes network statistics from `/proc/net/netstat`. This is the same information as `netstat -s`. | Linux
nfs | Exposes NFS client statistics from `/proc/net/rpc/nfs`. This is the same information as `nfsstat -c`. | Linux nfs | Exposes NFS client statistics from `/proc/net/rpc/nfs`. This is the same information as `nfsstat -c`. | Linux
nfsd | Exposes NFS kernel server statistics from `/proc/net/rpc/nfsd`. This is the same information as `nfsstat -s`. | Linux nfsd | Exposes NFS kernel server statistics from `/proc/net/rpc/nfsd`. This is the same information as `nfsstat -s`. | Linux
@ -162,7 +133,6 @@ timex | Exposes selected adjtimex(2) system call stats. | Linux
udp_queues | Exposes UDP total lengths of the rx_queue and tx_queue from `/proc/net/udp` and `/proc/net/udp6`. | Linux udp_queues | Exposes UDP total lengths of the rx_queue and tx_queue from `/proc/net/udp` and `/proc/net/udp6`. | Linux
uname | Exposes system information as provided by the uname system call. | Darwin, FreeBSD, Linux, OpenBSD uname | Exposes system information as provided by the uname system call. | Darwin, FreeBSD, Linux, OpenBSD
vmstat | Exposes statistics from `/proc/vmstat`. | Linux vmstat | Exposes statistics from `/proc/vmstat`. | Linux
watchdog | Exposes statistics from `/sys/class/watchdog` | Linux
xfs | Exposes XFS runtime statistics. | Linux (kernel 4.4+) xfs | Exposes XFS runtime statistics. | Linux (kernel 4.4+)
zfs | Exposes [ZFS](http://open-zfs.org/) performance statistics. | FreeBSD, [Linux](http://zfsonlinux.org/), Solaris zfs | Exposes [ZFS](http://open-zfs.org/) performance statistics. | FreeBSD, [Linux](http://zfsonlinux.org/), Solaris
@ -189,41 +159,29 @@ Name | Description | OS
---------|-------------|---- ---------|-------------|----
buddyinfo | Exposes statistics of memory fragments as reported by /proc/buddyinfo. | Linux buddyinfo | Exposes statistics of memory fragments as reported by /proc/buddyinfo. | Linux
cgroups | A summary of the number of active and enabled cgroups | Linux cgroups | A summary of the number of active and enabled cgroups | Linux
cpu\_vulnerabilities | Exposes CPU vulnerability information from sysfs. | Linux
devstat | Exposes device statistics | Dragonfly, FreeBSD devstat | Exposes device statistics | Dragonfly, FreeBSD
drm | Expose GPU metrics using sysfs / DRM, `amdgpu` is the only driver which exposes this information through DRM | Linux
drbd | Exposes Distributed Replicated Block Device statistics (to version 8.4) | Linux drbd | Exposes Distributed Replicated Block Device statistics (to version 8.4) | Linux
ethtool | Exposes network interface information and network driver statistics equivalent to `ethtool`, `ethtool -S`, and `ethtool -i`. | Linux ethtool | Exposes network interface information and network driver statistics equivalent to `ethtool`, `ethtool -S`, and `ethtool -i`. | Linux
interrupts | Exposes detailed interrupts statistics. | Linux, OpenBSD interrupts | Exposes detailed interrupts statistics. | Linux, OpenBSD
ksmd | Exposes kernel and system statistics from `/sys/kernel/mm/ksm`. | Linux ksmd | Exposes kernel and system statistics from `/sys/kernel/mm/ksm`. | Linux
lnstat | Exposes stats from `/proc/net/stat/`. | Linux lnstat | Exposes stats from `/proc/net/stat/`. | Linux
logind | Exposes session counts from [logind](http://www.freedesktop.org/wiki/Software/systemd/logind/). | Linux logind | Exposes session counts from [logind](http://www.freedesktop.org/wiki/Software/systemd/logind/). | Linux
meminfo\_numa | Exposes memory statistics from `/sys/devices/system/node/node[0-9]*/meminfo`, `/sys/devices/system/node/node[0-9]*/numastat`. | Linux meminfo\_numa | Exposes memory statistics from `/proc/meminfo_numa`. | Linux
mountstats | Exposes filesystem statistics from `/proc/self/mountstats`. Exposes detailed NFS client statistics. | Linux mountstats | Exposes filesystem statistics from `/proc/self/mountstats`. Exposes detailed NFS client statistics. | Linux
network_route | Exposes the routing table as metrics | Linux network_route | Exposes the routing table as metrics | Linux
pcidevice | Exposes pci devices' information including their link status and parent devices. | Linux ntp | Exposes local NTP daemon health to check [time](./docs/TIME.md) | _any_
perf | Exposes perf based metrics (Warning: Metrics are dependent on kernel configuration and settings). | Linux perf | Exposes perf based metrics (Warning: Metrics are dependent on kernel configuration and settings). | Linux
processes | Exposes aggregate process statistics from `/proc`. | Linux processes | Exposes aggregate process statistics from `/proc`. | Linux
qdisc | Exposes [queuing discipline](https://en.wikipedia.org/wiki/Network_scheduler#Linux_kernel) statistics | Linux qdisc | Exposes [queuing discipline](https://en.wikipedia.org/wiki/Network_scheduler#Linux_kernel) statistics | Linux
runit | Exposes service status from [runit](http://smarden.org/runit/). | _any_
slabinfo | Exposes slab statistics from `/proc/slabinfo`. Note that permission of `/proc/slabinfo` is usually 0400, so set it appropriately. | Linux slabinfo | Exposes slab statistics from `/proc/slabinfo`. Note that permission of `/proc/slabinfo` is usually 0400, so set it appropriately. | Linux
softirqs | Exposes detailed softirq statistics from `/proc/softirqs`. | Linux supervisord | Exposes service status from [supervisord](http://supervisord.org/). | _any_
sysctl | Expose sysctl values from `/proc/sys`. Use `--collector.sysctl.include(-info)` to configure. | Linux sysctl | Expose sysctl values from `/proc/sys`. Use `--collector.sysctl.include(-info)` to configure. | Linux
systemd | Exposes service and system status from [systemd](http://www.freedesktop.org/wiki/Software/systemd/). | Linux systemd | Exposes service and system status from [systemd](http://www.freedesktop.org/wiki/Software/systemd/). | Linux
tcpstat | Exposes TCP connection status information from `/proc/net/tcp` and `/proc/net/tcp6`. (Warning: the current version has potential performance issues in high load situations.) | Linux tcpstat | Exposes TCP connection status information from `/proc/net/tcp` and `/proc/net/tcp6`. (Warning: the current version has potential performance issues in high load situations.) | Linux
wifi | Exposes WiFi device and station statistics. | Linux wifi | Exposes WiFi device and station statistics. | Linux
xfrm | Exposes statistics from `/proc/net/xfrm_stat` | Linux
zoneinfo | Exposes NUMA memory zone metrics. | Linux zoneinfo | Exposes NUMA memory zone metrics. | Linux
### Deprecated
These collectors are deprecated and will be removed in the next major release.
Name | Description | OS
---------|-------------|----
ntp | Exposes local NTP daemon health to check [time](./docs/TIME.md) | _any_
runit | Exposes service status from [runit](http://smarden.org/runit/). | _any_
supervisord | Exposes service status from [supervisord](http://supervisord.org/). | _any_
### Perf Collector ### Perf Collector
The `perf` collector may not work out of the box on some Linux systems due to kernel The `perf` collector may not work out of the box on some Linux systems due to kernel
@ -341,21 +299,13 @@ mv /path/to/directory/role.prom.$$ /path/to/directory/role.prom
The `node_exporter` will expose all metrics from enabled collectors by default. This is the recommended way to collect metrics to avoid errors when comparing metrics of different families. The `node_exporter` will expose all metrics from enabled collectors by default. This is the recommended way to collect metrics to avoid errors when comparing metrics of different families.
For advanced use the `node_exporter` can be passed an optional list of collectors to filter metrics. The parameters `collect[]` and `exclude[]` can be used multiple times (but cannot be combined). In Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>). For advanced use the `node_exporter` can be passed an optional list of collectors to filter metrics. The `collect[]` parameter may be used multiple times. In Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>).
Collect only `cpu` and `meminfo` collector metrics:
``` ```
params: params:
collect[]: collect[]:
- cpu - foo
- meminfo - bar
```
Collect all enabled collector metrics but exclude `netdev`:
```
params:
exclude[]:
- netdev
``` ```
This can be useful for having different Prometheus servers collect specific metrics from nodes. This can be useful for having different Prometheus servers collect specific metrics from nodes.
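In a full Prometheus configuration these parameters sit under a scrape job, for example (a minimal sketch; the job name and target are placeholders):

```yaml
scrape_configs:
  - job_name: node
    static_configs:
      - targets: ['localhost:9100']
    params:
      collect[]:        # only scrape the cpu and meminfo collectors
        - cpu
        - meminfo
```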
@ -384,15 +334,15 @@ To see all available configuration flags:
## TLS endpoint ## TLS endpoint
**EXPERIMENTAL** ** EXPERIMENTAL **
The exporter supports TLS via a new web configuration file. The exporter supports TLS via a new web configuration file.
```console ```console
./node_exporter --web.config.file=web-config.yml ./node_exporter --web.config=web-config.yml
``` ```
See the [exporter-toolkit web-configuration](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md) for more details. See the [exporter-toolkit https package](https://github.com/prometheus/exporter-toolkit/blob/v0.1.0/https/README.md) for more details.
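A minimal web-config.yml for the TLS case might look like the following sketch (certificate paths are placeholders; see the exporter-toolkit documentation for the full schema):

```yaml
tls_server_config:
  cert_file: /etc/node_exporter/node_exporter.crt   # server certificate (placeholder path)
  key_file: /etc/node_exporter/node_exporter.key    # private key (placeholder path)
```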
[travis]: https://travis-ci.org/prometheus/node_exporter [travis]: https://travis-ci.org/prometheus/node_exporter
[hub]: https://hub.docker.com/r/prom/node-exporter/ [hub]: https://hub.docker.com/r/prom/node-exporter/

VERSION

@ -1 +1 @@
1.9.0 1.4.0

collector/arp_linux.go

@ -17,27 +17,26 @@
package collector package collector
import ( import (
"bufio"
"fmt" "fmt"
"log/slog" "io"
"os"
"strings"
"github.com/alecthomas/kingpin/v2" "github.com/go-kit/log"
"github.com/jsimonetti/rtnetlink/v2/rtnl"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs" "gopkg.in/alecthomas/kingpin.v2"
"golang.org/x/sys/unix"
) )
var ( var (
arpDeviceInclude = kingpin.Flag("collector.arp.device-include", "Regexp of arp devices to include (mutually exclusive to device-exclude).").String() arpDeviceInclude = kingpin.Flag("collector.arp.device-include", "Regexp of arp devices to include (mutually exclusive to device-exclude).").String()
arpDeviceExclude = kingpin.Flag("collector.arp.device-exclude", "Regexp of arp devices to exclude (mutually exclusive to device-include).").String() arpDeviceExclude = kingpin.Flag("collector.arp.device-exclude", "Regexp of arp devices to exclude (mutually exclusive to device-include).").String()
arpNetlink = kingpin.Flag("collector.arp.netlink", "Use netlink to gather stats instead of /proc/net/arp.").Default("true").Bool()
) )
type arpCollector struct { type arpCollector struct {
fs procfs.FS
deviceFilter deviceFilter deviceFilter deviceFilter
entries *prometheus.Desc entries *prometheus.Desc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -45,14 +44,8 @@ func init() {
} }
// NewARPCollector returns a new Collector exposing ARP stats. // NewARPCollector returns a new Collector exposing ARP stats.
func NewARPCollector(logger *slog.Logger) (Collector, error) { func NewARPCollector(logger log.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
}
return &arpCollector{ return &arpCollector{
fs: fs,
deviceFilter: newDeviceFilter(*arpDeviceExclude, *arpDeviceInclude), deviceFilter: newDeviceFilter(*arpDeviceExclude, *arpDeviceInclude),
entries: prometheus.NewDesc( entries: prometheus.NewDesc(
prometheus.BuildFQName(namespace, "arp", "entries"), prometheus.BuildFQName(namespace, "arp", "entries"),
@ -63,63 +56,56 @@ func NewARPCollector(logger *slog.Logger) (Collector, error) {
}, nil }, nil
} }
func getTotalArpEntries(deviceEntries []procfs.ARPEntry) map[string]uint32 { func getARPEntries() (map[string]uint32, error) {
entries := make(map[string]uint32) file, err := os.Open(procFilePath("net/arp"))
if err != nil {
return nil, err
}
defer file.Close()
for _, device := range deviceEntries { entries, err := parseARPEntries(file)
entries[device.Device]++ if err != nil {
return nil, err
} }
return entries return entries, nil
} }
func getTotalArpEntriesRTNL() (map[string]uint32, error) { // TODO: This should get extracted to the github.com/prometheus/procfs package
conn, err := rtnl.Dial(nil) // to support more complete parsing of /proc/net/arp. Instead of adding
if err != nil { // more fields to this function's return values it should get moved and
return nil, err // changed to support each field.
} func parseARPEntries(data io.Reader) (map[string]uint32, error) {
defer conn.Close() scanner := bufio.NewScanner(data)
// Neighbors will also contain IPv6 neighbors, but since this is purely an ARP collector,
// restrict to AF_INET.
neighbors, err := conn.Neighbours(nil, unix.AF_INET)
if err != nil {
return nil, err
}
// Map of interface name to ARP neighbor count.
entries := make(map[string]uint32) entries := make(map[string]uint32)
for _, n := range neighbors { for scanner.Scan() {
// Skip entries which have state NUD_NOARP to conform to output of /proc/net/arp. columns := strings.Fields(scanner.Text())
if n.State&unix.NUD_NOARP == 0 {
entries[n.Interface.Name]++ if len(columns) < 6 {
return nil, fmt.Errorf("unexpected ARP table format")
} }
if columns[0] != "IP" {
deviceIndex := len(columns) - 1
entries[columns[deviceIndex]]++
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("failed to parse ARP info: %w", err)
} }
return entries, nil return entries, nil
} }
func (c *arpCollector) Update(ch chan<- prometheus.Metric) error { func (c *arpCollector) Update(ch chan<- prometheus.Metric) error {
var enumeratedEntry map[string]uint32 entries, err := getARPEntries()
if err != nil {
if *arpNetlink { return fmt.Errorf("could not get ARP entries: %w", err)
var err error
enumeratedEntry, err = getTotalArpEntriesRTNL()
if err != nil {
return fmt.Errorf("could not get ARP entries: %w", err)
}
} else {
entries, err := c.fs.GatherARPEntries()
if err != nil {
return fmt.Errorf("could not get ARP entries: %w", err)
}
enumeratedEntry = getTotalArpEntries(entries)
} }
for device, entryCount := range enumeratedEntry { for device, entryCount := range entries {
if c.deviceFilter.ignored(device) { if c.deviceFilter.ignored(device) {
continue continue
} }
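Note on the hunk above: whichever path is taken (netlink or /proc/net/arp), the result is a per-interface count that becomes one node_arp_entries sample per device. A minimal, standalone sketch of the procfs path, assuming the prometheus/procfs module and a readable /proc; it is not part of the collector itself:

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        panic(err)
    }
    // GatherARPEntries parses /proc/net/arp, the source used when
    // --collector.arp.netlink is set to false.
    arpEntries, err := fs.GatherARPEntries()
    if err != nil {
        panic(err)
    }
    perDevice := make(map[string]uint32)
    for _, e := range arpEntries {
        perDevice[e.Device]++
    }
    // Each key/value pair corresponds to one node_arp_entries{device="..."} sample.
    fmt.Println(perDevice)
}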

View file

@ -18,11 +18,11 @@ package collector
import ( import (
"fmt" "fmt"
"log/slog"
"github.com/alecthomas/kingpin/v2" "github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/bcache" "github.com/prometheus/procfs/bcache"
"gopkg.in/alecthomas/kingpin.v2"
) )
var ( var (
@ -36,12 +36,12 @@ func init() {
// A bcacheCollector is a Collector which gathers metrics from Linux bcache. // A bcacheCollector is a Collector which gathers metrics from Linux bcache.
type bcacheCollector struct { type bcacheCollector struct {
fs bcache.FS fs bcache.FS
logger *slog.Logger logger log.Logger
} }
// NewBcacheCollector returns a newly allocated bcacheCollector. // NewBcacheCollector returns a newly allocated bcacheCollector.
// It exposes a number of Linux bcache statistics. // It exposes a number of Linux bcache statistics.
func NewBcacheCollector(logger *slog.Logger) (Collector, error) { func NewBcacheCollector(logger log.Logger) (Collector, error) {
fs, err := bcache.NewFS(*sysPath) fs, err := bcache.NewFS(*sysPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err) return nil, fmt.Errorf("failed to open sysfs: %w", err)
@ -134,19 +134,14 @@ func bcachePeriodStatsToMetric(ps *bcache.PeriodStats, labelValue string) []bcac
extraLabel: label, extraLabel: label,
extraLabelValue: labelValue, extraLabelValue: labelValue,
}, },
} {
if ps.CacheReadaheads != 0 { name: "cache_readaheads_total",
bcacheReadaheadMetrics := []bcacheMetric{ desc: "Count of times readahead occurred.",
{ value: float64(ps.CacheReadaheads),
name: "cache_readaheads_total", metricType: prometheus.CounterValue,
desc: "Count of times readahead occurred.", extraLabel: label,
value: float64(ps.CacheReadaheads), extraLabelValue: labelValue,
metricType: prometheus.CounterValue, },
extraLabel: label,
extraLabelValue: labelValue,
},
}
metrics = append(metrics, bcacheReadaheadMetrics...)
} }
return metrics return metrics
} }
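The readahead change above guards the metric behind a non-zero check, so cache_readaheads_total is only emitted when the counter is non-zero. Condensed to an equivalent of the new-side code in this hunk (the original builds an intermediate slice; ps, metrics, label and labelValue are the locals already in scope in bcachePeriodStatsToMetric):

if ps.CacheReadaheads != 0 {
    metrics = append(metrics, bcacheMetric{
        name:            "cache_readaheads_total",
        desc:            "Count of times readahead occurred.",
        value:           float64(ps.CacheReadaheads),
        metricType:      prometheus.CounterValue,
        extraLabel:      label,
        extraLabelValue: labelValue,
    })
}
return metrics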

View file

@ -19,17 +19,18 @@ package collector
import ( import (
"errors" "errors"
"fmt" "fmt"
"log/slog"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
type bondingCollector struct { type bondingCollector struct {
slaves, active typedDesc slaves, active typedDesc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -38,7 +39,7 @@ func init() {
// NewBondingCollector returns a newly allocated bondingCollector. // NewBondingCollector returns a newly allocated bondingCollector.
// It exposes the number of configured and active slave of linux bonding interfaces. // It exposes the number of configured and active slave of linux bonding interfaces.
func NewBondingCollector(logger *slog.Logger) (Collector, error) { func NewBondingCollector(logger log.Logger) (Collector, error) {
return &bondingCollector{ return &bondingCollector{
slaves: typedDesc{prometheus.NewDesc( slaves: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(namespace, "bonding", "slaves"), prometheus.BuildFQName(namespace, "bonding", "slaves"),
@ -60,7 +61,7 @@ func (c *bondingCollector) Update(ch chan<- prometheus.Metric) error {
bondingStats, err := readBondingStats(statusfile) bondingStats, err := readBondingStats(statusfile)
if err != nil { if err != nil {
if errors.Is(err, os.ErrNotExist) { if errors.Is(err, os.ErrNotExist) {
c.logger.Debug("Not collecting bonding, file does not exist", "file", statusfile) level.Debug(c.logger).Log("msg", "Not collecting bonding, file does not exist", "file", statusfile)
return ErrNoData return ErrNoData
} }
return err return err
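The ErrNoData return above is the package convention for "nothing to collect on this host": the collector is logged at debug level as returning no data rather than as a hard failure (see execute() in collector.go further down). A minimal sketch of the same pattern with hypothetical names (myCollector and readStats are placeholders, not identifiers from this repository):

func (c *myCollector) Update(ch chan<- prometheus.Metric) error {
    stats, err := readStats("/proc/whatever") // hypothetical source
    if err != nil {
        if errors.Is(err, os.ErrNotExist) {
            // The backing file simply is not there on this system.
            return ErrNoData
        }
        return err
    }
    // ... build and send metrics from stats ...
    _ = stats
    return nil
}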

View file

@ -11,9 +11,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !nobonding
// +build !nobonding
package collector package collector
import ( import (

View file

@ -18,13 +18,13 @@
package collector package collector
import ( import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
"log/slog"
) )
type bootTimeCollector struct { type bootTimeCollector struct {
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -32,7 +32,7 @@ func init() {
} }
// newBootTimeCollector returns a new Collector exposing system boot time on BSD systems. // newBootTimeCollector returns a new Collector exposing system boot time on BSD systems.
func newBootTimeCollector(logger *slog.Logger) (Collector, error) { func newBootTimeCollector(logger log.Logger) (Collector, error) {
return &bootTimeCollector{ return &bootTimeCollector{
logger: logger, logger: logger,
}, nil }, nil

View file

@ -11,27 +11,27 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !noboottime //go:build solaris && !noboottime
// +build !noboottime // +build solaris,!noboottime
package collector package collector
import ( import (
"github.com/go-kit/log"
"github.com/illumos/go-kstat" "github.com/illumos/go-kstat"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"log/slog"
) )
type bootTimeCollector struct { type bootTimeCollector struct {
boottime typedDesc boottime typedDesc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
registerCollector("boottime", defaultEnabled, newBootTimeCollector) registerCollector("boottime", defaultEnabled, newBootTimeCollector)
} }
func newBootTimeCollector(logger *slog.Logger) (Collector, error) { func newBootTimeCollector(logger log.Logger) (Collector, error) {
return &bootTimeCollector{ return &bootTimeCollector{
boottime: typedDesc{ boottime: typedDesc{
prometheus.NewDesc( prometheus.NewDesc(

View file

@ -18,12 +18,13 @@ package collector
import ( import (
"fmt" "fmt"
"log/slog"
"path" "path"
"strings" "strings"
"syscall" "syscall"
dennwc "github.com/dennwc/btrfs" dennwc "github.com/dennwc/btrfs"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/btrfs" "github.com/prometheus/procfs/btrfs"
) )
@ -31,7 +32,7 @@ import (
// A btrfsCollector is a Collector which gathers metrics from Btrfs filesystems. // A btrfsCollector is a Collector which gathers metrics from Btrfs filesystems.
type btrfsCollector struct { type btrfsCollector struct {
fs btrfs.FS fs btrfs.FS
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -39,7 +40,7 @@ func init() {
} }
// NewBtrfsCollector returns a new Collector exposing Btrfs statistics. // NewBtrfsCollector returns a new Collector exposing Btrfs statistics.
func NewBtrfsCollector(logger *slog.Logger) (Collector, error) { func NewBtrfsCollector(logger log.Logger) (Collector, error) {
fs, err := btrfs.NewFS(*sysPath) fs, err := btrfs.NewFS(*sysPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err) return nil, fmt.Errorf("failed to open sysfs: %w", err)
@ -61,15 +62,15 @@ func (c *btrfsCollector) Update(ch chan<- prometheus.Metric) error {
ioctlStatsMap, err := c.getIoctlStats() ioctlStatsMap, err := c.getIoctlStats()
if err != nil { if err != nil {
c.logger.Debug( level.Debug(c.logger).Log(
"Error querying btrfs device stats with ioctl", "msg", "Error querying btrfs device stats with ioctl",
"err", err) "err", err)
ioctlStatsMap = make(map[string]*btrfsIoctlFsStats) ioctlStatsMap = make(map[string]*btrfsIoctlFsStats)
} }
for _, s := range stats { for _, s := range stats {
// match up procfs and ioctl info by filesystem UUID (without dashes) // match up procfs and ioctl info by filesystem UUID (without dashes)
var fsUUID = strings.ReplaceAll(s.UUID, "-", "") var fsUUID = strings.Replace(s.UUID, "-", "", -1)
ioctlStats := ioctlStatsMap[fsUUID] ioctlStats := ioctlStatsMap[fsUUID]
c.updateBtrfsStats(ch, s, ioctlStats) c.updateBtrfsStats(ch, s, ioctlStats)
} }
@ -122,27 +123,24 @@ func (c *btrfsCollector) getIoctlStats() (map[string]*btrfsIoctlFsStats, error)
continue continue
} }
mountPath := rootfsFilePath(mount.mountPoint) fs, err := dennwc.Open(mount.mountPoint, true)
fs, err := dennwc.Open(mountPath, true)
if err != nil { if err != nil {
// Failed to open this mount point, maybe we didn't have permission // Failed to open this mount point, maybe we didn't have permission
// maybe we'll find another mount point for this FS later. // maybe we'll find another mount point for this FS later.
c.logger.Debug( level.Debug(c.logger).Log(
"Error inspecting btrfs mountpoint", "msg", "Error inspecting btrfs mountpoint",
"mountPoint", mountPath, "mountPoint", mount.mountPoint,
"err", err) "err", err)
continue continue
} }
defer fs.Close()
fsInfo, err := fs.Info() fsInfo, err := fs.Info()
if err != nil { if err != nil {
// Failed to get the FS info for some reason, // Failed to get the FS info for some reason,
// perhaps it'll work with a different mount point // perhaps it'll work with a different mount point
c.logger.Debug( level.Debug(c.logger).Log(
"Error querying btrfs filesystem", "msg", "Error querying btrfs filesystem",
"mountPoint", mountPath, "mountPoint", mount.mountPoint,
"err", err) "err", err)
continue continue
} }
@ -155,9 +153,9 @@ func (c *btrfsCollector) getIoctlStats() (map[string]*btrfsIoctlFsStats, error)
deviceStats, err := c.getIoctlDeviceStats(fs, &fsInfo) deviceStats, err := c.getIoctlDeviceStats(fs, &fsInfo)
if err != nil { if err != nil {
c.logger.Debug( level.Debug(c.logger).Log(
"Error querying btrfs device stats", "msg", "Error querying btrfs device stats",
"mountPoint", mountPath, "mountPoint", mount.mountPoint,
"err", err) "err", err)
continue continue
} }
@ -274,30 +272,6 @@ func (c *btrfsCollector) getMetrics(s *btrfs.Stats, ioctlStats *btrfsIoctlFsStat
metricType: prometheus.GaugeValue, metricType: prometheus.GaugeValue,
value: float64(s.Allocation.GlobalRsvSize), value: float64(s.Allocation.GlobalRsvSize),
}, },
{
name: "commits_total",
desc: "The total number of commits that have occurred.",
metricType: prometheus.CounterValue,
value: float64(s.CommitStats.Commits),
},
{
name: "last_commit_seconds",
desc: "Duration of the most recent commit, in seconds.",
metricType: prometheus.GaugeValue,
value: float64(s.CommitStats.LastCommitMs) / 1000,
},
{
name: "max_commit_seconds",
desc: "Duration of the slowest commit, in seconds.",
metricType: prometheus.GaugeValue,
value: float64(s.CommitStats.MaxCommitMs) / 1000,
},
{
name: "commit_seconds_total",
desc: "Sum of the duration of all commits, in seconds.",
metricType: prometheus.CounterValue,
value: float64(s.CommitStats.TotalCommitMs) / 1000,
},
} }
// Information about data, metadata and system data. // Information about data, metadata and system data.
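Earlier in this file, getIoctlStats keys its results by the filesystem UUID with the dashes stripped, so the UUID from the btrfs stats (reported with dashes) has to be normalised before the lookup in Update above. A small illustration (the UUID literal is arbitrary):

uuid := "0abb23a9-579b-43e6-ad30-227ef47fcb9d" // as reported in the btrfs stats
key := strings.ReplaceAll(uuid, "-", "")       // "0abb23a9579b43e6ad30227ef47fcb9d"
if ioctlStats, ok := ioctlStatsMap[key]; ok {
    // enrich the stats with the per-device ioctl stats
    _ = ioctlStats
}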

View file

@ -27,10 +27,6 @@ var expectedBtrfsMetrics = [][]btrfsMetric{
{ {
{name: "info", value: 1, extraLabel: []string{"label"}, extraLabelValue: []string{"fixture"}}, {name: "info", value: 1, extraLabel: []string{"label"}, extraLabelValue: []string{"fixture"}},
{name: "global_rsv_size_bytes", value: 1.6777216e+07}, {name: "global_rsv_size_bytes", value: 1.6777216e+07},
{name: "commits_total", value: 258051, metricType: 1},
{name: "last_commit_seconds", value: 1.0},
{name: "max_commit_seconds", value: 51.462},
{name: "commit_seconds_total", value: 47836.090, metricType: 1},
{name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"data"}}, {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"data"}},
{name: "used_bytes", value: 8.08189952e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}}, {name: "used_bytes", value: 8.08189952e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}},
{name: "size_bytes", value: 2.147483648e+09, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}}, {name: "size_bytes", value: 2.147483648e+09, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}},
@ -49,10 +45,6 @@ var expectedBtrfsMetrics = [][]btrfsMetric{
{ {
{name: "info", value: 1, extraLabel: []string{"label"}, extraLabelValue: []string{""}}, {name: "info", value: 1, extraLabel: []string{"label"}, extraLabelValue: []string{""}},
{name: "global_rsv_size_bytes", value: 1.6777216e+07}, {name: "global_rsv_size_bytes", value: 1.6777216e+07},
{name: "commits_total", value: 0, metricType: 1},
{name: "last_commit_seconds", value: 0},
{name: "max_commit_seconds", value: 0},
{name: "commit_seconds_total", value: 0, metricType: 1},
{name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"data"}}, {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"data"}},
{name: "used_bytes", value: 0, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}}, {name: "used_bytes", value: 0, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}},
{name: "size_bytes", value: 6.44087808e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}}, {name: "size_bytes", value: 6.44087808e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}},
@ -100,10 +92,7 @@ func checkMetric(exp, got *btrfsMetric) bool {
} }
func TestBtrfs(t *testing.T) { func TestBtrfs(t *testing.T) {
fs, err := btrfs.NewFS("fixtures/sys") fs, _ := btrfs.NewFS("fixtures/sys")
if err != nil {
t.Fatal(err)
}
collector := &btrfsCollector{fs: fs} collector := &btrfsCollector{fs: fs}
stats, err := collector.fs.Stats() stats, err := collector.fs.Stats()

View file

@ -18,9 +18,10 @@ package collector
import ( import (
"fmt" "fmt"
"log/slog"
"strconv" "strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs" "github.com/prometheus/procfs"
) )
@ -32,7 +33,7 @@ const (
type buddyinfoCollector struct { type buddyinfoCollector struct {
fs procfs.FS fs procfs.FS
desc *prometheus.Desc desc *prometheus.Desc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -40,7 +41,7 @@ func init() {
} }
// NewBuddyinfoCollector returns a new Collector exposing buddyinfo stats. // NewBuddyinfoCollector returns a new Collector exposing buddyinfo stats.
func NewBuddyinfoCollector(logger *slog.Logger) (Collector, error) { func NewBuddyinfoCollector(logger log.Logger) (Collector, error) {
desc := prometheus.NewDesc( desc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, buddyInfoSubsystem, "blocks"), prometheus.BuildFQName(namespace, buddyInfoSubsystem, "blocks"),
"Count of free blocks according to size.", "Count of free blocks according to size.",
@ -61,7 +62,7 @@ func (c *buddyinfoCollector) Update(ch chan<- prometheus.Metric) error {
return fmt.Errorf("couldn't get buddyinfo: %w", err) return fmt.Errorf("couldn't get buddyinfo: %w", err)
} }
c.logger.Debug("Set node_buddy", "buddyInfo", buddyInfo) level.Debug(c.logger).Log("msg", "Set node_buddy", "buddyInfo", buddyInfo)
for _, entry := range buddyInfo { for _, entry := range buddyInfo {
for size, value := range entry.Sizes { for size, value := range entry.Sizes {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
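For reference, the buddyinfo data behind this collector can be read on its own with the procfs module; a minimal sketch assuming a readable /proc, not part of the collector:

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        panic(err)
    }
    buddyInfo, err := fs.BuddyInfo() // parses /proc/buddyinfo
    if err != nil {
        panic(err)
    }
    for _, entry := range buddyInfo {
        // entry.Sizes holds the count of free blocks per allocation order,
        // which the collector exports labelled by node, zone and size.
        fmt.Println(entry.Node, entry.Zone, entry.Sizes)
    }
}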

View file

@ -18,8 +18,8 @@ package collector
import ( import (
"fmt" "fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs" "github.com/prometheus/procfs"
) )
@ -30,7 +30,7 @@ type cgroupSummaryCollector struct {
fs procfs.FS fs procfs.FS
cgroups *prometheus.Desc cgroups *prometheus.Desc
enabled *prometheus.Desc enabled *prometheus.Desc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -38,7 +38,7 @@ func init() {
} }
// NewCgroupSummaryCollector returns a new Collector exposing a summary of cgroups. // NewCgroupSummaryCollector returns a new Collector exposing a summary of cgroups.
func NewCgroupSummaryCollector(logger *slog.Logger) (Collector, error) { func NewCgroupSummaryCollector(logger log.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath) fs, err := procfs.NewFS(*procPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err) return nil, fmt.Errorf("failed to open procfs: %w", err)

View file

@ -17,12 +17,13 @@ package collector
import ( import (
"errors" "errors"
"fmt" "fmt"
"log/slog"
"sync" "sync"
"time" "time"
"github.com/alecthomas/kingpin/v2" "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
kingpin "gopkg.in/alecthomas/kingpin.v2"
) )
// Namespace defines the common namespace to be used by all metrics. // Namespace defines the common namespace to be used by all metrics.
@ -49,14 +50,14 @@ const (
) )
var ( var (
factories = make(map[string]func(logger *slog.Logger) (Collector, error)) factories = make(map[string]func(logger log.Logger) (Collector, error))
initiatedCollectorsMtx = sync.Mutex{} initiatedCollectorsMtx = sync.Mutex{}
initiatedCollectors = make(map[string]Collector) initiatedCollectors = make(map[string]Collector)
collectorState = make(map[string]*bool) collectorState = make(map[string]*bool)
forcedCollectors = map[string]bool{} // collectors which have been explicitly enabled or disabled forcedCollectors = map[string]bool{} // collectors which have been explicitly enabled or disabled
) )
func registerCollector(collector string, isDefaultEnabled bool, factory func(logger *slog.Logger) (Collector, error)) { func registerCollector(collector string, isDefaultEnabled bool, factory func(logger log.Logger) (Collector, error)) {
var helpDefaultState string var helpDefaultState string
if isDefaultEnabled { if isDefaultEnabled {
helpDefaultState = "enabled" helpDefaultState = "enabled"
@ -77,7 +78,7 @@ func registerCollector(collector string, isDefaultEnabled bool, factory func(log
// NodeCollector implements the prometheus.Collector interface. // NodeCollector implements the prometheus.Collector interface.
type NodeCollector struct { type NodeCollector struct {
Collectors map[string]Collector Collectors map[string]Collector
logger *slog.Logger logger log.Logger
} }
// DisableDefaultCollectors sets the collector state to false for all collectors which // DisableDefaultCollectors sets the collector state to false for all collectors which
@ -103,7 +104,7 @@ func collectorFlagAction(collector string) func(ctx *kingpin.ParseContext) error
} }
// NewNodeCollector creates a new NodeCollector. // NewNodeCollector creates a new NodeCollector.
func NewNodeCollector(logger *slog.Logger, filters ...string) (*NodeCollector, error) { func NewNodeCollector(logger log.Logger, filters ...string) (*NodeCollector, error) {
f := make(map[string]bool) f := make(map[string]bool)
for _, filter := range filters { for _, filter := range filters {
enabled, exist := collectorState[filter] enabled, exist := collectorState[filter]
@ -125,7 +126,7 @@ func NewNodeCollector(logger *slog.Logger, filters ...string) (*NodeCollector, e
if collector, ok := initiatedCollectors[key]; ok { if collector, ok := initiatedCollectors[key]; ok {
collectors[key] = collector collectors[key] = collector
} else { } else {
collector, err := factories[key](logger.With("collector", key)) collector, err := factories[key](log.With(logger, "collector", key))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -155,7 +156,7 @@ func (n NodeCollector) Collect(ch chan<- prometheus.Metric) {
wg.Wait() wg.Wait()
} }
func execute(name string, c Collector, ch chan<- prometheus.Metric, logger *slog.Logger) { func execute(name string, c Collector, ch chan<- prometheus.Metric, logger log.Logger) {
begin := time.Now() begin := time.Now()
err := c.Update(ch) err := c.Update(ch)
duration := time.Since(begin) duration := time.Since(begin)
@ -163,13 +164,13 @@ func execute(name string, c Collector, ch chan<- prometheus.Metric, logger *slog
if err != nil { if err != nil {
if IsNoDataError(err) { if IsNoDataError(err) {
logger.Debug("collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err) level.Debug(logger).Log("msg", "collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err)
} else { } else {
logger.Error("collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err) level.Error(logger).Log("msg", "collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err)
} }
success = 0 success = 0
} else { } else {
logger.Debug("collector succeeded", "name", name, "duration_seconds", duration.Seconds()) level.Debug(logger).Log("msg", "collector succeeded", "name", name, "duration_seconds", duration.Seconds())
success = 1 success = 1
} }
ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name) ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name)
@ -197,49 +198,3 @@ var ErrNoData = errors.New("collector returned no data")
func IsNoDataError(err error) bool { func IsNoDataError(err error) bool {
return err == ErrNoData return err == ErrNoData
} }
// pushMetric helps construct and convert a variety of value types into Prometheus float64 metrics.
func pushMetric(ch chan<- prometheus.Metric, fieldDesc *prometheus.Desc, name string, value interface{}, valueType prometheus.ValueType, labelValues ...string) {
var fVal float64
switch val := value.(type) {
case uint8:
fVal = float64(val)
case uint16:
fVal = float64(val)
case uint32:
fVal = float64(val)
case uint64:
fVal = float64(val)
case int64:
fVal = float64(val)
case *uint8:
if val == nil {
return
}
fVal = float64(*val)
case *uint16:
if val == nil {
return
}
fVal = float64(*val)
case *uint32:
if val == nil {
return
}
fVal = float64(*val)
case *uint64:
if val == nil {
return
}
fVal = float64(*val)
case *int64:
if val == nil {
return
}
fVal = float64(*val)
default:
return
}
ch <- prometheus.MustNewConstMetric(fieldDesc, valueType, fVal, labelValues...)
}
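pushMetric above accepts plain and pointer integer types and silently emits nothing for nil pointers, so callers can pass optional fields straight through without nil checks. A hypothetical usage sketch (rxBytesDesc, the metric name and the "eth0" label are illustrative, not from this file):

var rxBytes *uint64 // nil when the source did not report the field

// Emits nothing while rxBytes is nil, a counter sample once it is set.
pushMetric(ch, rxBytesDesc, "rx_bytes", rxBytes, prometheus.CounterValue, "eth0")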

View file

@ -19,9 +19,10 @@ package collector
import ( import (
"errors" "errors"
"fmt" "fmt"
"log/slog"
"os" "os"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs" "github.com/prometheus/procfs"
) )
@ -37,7 +38,7 @@ type conntrackCollector struct {
drop *prometheus.Desc drop *prometheus.Desc
earlyDrop *prometheus.Desc earlyDrop *prometheus.Desc
searchRestart *prometheus.Desc searchRestart *prometheus.Desc
logger *slog.Logger logger log.Logger
} }
type conntrackStatistics struct { type conntrackStatistics struct {
@ -56,7 +57,7 @@ func init() {
} }
// NewConntrackCollector returns a new Collector exposing conntrack stats. // NewConntrackCollector returns a new Collector exposing conntrack stats.
func NewConntrackCollector(logger *slog.Logger) (Collector, error) { func NewConntrackCollector(logger log.Logger) (Collector, error) {
return &conntrackCollector{ return &conntrackCollector{
current: prometheus.NewDesc( current: prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "nf_conntrack_entries"), prometheus.BuildFQName(namespace, "", "nf_conntrack_entries"),
@ -153,7 +154,7 @@ func (c *conntrackCollector) Update(ch chan<- prometheus.Metric) error {
func (c *conntrackCollector) handleErr(err error) error { func (c *conntrackCollector) handleErr(err error) error {
if errors.Is(err, os.ErrNotExist) { if errors.Is(err, os.ErrNotExist) {
c.logger.Debug("conntrack probably not loaded") level.Debug(c.logger).Log("msg", "conntrack probably not loaded")
return ErrNoData return ErrNoData
} }
return fmt.Errorf("failed to retrieve conntrack stats: %w", err) return fmt.Errorf("failed to retrieve conntrack stats: %w", err)

View file

@ -1,132 +0,0 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nocpu
// +build !nocpu
package collector
/*
#include <unistd.h> // Include the standard Unix header
#include <errno.h> // For errno
*/
import "C"
import (
"fmt"
"log/slog"
"strconv"
"github.com/power-devops/perfstat"
"github.com/prometheus/client_golang/prometheus"
)
var (
nodeCPUPhysicalSecondsDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "physical_seconds_total"),
"Seconds the physical CPUs spent in each mode.",
[]string{"cpu", "mode"}, nil,
)
nodeCPUSRunQueueDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "runqueue"),
"Length of the run queue.", []string{"cpu"}, nil,
)
nodeCPUFlagsDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "flags"),
"CPU flags.",
[]string{"cpu", "flag"}, nil,
)
nodeCPUContextSwitchDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "context_switches_total"),
"Number of context switches.",
[]string{"cpu"}, nil,
)
)
type cpuCollector struct {
cpu typedDesc
cpuPhysical typedDesc
cpuRunQueue typedDesc
cpuFlags typedDesc
cpuContextSwitch typedDesc
logger *slog.Logger
tickPerSecond float64
purrTicksPerSecond float64
}
func init() {
registerCollector("cpu", defaultEnabled, NewCpuCollector)
}
func tickPerSecond() (float64, error) {
ticks, err := C.sysconf(C._SC_CLK_TCK)
if ticks == -1 || err != nil {
return 0, fmt.Errorf("failed to get clock ticks per second: %v", err)
}
return float64(ticks), nil
}
func NewCpuCollector(logger *slog.Logger) (Collector, error) {
ticks, err := tickPerSecond()
if err != nil {
return nil, err
}
pconfig, err := perfstat.PartitionStat()
if err != nil {
return nil, err
}
return &cpuCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
cpuPhysical: typedDesc{nodeCPUPhysicalSecondsDesc, prometheus.CounterValue},
cpuRunQueue: typedDesc{nodeCPUSRunQueueDesc, prometheus.GaugeValue},
cpuFlags: typedDesc{nodeCPUFlagsDesc, prometheus.GaugeValue},
cpuContextSwitch: typedDesc{nodeCPUContextSwitchDesc, prometheus.CounterValue},
logger: logger,
tickPerSecond: ticks,
purrTicksPerSecond: float64(pconfig.ProcessorMhz * 1e6),
}, nil
}
func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error {
stats, err := perfstat.CpuStat()
if err != nil {
return err
}
for n, stat := range stats {
// LPAR metrics
ch <- c.cpu.mustNewConstMetric(float64(stat.User)/c.tickPerSecond, strconv.Itoa(n), "user")
ch <- c.cpu.mustNewConstMetric(float64(stat.Sys)/c.tickPerSecond, strconv.Itoa(n), "system")
ch <- c.cpu.mustNewConstMetric(float64(stat.Idle)/c.tickPerSecond, strconv.Itoa(n), "idle")
ch <- c.cpu.mustNewConstMetric(float64(stat.Wait)/c.tickPerSecond, strconv.Itoa(n), "wait")
// Physical CPU metrics
ch <- c.cpuPhysical.mustNewConstMetric(float64(stat.PIdle)/c.purrTicksPerSecond, strconv.Itoa(n), "pidle")
ch <- c.cpuPhysical.mustNewConstMetric(float64(stat.PUser)/c.purrTicksPerSecond, strconv.Itoa(n), "puser")
ch <- c.cpuPhysical.mustNewConstMetric(float64(stat.PSys)/c.purrTicksPerSecond, strconv.Itoa(n), "psys")
ch <- c.cpuPhysical.mustNewConstMetric(float64(stat.PWait)/c.purrTicksPerSecond, strconv.Itoa(n), "pwait")
// Run queue length
ch <- c.cpuRunQueue.mustNewConstMetric(float64(stat.RunQueue), strconv.Itoa(n))
// Flags
ch <- c.cpuFlags.mustNewConstMetric(float64(stat.SpurrFlag), strconv.Itoa(n), "spurr")
// Context switches
ch <- c.cpuContextSwitch.mustNewConstMetric(float64(stat.CSwitches), strconv.Itoa(n))
}
return nil
}
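The AIX cpu collector shown above converts raw tick counters to seconds before export: LPAR counters are divided by the clock tick rate from sysconf(_SC_CLK_TCK), and PURR counters by the processor frequency in Hz. As plain arithmetic, with illustrative values standing in for the ones read at start-up:

clkTck := 100.0        // assumed result of sysconf(_SC_CLK_TCK)
purrHz := 3500.0 * 1e6 // assumed pconfig.ProcessorMhz * 1e6

userSeconds := float64(stat.User) / clkTck   // LPAR mode, node_cpu_seconds_total
puserSeconds := float64(stat.PUser) / purrHz // physical mode, node_cpu_physical_seconds_total
_, _ = userSeconds, puserSeconds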

View file

@ -23,10 +23,10 @@ import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"log/slog"
"strconv" "strconv"
"unsafe" "unsafe"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -53,7 +53,7 @@ const ClocksPerSec = float64(C.CLK_TCK)
type statCollector struct { type statCollector struct {
cpu *prometheus.Desc cpu *prometheus.Desc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -61,7 +61,7 @@ func init() {
} }
// NewCPUCollector returns a new Collector exposing CPU stats. // NewCPUCollector returns a new Collector exposing CPU stats.
func NewCPUCollector(logger *slog.Logger) (Collector, error) { func NewCPUCollector(logger log.Logger) (Collector, error) {
return &statCollector{ return &statCollector{
cpu: nodeCPUSecondsDesc, cpu: nodeCPUSecondsDesc,
logger: logger, logger: logger,

View file

@ -18,10 +18,10 @@ package collector
import ( import (
"errors" "errors"
"log/slog"
"strconv" "strconv"
"unsafe" "unsafe"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -78,7 +78,7 @@ const maxCPUTimesLen = C.MAXCPU * C.CPUSTATES
type statCollector struct { type statCollector struct {
cpu *prometheus.Desc cpu *prometheus.Desc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -86,7 +86,7 @@ func init() {
} }
// NewStatCollector returns a new Collector exposing CPU stats. // NewStatCollector returns a new Collector exposing CPU stats.
func NewStatCollector(logger *slog.Logger) (Collector, error) { func NewStatCollector(logger log.Logger) (Collector, error) {
return &statCollector{ return &statCollector{
cpu: nodeCPUSecondsDesc, cpu: nodeCPUSecondsDesc,
logger: logger, logger: logger,

View file

@ -18,11 +18,12 @@ package collector
import ( import (
"fmt" "fmt"
"log/slog"
"math" "math"
"strconv" "strconv"
"unsafe" "unsafe"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
@ -84,7 +85,7 @@ func getCPUTimes() ([]cputime, error) {
type statCollector struct { type statCollector struct {
cpu typedDesc cpu typedDesc
temp typedDesc temp typedDesc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -92,7 +93,7 @@ func init() {
} }
// NewStatCollector returns a new Collector exposing CPU stats. // NewStatCollector returns a new Collector exposing CPU stats.
func NewStatCollector(logger *slog.Logger) (Collector, error) { func NewStatCollector(logger log.Logger) (Collector, error) {
return &statCollector{ return &statCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
temp: typedDesc{prometheus.NewDesc( temp: typedDesc{prometheus.NewDesc(
@ -133,11 +134,11 @@ func (c *statCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil { if err != nil {
if err == unix.ENOENT { if err == unix.ENOENT {
// No temperature information for this CPU // No temperature information for this CPU
c.logger.Debug("no temperature information for CPU", "cpu", cpu) level.Debug(c.logger).Log("msg", "no temperature information for CPU", "cpu", cpu)
} else { } else {
// Unexpected error // Unexpected error
ch <- c.temp.mustNewConstMetric(math.NaN(), lcpu) ch <- c.temp.mustNewConstMetric(math.NaN(), lcpu)
c.logger.Error("failed to query CPU temperature for CPU", "cpu", cpu, "err", err) level.Error(c.logger).Log("msg", "failed to query CPU temperature for CPU", "cpu", cpu, "err", err)
} }
continue continue
} }

View file

@ -17,39 +17,33 @@
package collector package collector
import ( import (
"errors"
"fmt" "fmt"
"log/slog"
"os" "os"
"path/filepath" "path/filepath"
"regexp" "regexp"
"slices"
"strconv" "strconv"
"sync" "sync"
"golang.org/x/exp/maps" "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs" "github.com/prometheus/procfs"
"github.com/prometheus/procfs/sysfs" "github.com/prometheus/procfs/sysfs"
"gopkg.in/alecthomas/kingpin.v2"
) )
type cpuCollector struct { type cpuCollector struct {
procfs procfs.FS fs procfs.FS
sysfs sysfs.FS
cpu *prometheus.Desc cpu *prometheus.Desc
cpuInfo *prometheus.Desc cpuInfo *prometheus.Desc
cpuFrequencyHz *prometheus.Desc
cpuFlagsInfo *prometheus.Desc cpuFlagsInfo *prometheus.Desc
cpuBugsInfo *prometheus.Desc cpuBugsInfo *prometheus.Desc
cpuGuest *prometheus.Desc cpuGuest *prometheus.Desc
cpuCoreThrottle *prometheus.Desc cpuCoreThrottle *prometheus.Desc
cpuPackageThrottle *prometheus.Desc cpuPackageThrottle *prometheus.Desc
cpuIsolated *prometheus.Desc cpuIsolated *prometheus.Desc
logger *slog.Logger logger log.Logger
cpuOnline *prometheus.Desc cpuStats []procfs.CPUStat
cpuStats map[int64]procfs.CPUStat
cpuStatsMutex sync.Mutex cpuStatsMutex sync.Mutex
isolatedCpus []uint16 isolatedCpus []uint16
@ -73,39 +67,33 @@ func init() {
} }
// NewCPUCollector returns a new Collector exposing kernel/system statistics. // NewCPUCollector returns a new Collector exposing kernel/system statistics.
func NewCPUCollector(logger *slog.Logger) (Collector, error) { func NewCPUCollector(logger log.Logger) (Collector, error) {
pfs, err := procfs.NewFS(*procPath) fs, err := procfs.NewFS(*procPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err) return nil, fmt.Errorf("failed to open procfs: %w", err)
} }
sfs, err := sysfs.NewFS(*sysPath) sysfs, err := sysfs.NewFS(*sysPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err) return nil, fmt.Errorf("failed to open sysfs: %w", err)
} }
isolcpus, err := sfs.IsolatedCPUs() isolcpus, err := sysfs.IsolatedCPUs()
if err != nil { if err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
return nil, fmt.Errorf("unable to get isolated cpus: %w", err) return nil, fmt.Errorf("Unable to get isolated cpus: %w", err)
} }
logger.Debug("Could not open isolated file", "error", err) level.Debug(logger).Log("msg", "Could not open isolated file", "error", err)
} }
c := &cpuCollector{ c := &cpuCollector{
procfs: pfs, fs: fs,
sysfs: sfs, cpu: nodeCPUSecondsDesc,
cpu: nodeCPUSecondsDesc,
cpuInfo: prometheus.NewDesc( cpuInfo: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "info"), prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "info"),
"CPU information from /proc/cpuinfo.", "CPU information from /proc/cpuinfo.",
[]string{"package", "core", "cpu", "vendor", "family", "model", "model_name", "microcode", "stepping", "cachesize"}, nil, []string{"package", "core", "cpu", "vendor", "family", "model", "model_name", "microcode", "stepping", "cachesize"}, nil,
), ),
cpuFrequencyHz: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"),
"CPU frequency in hertz from /proc/cpuinfo.",
[]string{"package", "core", "cpu"}, nil,
),
cpuFlagsInfo: prometheus.NewDesc( cpuFlagsInfo: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "flag_info"), prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "flag_info"),
"The `flags` field of CPU information from /proc/cpuinfo taken from the first core.", "The `flags` field of CPU information from /proc/cpuinfo taken from the first core.",
@ -136,14 +124,8 @@ func NewCPUCollector(logger *slog.Logger) (Collector, error) {
"Whether each core is isolated, information from /sys/devices/system/cpu/isolated.", "Whether each core is isolated, information from /sys/devices/system/cpu/isolated.",
[]string{"cpu"}, nil, []string{"cpu"}, nil,
), ),
cpuOnline: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "online"),
"CPUs that are online and being scheduled.",
[]string{"cpu"}, nil,
),
logger: logger, logger: logger,
isolatedCpus: isolcpus, isolatedCpus: isolcpus,
cpuStats: make(map[int64]procfs.CPUStat),
} }
err = c.compileIncludeFlags(flagsInclude, bugsInclude) err = c.compileIncludeFlags(flagsInclude, bugsInclude)
if err != nil { if err != nil {
@ -155,7 +137,7 @@ func NewCPUCollector(logger *slog.Logger) (Collector, error) {
func (c *cpuCollector) compileIncludeFlags(flagsIncludeFlag, bugsIncludeFlag *string) error { func (c *cpuCollector) compileIncludeFlags(flagsIncludeFlag, bugsIncludeFlag *string) error {
if (*flagsIncludeFlag != "" || *bugsIncludeFlag != "") && !*enableCPUInfo { if (*flagsIncludeFlag != "" || *bugsIncludeFlag != "") && !*enableCPUInfo {
*enableCPUInfo = true *enableCPUInfo = true
c.logger.Info("--collector.cpu.info has been set to `true` because you set the following flags, like --collector.cpu.info.flags-include and --collector.cpu.info.bugs-include") level.Info(c.logger).Log("msg", "--collector.cpu.info has been set to `true` because you set the following flags, like --collector.cpu.info.flags-include and --collector.cpu.info.bugs-include")
} }
var err error var err error
@ -187,21 +169,12 @@ func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error {
if c.isolatedCpus != nil { if c.isolatedCpus != nil {
c.updateIsolated(ch) c.updateIsolated(ch)
} }
err := c.updateThermalThrottle(ch) return c.updateThermalThrottle(ch)
if err != nil {
return err
}
err = c.updateOnline(ch)
if err != nil {
return err
}
return nil
} }
// updateInfo reads /proc/cpuinfo // updateInfo reads /proc/cpuinfo
func (c *cpuCollector) updateInfo(ch chan<- prometheus.Metric) error { func (c *cpuCollector) updateInfo(ch chan<- prometheus.Metric) error {
info, err := c.procfs.CPUInfo() info, err := c.fs.CPUInfo()
if err != nil { if err != nil {
return err return err
} }
@ -221,20 +194,6 @@ func (c *cpuCollector) updateInfo(ch chan<- prometheus.Metric) error {
cpu.CacheSize) cpu.CacheSize)
} }
cpuFreqEnabled, ok := collectorState["cpufreq"]
if !ok || cpuFreqEnabled == nil {
c.logger.Debug("cpufreq key missing or nil value in collectorState map")
} else if !*cpuFreqEnabled {
for _, cpu := range info {
ch <- prometheus.MustNewConstMetric(c.cpuFrequencyHz,
prometheus.GaugeValue,
cpu.CPUMHz*1e6,
cpu.PhysicalID,
cpu.CoreID,
strconv.Itoa(int(cpu.Processor)))
}
}
if len(info) != 0 { if len(info) != 0 {
cpu := info[0] cpu := info[0]
if err := updateFieldInfo(cpu.Flags, c.cpuFlagsIncludeRegexp, c.cpuFlagsInfo, ch); err != nil { if err := updateFieldInfo(cpu.Flags, c.cpuFlagsIncludeRegexp, c.cpuFlagsInfo, ch); err != nil {
@ -287,12 +246,12 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error
// topology/physical_package_id // topology/physical_package_id
if physicalPackageID, err = readUintFromFile(filepath.Join(cpu, "topology", "physical_package_id")); err != nil { if physicalPackageID, err = readUintFromFile(filepath.Join(cpu, "topology", "physical_package_id")); err != nil {
c.logger.Debug("CPU is missing physical_package_id", "cpu", cpu) level.Debug(c.logger).Log("msg", "CPU is missing physical_package_id", "cpu", cpu)
continue continue
} }
// topology/core_id // topology/core_id
if coreID, err = readUintFromFile(filepath.Join(cpu, "topology", "core_id")); err != nil { if coreID, err = readUintFromFile(filepath.Join(cpu, "topology", "core_id")); err != nil {
c.logger.Debug("CPU is missing core_id", "cpu", cpu) level.Debug(c.logger).Log("msg", "CPU is missing core_id", "cpu", cpu)
continue continue
} }
@ -310,7 +269,7 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error
if coreThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "core_throttle_count")); err == nil { if coreThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "core_throttle_count")); err == nil {
packageCoreThrottles[physicalPackageID][coreID] = coreThrottleCount packageCoreThrottles[physicalPackageID][coreID] = coreThrottleCount
} else { } else {
c.logger.Debug("CPU is missing core_throttle_count", "cpu", cpu) level.Debug(c.logger).Log("msg", "CPU is missing core_throttle_count", "cpu", cpu)
} }
} }
@ -320,7 +279,7 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error
if packageThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "package_throttle_count")); err == nil { if packageThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "package_throttle_count")); err == nil {
packageThrottles[physicalPackageID] = packageThrottleCount packageThrottles[physicalPackageID] = packageThrottleCount
} else { } else {
c.logger.Debug("CPU is missing package_throttle_count", "cpu", cpu) level.Debug(c.logger).Log("msg", "CPU is missing package_throttle_count", "cpu", cpu)
} }
} }
} }
@ -352,31 +311,9 @@ func (c *cpuCollector) updateIsolated(ch chan<- prometheus.Metric) {
} }
} }
// updateOnline reads /sys/devices/system/cpu/cpu*/online through sysfs and exports online status metrics.
func (c *cpuCollector) updateOnline(ch chan<- prometheus.Metric) error {
cpus, err := c.sysfs.CPUs()
if err != nil {
return err
}
// No-op if the system does not support CPU online stats.
cpu0 := cpus[0]
if _, err := cpu0.Online(); err != nil && errors.Is(err, os.ErrNotExist) {
return nil
}
for _, cpu := range cpus {
setOnline := float64(0)
if online, _ := cpu.Online(); online {
setOnline = 1
}
ch <- prometheus.MustNewConstMetric(c.cpuOnline, prometheus.GaugeValue, setOnline, cpu.Number())
}
return nil
}
// updateStat reads /proc/stat through procfs and exports CPU-related metrics. // updateStat reads /proc/stat through procfs and exports CPU-related metrics.
func (c *cpuCollector) updateStat(ch chan<- prometheus.Metric) error { func (c *cpuCollector) updateStat(ch chan<- prometheus.Metric) error {
stats, err := c.procfs.Stat() stats, err := c.fs.Stat()
if err != nil { if err != nil {
return err return err
} }
@ -387,7 +324,7 @@ func (c *cpuCollector) updateStat(ch chan<- prometheus.Metric) error {
c.cpuStatsMutex.Lock() c.cpuStatsMutex.Lock()
defer c.cpuStatsMutex.Unlock() defer c.cpuStatsMutex.Unlock()
for cpuID, cpuStat := range c.cpuStats { for cpuID, cpuStat := range c.cpuStats {
cpuNum := strconv.Itoa(int(cpuID)) cpuNum := strconv.Itoa(cpuID)
ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.User, cpuNum, "user") ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.User, cpuNum, "user")
ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Nice, cpuNum, "nice") ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Nice, cpuNum, "nice")
ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.System, cpuNum, "system") ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.System, cpuNum, "system")
@ -408,90 +345,82 @@ func (c *cpuCollector) updateStat(ch chan<- prometheus.Metric) error {
} }
// updateCPUStats updates the internal cache of CPU stats. // updateCPUStats updates the internal cache of CPU stats.
func (c *cpuCollector) updateCPUStats(newStats map[int64]procfs.CPUStat) { func (c *cpuCollector) updateCPUStats(newStats []procfs.CPUStat) {
// Acquire a lock to update the stats. // Acquire a lock to update the stats.
c.cpuStatsMutex.Lock() c.cpuStatsMutex.Lock()
defer c.cpuStatsMutex.Unlock() defer c.cpuStatsMutex.Unlock()
// Reset the cache if the list of CPUs has changed. // Reset the cache if the list of CPUs has changed.
for i, n := range newStats { if len(c.cpuStats) != len(newStats) {
cpuStats := c.cpuStats[i] c.cpuStats = make([]procfs.CPUStat, len(newStats))
// If idle jumps backwards by more than X seconds, assume we had a hotplug event and reset the stats for this CPU.
if (cpuStats.Idle - n.Idle) >= jumpBackSeconds {
c.logger.Debug(jumpBackDebugMessage, "cpu", i, "old_value", cpuStats.Idle, "new_value", n.Idle)
cpuStats = procfs.CPUStat{}
}
if n.Idle >= cpuStats.Idle {
cpuStats.Idle = n.Idle
} else {
c.logger.Debug("CPU Idle counter jumped backwards", "cpu", i, "old_value", cpuStats.Idle, "new_value", n.Idle)
}
if n.User >= cpuStats.User {
cpuStats.User = n.User
} else {
c.logger.Debug("CPU User counter jumped backwards", "cpu", i, "old_value", cpuStats.User, "new_value", n.User)
}
if n.Nice >= cpuStats.Nice {
cpuStats.Nice = n.Nice
} else {
c.logger.Debug("CPU Nice counter jumped backwards", "cpu", i, "old_value", cpuStats.Nice, "new_value", n.Nice)
}
if n.System >= cpuStats.System {
cpuStats.System = n.System
} else {
c.logger.Debug("CPU System counter jumped backwards", "cpu", i, "old_value", cpuStats.System, "new_value", n.System)
}
if n.Iowait >= cpuStats.Iowait {
cpuStats.Iowait = n.Iowait
} else {
c.logger.Debug("CPU Iowait counter jumped backwards", "cpu", i, "old_value", cpuStats.Iowait, "new_value", n.Iowait)
}
if n.IRQ >= cpuStats.IRQ {
cpuStats.IRQ = n.IRQ
} else {
c.logger.Debug("CPU IRQ counter jumped backwards", "cpu", i, "old_value", cpuStats.IRQ, "new_value", n.IRQ)
}
if n.SoftIRQ >= cpuStats.SoftIRQ {
cpuStats.SoftIRQ = n.SoftIRQ
} else {
c.logger.Debug("CPU SoftIRQ counter jumped backwards", "cpu", i, "old_value", cpuStats.SoftIRQ, "new_value", n.SoftIRQ)
}
if n.Steal >= cpuStats.Steal {
cpuStats.Steal = n.Steal
} else {
c.logger.Debug("CPU Steal counter jumped backwards", "cpu", i, "old_value", cpuStats.Steal, "new_value", n.Steal)
}
if n.Guest >= cpuStats.Guest {
cpuStats.Guest = n.Guest
} else {
c.logger.Debug("CPU Guest counter jumped backwards", "cpu", i, "old_value", cpuStats.Guest, "new_value", n.Guest)
}
if n.GuestNice >= cpuStats.GuestNice {
cpuStats.GuestNice = n.GuestNice
} else {
c.logger.Debug("CPU GuestNice counter jumped backwards", "cpu", i, "old_value", cpuStats.GuestNice, "new_value", n.GuestNice)
}
c.cpuStats[i] = cpuStats
} }
// Remove offline CPUs. for i, n := range newStats {
if len(newStats) != len(c.cpuStats) { // If idle jumps backwards by more than X seconds, assume we had a hotplug event and reset the stats for this CPU.
onlineCPUIds := maps.Keys(newStats) if (c.cpuStats[i].Idle - n.Idle) >= jumpBackSeconds {
maps.DeleteFunc(c.cpuStats, func(key int64, item procfs.CPUStat) bool { level.Debug(c.logger).Log("msg", jumpBackDebugMessage, "cpu", i, "old_value", c.cpuStats[i].Idle, "new_value", n.Idle)
return !slices.Contains(onlineCPUIds, key) c.cpuStats[i] = procfs.CPUStat{}
}) }
if n.Idle >= c.cpuStats[i].Idle {
c.cpuStats[i].Idle = n.Idle
} else {
level.Debug(c.logger).Log("msg", "CPU Idle counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Idle, "new_value", n.Idle)
}
if n.User >= c.cpuStats[i].User {
c.cpuStats[i].User = n.User
} else {
level.Debug(c.logger).Log("msg", "CPU User counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].User, "new_value", n.User)
}
if n.Nice >= c.cpuStats[i].Nice {
c.cpuStats[i].Nice = n.Nice
} else {
level.Debug(c.logger).Log("msg", "CPU Nice counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Nice, "new_value", n.Nice)
}
if n.System >= c.cpuStats[i].System {
c.cpuStats[i].System = n.System
} else {
level.Debug(c.logger).Log("msg", "CPU System counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].System, "new_value", n.System)
}
if n.Iowait >= c.cpuStats[i].Iowait {
c.cpuStats[i].Iowait = n.Iowait
} else {
level.Debug(c.logger).Log("msg", "CPU Iowait counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Iowait, "new_value", n.Iowait)
}
if n.IRQ >= c.cpuStats[i].IRQ {
c.cpuStats[i].IRQ = n.IRQ
} else {
level.Debug(c.logger).Log("msg", "CPU IRQ counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].IRQ, "new_value", n.IRQ)
}
if n.SoftIRQ >= c.cpuStats[i].SoftIRQ {
c.cpuStats[i].SoftIRQ = n.SoftIRQ
} else {
level.Debug(c.logger).Log("msg", "CPU SoftIRQ counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].SoftIRQ, "new_value", n.SoftIRQ)
}
if n.Steal >= c.cpuStats[i].Steal {
c.cpuStats[i].Steal = n.Steal
} else {
level.Debug(c.logger).Log("msg", "CPU Steal counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Steal, "new_value", n.Steal)
}
if n.Guest >= c.cpuStats[i].Guest {
c.cpuStats[i].Guest = n.Guest
} else {
level.Debug(c.logger).Log("msg", "CPU Guest counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].Guest, "new_value", n.Guest)
}
if n.GuestNice >= c.cpuStats[i].GuestNice {
c.cpuStats[i].GuestNice = n.GuestNice
} else {
level.Debug(c.logger).Log("msg", "CPU GuestNice counter jumped backwards", "cpu", i, "old_value", c.cpuStats[i].GuestNice, "new_value", n.GuestNice)
}
} }
} }
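updateCPUStats above applies the same guard to every counter field: take the new reading only if it did not go backwards, otherwise keep the cached value and log at debug level. Factored out as a standalone sketch (keepMonotonic is an illustrative name, not a helper in this file):

// keepMonotonic returns the value to cache for a per-CPU counter,
// discarding readings that jumped backwards.
func keepMonotonic(logger *slog.Logger, cpu int64, field string, cached, next float64) float64 {
    if next >= cached {
        return next
    }
    logger.Debug("CPU "+field+" counter jumped backwards", "cpu", cpu, "old_value", cached, "new_value", next)
    return cached
}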

View file

@ -17,59 +17,49 @@
package collector package collector
import ( import (
"io"
"log/slog"
"reflect" "reflect"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/procfs" "github.com/prometheus/procfs"
) )
func copyStats(d, s map[int64]procfs.CPUStat) { func makeTestCPUCollector(s []procfs.CPUStat) *cpuCollector {
for k := range s { dup := make([]procfs.CPUStat, len(s))
v := s[k] copy(dup, s)
d[k] = v
}
}
func makeTestCPUCollector(s map[int64]procfs.CPUStat) *cpuCollector {
dup := make(map[int64]procfs.CPUStat, len(s))
copyStats(dup, s)
return &cpuCollector{ return &cpuCollector{
logger: slog.New(slog.NewTextHandler(io.Discard, nil)), logger: log.NewNopLogger(),
cpuStats: dup, cpuStats: dup,
} }
} }
func TestCPU(t *testing.T) { func TestCPU(t *testing.T) {
firstCPUStat := map[int64]procfs.CPUStat{ firstCPUStat := []procfs.CPUStat{{
0: { User: 100.0,
User: 100.0, Nice: 100.0,
Nice: 100.0, System: 100.0,
System: 100.0, Idle: 100.0,
Idle: 100.0, Iowait: 100.0,
Iowait: 100.0, IRQ: 100.0,
IRQ: 100.0, SoftIRQ: 100.0,
SoftIRQ: 100.0, Steal: 100.0,
Steal: 100.0, Guest: 100.0,
Guest: 100.0, GuestNice: 100.0,
GuestNice: 100.0, }}
}}
c := makeTestCPUCollector(firstCPUStat) c := makeTestCPUCollector(firstCPUStat)
want := map[int64]procfs.CPUStat{ want := []procfs.CPUStat{{
0: { User: 101.0,
User: 101.0, Nice: 101.0,
Nice: 101.0, System: 101.0,
System: 101.0, Idle: 101.0,
Idle: 101.0, Iowait: 101.0,
Iowait: 101.0, IRQ: 101.0,
IRQ: 101.0, SoftIRQ: 101.0,
SoftIRQ: 101.0, Steal: 101.0,
Steal: 101.0, Guest: 101.0,
Guest: 101.0, GuestNice: 101.0,
GuestNice: 101.0, }}
}}
c.updateCPUStats(want) c.updateCPUStats(want)
got := c.cpuStats got := c.cpuStats
if !reflect.DeepEqual(want, got) { if !reflect.DeepEqual(want, got) {
@ -77,19 +67,18 @@ func TestCPU(t *testing.T) {
} }
c = makeTestCPUCollector(firstCPUStat) c = makeTestCPUCollector(firstCPUStat)
jumpBack := map[int64]procfs.CPUStat{ jumpBack := []procfs.CPUStat{{
0: { User: 99.9,
User: 99.9, Nice: 99.9,
Nice: 99.9, System: 99.9,
System: 99.9, Idle: 99.9,
Idle: 99.9, Iowait: 99.9,
Iowait: 99.9, IRQ: 99.9,
IRQ: 99.9, SoftIRQ: 99.9,
SoftIRQ: 99.9, Steal: 99.9,
Steal: 99.9, Guest: 99.9,
Guest: 99.9, GuestNice: 99.9,
GuestNice: 99.9, }}
}}
c.updateCPUStats(jumpBack) c.updateCPUStats(jumpBack)
got = c.cpuStats got = c.cpuStats
if reflect.DeepEqual(jumpBack, got) { if reflect.DeepEqual(jumpBack, got) {
@ -97,107 +86,21 @@ func TestCPU(t *testing.T) {
} }
c = makeTestCPUCollector(firstCPUStat) c = makeTestCPUCollector(firstCPUStat)
resetIdle := map[int64]procfs.CPUStat{ resetIdle := []procfs.CPUStat{{
0: { User: 102.0,
User: 102.0, Nice: 102.0,
Nice: 102.0, System: 102.0,
System: 102.0, Idle: 1.0,
Idle: 1.0, Iowait: 102.0,
Iowait: 102.0, IRQ: 102.0,
IRQ: 102.0, SoftIRQ: 102.0,
SoftIRQ: 102.0, Steal: 102.0,
Steal: 102.0, Guest: 102.0,
Guest: 102.0, GuestNice: 102.0,
GuestNice: 102.0, }}
}}
c.updateCPUStats(resetIdle) c.updateCPUStats(resetIdle)
got = c.cpuStats got = c.cpuStats
if !reflect.DeepEqual(resetIdle, got) { if !reflect.DeepEqual(resetIdle, got) {
t.Fatalf("should have %v CPU Stat: got %v", resetIdle, got) t.Fatalf("should have %v CPU Stat: got %v", resetIdle, got)
} }
} }
func TestCPUOffline(t *testing.T) {
// CPU 1 goes offline.
firstCPUStat := map[int64]procfs.CPUStat{
0: {
User: 100.0,
Nice: 100.0,
System: 100.0,
Idle: 100.0,
Iowait: 100.0,
IRQ: 100.0,
SoftIRQ: 100.0,
Steal: 100.0,
Guest: 100.0,
GuestNice: 100.0,
},
1: {
User: 101.0,
Nice: 101.0,
System: 101.0,
Idle: 101.0,
Iowait: 101.0,
IRQ: 101.0,
SoftIRQ: 101.0,
Steal: 101.0,
Guest: 101.0,
GuestNice: 101.0,
},
}
c := makeTestCPUCollector(firstCPUStat)
want := map[int64]procfs.CPUStat{
0: {
User: 100.0,
Nice: 100.0,
System: 100.0,
Idle: 100.0,
Iowait: 100.0,
IRQ: 100.0,
SoftIRQ: 100.0,
Steal: 100.0,
Guest: 100.0,
GuestNice: 100.0,
},
}
c.updateCPUStats(want)
got := c.cpuStats
if !reflect.DeepEqual(want, got) {
t.Fatalf("should have %v CPU Stat: got %v", want, got)
}
// CPU 1 comes back online.
want = map[int64]procfs.CPUStat{
0: {
User: 100.0,
Nice: 100.0,
System: 100.0,
Idle: 100.0,
Iowait: 100.0,
IRQ: 100.0,
SoftIRQ: 100.0,
Steal: 100.0,
Guest: 100.0,
GuestNice: 100.0,
},
1: {
User: 101.0,
Nice: 101.0,
System: 101.0,
Idle: 101.0,
Iowait: 101.0,
IRQ: 101.0,
SoftIRQ: 101.0,
Steal: 101.0,
Guest: 101.0,
GuestNice: 101.0,
},
}
c.updateCPUStats(want)
got = c.cpuStats
if !reflect.DeepEqual(want, got) {
t.Fatalf("should have %v CPU Stat: got %v", want, got)
}
}
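TestCPUOffline above exercises the pruning added to updateCPUStats: when the set of CPUs reported in /proc/stat shrinks, cached entries for the missing CPU IDs are dropped, and they reappear naturally on a later scrape once the CPU is back online. The pruning step on its own, using the same x/exp/maps and stdlib slices calls as the collector:

if len(newStats) != len(c.cpuStats) {
    onlineCPUIds := maps.Keys(newStats) // newStats is map[int64]procfs.CPUStat
    maps.DeleteFunc(c.cpuStats, func(key int64, _ procfs.CPUStat) bool {
        return !slices.Contains(onlineCPUIds, key)
    })
}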

View file

@ -1,279 +0,0 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nocpu
// +build !nocpu
package collector
import (
"errors"
"log/slog"
"math"
"regexp"
"sort"
"strconv"
"strings"
"unsafe"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
"howett.net/plist"
)
const (
_IOC_OUT = uint(0x40000000)
_IOC_IN = uint(0x80000000)
_IOC_INOUT = (_IOC_IN | _IOC_OUT)
_IOCPARM_MASK = uint(0x1fff)
_IOCPARM_SHIFT = uint(16)
_IOCGROUP_SHIFT = uint(8)
)
type clockinfo struct {
hz int32 // clock frequency
tick int32 // micro-seconds per hz tick
spare int32
stathz int32 // statistics clock frequency
profhz int32 // profiling clock frequency
}
type cputime struct {
user float64
nice float64
sys float64
intr float64
idle float64
}
type plistref struct {
pref_plist unsafe.Pointer
pref_len uint
}
type sysmonValues struct {
CurValue int `plist:"cur-value"`
Description string `plist:"description"`
State string `plist:"state"`
Type string `plist:"type"`
}
type sysmonProperty []sysmonValues
type sysmonProperties map[string]sysmonProperty
func _IOC(inout uint, group byte, num uint, len uintptr) uint {
return ((inout) | ((uint(len) & _IOCPARM_MASK) << _IOCPARM_SHIFT) | (uint(group) << _IOCGROUP_SHIFT) | (num))
}
func _IOWR(group byte, num uint, len uintptr) uint {
return _IOC(_IOC_INOUT, group, num, len)
}
func ioctl(fd int, nr uint, typ byte, size uintptr, retptr unsafe.Pointer) error {
_, _, errno := unix.Syscall(
unix.SYS_IOCTL,
uintptr(fd),
uintptr(_IOWR(typ, nr, size)),
uintptr(retptr),
)
if errno != 0 {
return errno
}
return nil
}
func readSysmonProperties() (sysmonProperties, error) {
fd, err := unix.Open(rootfsFilePath("/dev/sysmon"), unix.O_RDONLY, 0)
if err != nil {
return nil, err
}
defer unix.Close(fd)
var retptr plistref
if err = ioctl(fd, 0, 'E', unsafe.Sizeof(retptr), unsafe.Pointer(&retptr)); err != nil {
return nil, err
}
defer unix.Syscall(unix.SYS_MUNMAP, uintptr(retptr.pref_plist), uintptr(retptr.pref_len), uintptr(0))
bytes := unsafe.Slice((*byte)(unsafe.Pointer(retptr.pref_plist)), retptr.pref_len-1)
var props sysmonProperties
if _, err = plist.Unmarshal(bytes, &props); err != nil {
return nil, err
}
return props, nil
}
func sortFilterSysmonProperties(props sysmonProperties, prefix string) []string {
var keys []string
for key := range props {
if !strings.HasPrefix(key, prefix) {
continue
}
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
func convertTemperatures(prop sysmonProperty, res map[int]float64) error {
for _, val := range prop {
if val.State == "invalid" || val.State == "unknown" || val.State == "" {
continue
}
re := regexp.MustCompile("^cpu([0-9]+) temperature$")
core := re.FindStringSubmatch(val.Description)[1]
ncore, _ := strconv.Atoi(core)
temperature := ((float64(uint64(val.CurValue))) / 1000000) - 273.15
res[ncore] = temperature
}
return nil
}
func getCPUTemperatures() (map[int]float64, error) {
res := make(map[int]float64)
// Read all properties
props, err := readSysmonProperties()
if err != nil {
return res, err
}
keys := sortFilterSysmonProperties(props, "coretemp")
for idx := range keys {
convertTemperatures(props[keys[idx]], res)
}
return res, nil
}
func getCPUTimes() ([]cputime, error) {
const states = 5
clockb, err := unix.SysctlRaw("kern.clockrate")
if err != nil {
return nil, err
}
clock := *(*clockinfo)(unsafe.Pointer(&clockb[0]))
var cpufreq float64
if clock.stathz > 0 {
cpufreq = float64(clock.stathz)
} else {
cpufreq = float64(clock.hz)
}
ncpusb, err := unix.SysctlRaw("hw.ncpu")
if err != nil {
return nil, err
}
ncpus := int(*(*uint32)(unsafe.Pointer(&ncpusb[0])))
if ncpus < 1 {
return nil, errors.New("Invalid cpu number")
}
var times []float64
for ncpu := 0; ncpu < ncpus; ncpu++ {
cpb, err := unix.SysctlRaw("kern.cp_time", ncpu)
if err != nil {
return nil, err
}
for len(cpb) >= int(unsafe.Sizeof(uint64(0))) {
t := *(*uint64)(unsafe.Pointer(&cpb[0]))
times = append(times, float64(t)/cpufreq)
cpb = cpb[unsafe.Sizeof(uint64(0)):]
}
}
cpus := make([]cputime, len(times)/states)
for i := 0; i < len(times); i += states {
cpu := &cpus[i/states]
cpu.user = times[i]
cpu.nice = times[i+1]
cpu.sys = times[i+2]
cpu.intr = times[i+3]
cpu.idle = times[i+4]
}
return cpus, nil
}
type statCollector struct {
cpu typedDesc
temp typedDesc
logger *slog.Logger
}
func init() {
registerCollector("cpu", defaultEnabled, NewStatCollector)
}
// NewStatCollector returns a new Collector exposing CPU stats.
func NewStatCollector(logger *slog.Logger) (Collector, error) {
return &statCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
temp: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "temperature_celsius"),
"CPU temperature",
[]string{"cpu"}, nil,
), prometheus.GaugeValue},
logger: logger,
}, nil
}
// Expose CPU stats using sysctl.
func (c *statCollector) Update(ch chan<- prometheus.Metric) error {
// We want time spent per-cpu per CPUSTATE.
// CPUSTATES (number of CPUSTATES) is defined as 5U.
// Order: CP_USER | CP_NICE | CP_SYS | CP_IDLE | CP_INTR
// sysctl kern.cp_time.x provides CPUSTATES long integers:
// (space-separated list of the above variables, where
// x stands for the number of the CPU core)
//
// Each value is a counter incremented at frequency
// kern.clockrate.(stathz | hz)
//
// Look into sys/kern/kern_clock.c for details.
cpuTimes, err := getCPUTimes()
if err != nil {
return err
}
cpuTemperatures, err := getCPUTemperatures()
if err != nil {
return err
}
for cpu, t := range cpuTimes {
lcpu := strconv.Itoa(cpu)
ch <- c.cpu.mustNewConstMetric(float64(t.user), lcpu, "user")
ch <- c.cpu.mustNewConstMetric(float64(t.nice), lcpu, "nice")
ch <- c.cpu.mustNewConstMetric(float64(t.sys), lcpu, "system")
ch <- c.cpu.mustNewConstMetric(float64(t.intr), lcpu, "interrupt")
ch <- c.cpu.mustNewConstMetric(float64(t.idle), lcpu, "idle")
if temp, ok := cpuTemperatures[cpu]; ok {
ch <- c.temp.mustNewConstMetric(temp, lcpu)
} else {
c.logger.Debug("no temperature information for CPU", "cpu", cpu)
ch <- c.temp.mustNewConstMetric(math.NaN(), lcpu)
}
}
return err
}
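Note on the ioctl plumbing above: the request number handed to unix.Syscall is assembled by _IOC from the direction bits, the parameter length, the group character and the command number. Below is a small self-contained sketch of that arithmetic; the constant values are copied from the file above, and the 16-byte plistref size assumes a 64-bit platform.

package main

import "fmt"

const (
	iocOut        = uint(0x40000000)
	iocIn         = uint(0x80000000)
	iocInOut      = iocIn | iocOut
	iocParmMask   = uint(0x1fff)
	iocParmShift  = uint(16)
	iocGroupShift = uint(8)
)

// ioc mirrors _IOC above: direction | (size & mask) << 16 | group << 8 | num.
func ioc(inout uint, group byte, num uint, size uintptr) uint {
	return inout | ((uint(size) & iocParmMask) << iocParmShift) | (uint(group) << iocGroupShift) | num
}

func main() {
	// The sysmon request issued above: group 'E', command 0, payload is a
	// plistref (pointer + length, i.e. 16 bytes on a 64-bit platform).
	fmt.Printf("%#x\n", ioc(iocInOut, 'E', 0, 16)) // 0xc0104500
}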

View file

@ -1,44 +0,0 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nocpu
// +build !nocpu
package collector
import (
"runtime"
"testing"
)
func TestCPUTimes(t *testing.T) {
times, err := getCPUTimes()
if err != nil {
t.Fatalf("getCPUTimes returned error: %v", err)
}
if len(times) == 0 {
t.Fatalf("no CPU times found")
}
if got, want := len(times), runtime.NumCPU(); got != want {
t.Fatalf("unexpected # of CPU times; got %d want %d", got, want)
}
}
func TestCPUTemperatures(t *testing.T) {
_, err := getCPUTemperatures()
if err != nil {
t.Fatalf("getCPUTemperatures returned error: %v", err)
}
}

View file

@ -17,14 +17,22 @@
package collector package collector
import ( import (
"log/slog"
"strconv" "strconv"
"unsafe" "unsafe"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
type clockinfo struct {
hz int32
tick int32
tickadj int32
stathz int32
profhz int32
}
const ( const (
CP_USER = iota CP_USER = iota
CP_NICE CP_NICE
@ -45,14 +53,14 @@ const (
type cpuCollector struct { type cpuCollector struct {
cpu typedDesc cpu typedDesc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
registerCollector("cpu", defaultEnabled, NewCPUCollector) registerCollector("cpu", defaultEnabled, NewCPUCollector)
} }
func NewCPUCollector(logger *slog.Logger) (Collector, error) { func NewCPUCollector(logger log.Logger) (Collector, error) {
return &cpuCollector{ return &cpuCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
logger: logger, logger: logger,
@ -64,8 +72,8 @@ func (c *cpuCollector) Update(ch chan<- prometheus.Metric) (err error) {
if err != nil { if err != nil {
return err return err
} }
clock := *(*unix.Clockinfo)(unsafe.Pointer(&clockb[0])) clock := *(*clockinfo)(unsafe.Pointer(&clockb[0]))
hz := float64(clock.Stathz) hz := float64(clock.stathz)
ncpus, err := unix.SysctlUint32("hw.ncpu") ncpus, err := unix.SysctlUint32("hw.ncpu")
if err != nil { if err != nil {

View file

@ -11,15 +11,15 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !nocpu //go:build solaris && !nocpu
// +build !nocpu // +build solaris,!nocpu
package collector package collector
import ( import (
"log/slog"
"strconv" "strconv"
"github.com/go-kit/log"
"github.com/illumos/go-kstat" "github.com/illumos/go-kstat"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -29,14 +29,14 @@ import "C"
type cpuCollector struct { type cpuCollector struct {
cpu typedDesc cpu typedDesc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
registerCollector("cpu", defaultEnabled, NewCpuCollector) registerCollector("cpu", defaultEnabled, NewCpuCollector)
} }
func NewCpuCollector(logger *slog.Logger) (Collector, error) { func NewCpuCollector(logger log.Logger) (Collector, error) {
return &cpuCollector{ return &cpuCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
logger: logger, logger: logger,
@ -60,17 +60,17 @@ func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error {
} }
for k, v := range map[string]string{ for k, v := range map[string]string{
"idle": "cpu_nsec_idle", "idle": "cpu_ticks_idle",
"kernel": "cpu_nsec_kernel", "kernel": "cpu_ticks_kernel",
"user": "cpu_nsec_user", "user": "cpu_ticks_user",
"wait": "cpu_nsec_wait", "wait": "cpu_ticks_wait",
} { } {
kstatValue, err := ksCPU.GetNamed(v) kstatValue, err := ksCPU.GetNamed(v)
if err != nil { if err != nil {
return err return err
} }
ch <- c.cpu.mustNewConstMetric(float64(kstatValue.UintVal)/1e9, strconv.Itoa(cpu), k) ch <- c.cpu.mustNewConstMetric(float64(kstatValue.UintVal), strconv.Itoa(cpu), k)
} }
} }
return nil return nil

View file

@ -1,69 +0,0 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"fmt"
"log/slog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
)
const (
cpuVulnerabilitiesCollectorSubsystem = "cpu_vulnerabilities"
)
var (
vulnerabilityDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuVulnerabilitiesCollectorSubsystem, "info"),
"Details of each CPU vulnerability reported by sysfs. The value of the series is an int encoded state of the vulnerability. The same state is stored as a string in the label",
[]string{"codename", "state", "mitigation"},
nil,
)
)
type cpuVulnerabilitiesCollector struct{}
func init() {
registerCollector(cpuVulnerabilitiesCollectorSubsystem, defaultDisabled, NewVulnerabilitySysfsCollector)
}
func NewVulnerabilitySysfsCollector(logger *slog.Logger) (Collector, error) {
return &cpuVulnerabilitiesCollector{}, nil
}
func (v *cpuVulnerabilitiesCollector) Update(ch chan<- prometheus.Metric) error {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return fmt.Errorf("failed to open sysfs: %w", err)
}
vulnerabilities, err := fs.CPUVulnerabilities()
if err != nil {
return fmt.Errorf("failed to get vulnerabilities: %w", err)
}
for _, vulnerability := range vulnerabilities {
ch <- prometheus.MustNewConstMetric(
vulnerabilityDesc,
prometheus.GaugeValue,
1.0,
vulnerability.CodeName,
sysfs.VulnerabilityHumanEncoding[vulnerability.State],
vulnerability.Mitigation,
)
}
return nil
}

View file

@ -1,59 +0,0 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nocpu
// +build !nocpu
package collector
import (
"github.com/prometheus/client_golang/prometheus"
)
var (
cpuFreqHertzDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"),
"Current CPU thread frequency in hertz.",
[]string{"cpu"}, nil,
)
cpuFreqMinDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_min_hertz"),
"Minimum CPU thread frequency in hertz.",
[]string{"cpu"}, nil,
)
cpuFreqMaxDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_max_hertz"),
"Maximum CPU thread frequency in hertz.",
[]string{"cpu"}, nil,
)
cpuFreqScalingFreqDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_hertz"),
"Current scaled CPU thread frequency in hertz.",
[]string{"cpu"}, nil,
)
cpuFreqScalingFreqMinDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_min_hertz"),
"Minimum scaled CPU thread frequency in hertz.",
[]string{"cpu"}, nil,
)
cpuFreqScalingFreqMaxDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_max_hertz"),
"Maximum scaled CPU thread frequency in hertz.",
[]string{"cpu"}, nil,
)
cpuFreqScalingGovernorDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_governor"),
"Current enabled CPU frequency governor.",
[]string{"cpu", "governor"}, nil,
)
)

View file

@ -18,16 +18,21 @@ package collector
import ( import (
"fmt" "fmt"
"log/slog"
"strings"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs" "github.com/prometheus/procfs/sysfs"
) )
type cpuFreqCollector struct { type cpuFreqCollector struct {
fs sysfs.FS fs sysfs.FS
logger *slog.Logger cpuFreq *prometheus.Desc
cpuFreqMin *prometheus.Desc
cpuFreqMax *prometheus.Desc
scalingFreq *prometheus.Desc
scalingFreqMin *prometheus.Desc
scalingFreqMax *prometheus.Desc
logger log.Logger
} }
func init() { func init() {
@ -35,14 +40,44 @@ func init() {
} }
// NewCPUFreqCollector returns a new Collector exposing kernel/system statistics. // NewCPUFreqCollector returns a new Collector exposing kernel/system statistics.
func NewCPUFreqCollector(logger *slog.Logger) (Collector, error) { func NewCPUFreqCollector(logger log.Logger) (Collector, error) {
fs, err := sysfs.NewFS(*sysPath) fs, err := sysfs.NewFS(*sysPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err) return nil, fmt.Errorf("failed to open sysfs: %w", err)
} }
return &cpuFreqCollector{ return &cpuFreqCollector{
fs: fs, fs: fs,
cpuFreq: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"),
"Current cpu thread frequency in hertz.",
[]string{"cpu"}, nil,
),
cpuFreqMin: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_min_hertz"),
"Minimum cpu thread frequency in hertz.",
[]string{"cpu"}, nil,
),
cpuFreqMax: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_max_hertz"),
"Maximum cpu thread frequency in hertz.",
[]string{"cpu"}, nil,
),
scalingFreq: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_hertz"),
"Current scaled CPU thread frequency in hertz.",
[]string{"cpu"}, nil,
),
scalingFreqMin: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_min_hertz"),
"Minimum scaled CPU thread frequency in hertz.",
[]string{"cpu"}, nil,
),
scalingFreqMax: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_max_hertz"),
"Maximum scaled CPU thread frequency in hertz.",
[]string{"cpu"}, nil,
),
logger: logger, logger: logger,
}, nil }, nil
} }
@ -59,7 +94,7 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error {
for _, stats := range cpuFreqs { for _, stats := range cpuFreqs {
if stats.CpuinfoCurrentFrequency != nil { if stats.CpuinfoCurrentFrequency != nil {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
cpuFreqHertzDesc, c.cpuFreq,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(*stats.CpuinfoCurrentFrequency)*1000.0, float64(*stats.CpuinfoCurrentFrequency)*1000.0,
stats.Name, stats.Name,
@ -67,7 +102,7 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error {
} }
if stats.CpuinfoMinimumFrequency != nil { if stats.CpuinfoMinimumFrequency != nil {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
cpuFreqMinDesc, c.cpuFreqMin,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(*stats.CpuinfoMinimumFrequency)*1000.0, float64(*stats.CpuinfoMinimumFrequency)*1000.0,
stats.Name, stats.Name,
@ -75,7 +110,7 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error {
} }
if stats.CpuinfoMaximumFrequency != nil { if stats.CpuinfoMaximumFrequency != nil {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
cpuFreqMaxDesc, c.cpuFreqMax,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(*stats.CpuinfoMaximumFrequency)*1000.0, float64(*stats.CpuinfoMaximumFrequency)*1000.0,
stats.Name, stats.Name,
@ -83,7 +118,7 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error {
} }
if stats.ScalingCurrentFrequency != nil { if stats.ScalingCurrentFrequency != nil {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
cpuFreqScalingFreqDesc, c.scalingFreq,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(*stats.ScalingCurrentFrequency)*1000.0, float64(*stats.ScalingCurrentFrequency)*1000.0,
stats.Name, stats.Name,
@ -91,7 +126,7 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error {
} }
if stats.ScalingMinimumFrequency != nil { if stats.ScalingMinimumFrequency != nil {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
cpuFreqScalingFreqMinDesc, c.scalingFreqMin,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(*stats.ScalingMinimumFrequency)*1000.0, float64(*stats.ScalingMinimumFrequency)*1000.0,
stats.Name, stats.Name,
@ -99,28 +134,12 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error {
} }
if stats.ScalingMaximumFrequency != nil { if stats.ScalingMaximumFrequency != nil {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
cpuFreqScalingFreqMaxDesc, c.scalingFreqMax,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(*stats.ScalingMaximumFrequency)*1000.0, float64(*stats.ScalingMaximumFrequency)*1000.0,
stats.Name, stats.Name,
) )
} }
if stats.Governor != "" {
availableGovernors := strings.Split(stats.AvailableGovernors, " ")
for _, g := range availableGovernors {
state := 0
if g == stats.Governor {
state = 1
}
ch <- prometheus.MustNewConstMetric(
cpuFreqScalingGovernorDesc,
prometheus.GaugeValue,
float64(state),
stats.Name,
g,
)
}
}
} }
return nil return nil
} }

View file

@ -11,16 +11,16 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !nocpu //go:build solaris && !nocpu
// +build !nocpu // +build solaris,!nocpu
package collector package collector
import ( import (
"fmt" "fmt"
"log/slog"
"strconv" "strconv"
"github.com/go-kit/log"
"github.com/illumos/go-kstat" "github.com/illumos/go-kstat"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -29,15 +29,27 @@ import (
import "C" import "C"
type cpuFreqCollector struct { type cpuFreqCollector struct {
logger *slog.Logger cpuFreq *prometheus.Desc
cpuFreqMax *prometheus.Desc
logger log.Logger
} }
func init() { func init() {
registerCollector("cpufreq", defaultEnabled, NewCpuFreqCollector) registerCollector("cpufreq", defaultEnabled, NewCpuFreqCollector)
} }
func NewCpuFreqCollector(logger *slog.Logger) (Collector, error) { func NewCpuFreqCollector(logger log.Logger) (Collector, error) {
return &cpuFreqCollector{ return &cpuFreqCollector{
cpuFreq: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"),
"Current CPU thread frequency in hertz.",
[]string{"cpu"}, nil,
),
cpuFreqMax: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_max_hertz"),
"Maximum CPU thread frequency in hertz.",
[]string{"cpu"}, nil,
),
logger: logger, logger: logger,
}, nil }, nil
} }
@ -69,14 +81,14 @@ func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error {
lcpu := strconv.Itoa(cpu) lcpu := strconv.Itoa(cpu)
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
cpuFreqHertzDesc, c.cpuFreq,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(cpuFreqV.UintVal), float64(cpuFreqV.UintVal),
lcpu, lcpu,
) )
// Multiply by 1e+6 to convert MHz to Hz. // Multiply by 1e+6 to convert MHz to Hz.
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
cpuFreqMaxDesc, c.cpuFreqMax,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(cpuFreqMaxV.IntVal)*1e+6, float64(cpuFreqMaxV.IntVal)*1e+6,
lcpu, lcpu,

View file

@ -34,8 +34,8 @@ func newDeviceFilter(ignoredPattern, acceptPattern string) (f deviceFilter) {
return return
} }
// ignored returns whether the device should be ignored // ignores returns whether the device should be ignored
func (f *deviceFilter) ignored(name string) bool { func (f *deviceFilter) ignored(name string) bool {
return (f.ignorePattern != nil && f.ignorePattern.MatchString(name)) || return ((f.ignorePattern != nil && f.ignorePattern.MatchString(name)) ||
(f.acceptPattern != nil && !f.acceptPattern.MatchString(name)) (f.acceptPattern != nil && !f.acceptPattern.MatchString(name)))
} }
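Note: the two-clause rule above means a device is dropped when it matches the exclude pattern, or when an include pattern is configured and the device does not match it. A self-contained re-creation for illustration only (the struct fields mirror the method shown above; the regexps in main are made up):

package main

import (
	"fmt"
	"regexp"
)

type deviceFilter struct {
	ignorePattern *regexp.Regexp
	acceptPattern *regexp.Regexp
}

func newDeviceFilter(ignoredPattern, acceptPattern string) (f deviceFilter) {
	if ignoredPattern != "" {
		f.ignorePattern = regexp.MustCompile(ignoredPattern)
	}
	if acceptPattern != "" {
		f.acceptPattern = regexp.MustCompile(acceptPattern)
	}
	return
}

// ignored reports whether the named device should be skipped by a collector.
func (f *deviceFilter) ignored(name string) bool {
	return (f.ignorePattern != nil && f.ignorePattern.MatchString(name)) ||
		(f.acceptPattern != nil && !f.acceptPattern.MatchString(name))
}

func main() {
	exclude := newDeviceFilter(`^(ram|loop)\d+$`, "")
	fmt.Println(exclude.ignored("loop0"), exclude.ignored("sda")) // true false

	include := newDeviceFilter("", `^sd[a-z]$`)
	fmt.Println(include.ignored("sda"), include.ignored("dm-0")) // false true
}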

View file

@ -19,8 +19,8 @@ package collector
import ( import (
"errors" "errors"
"fmt" "fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -98,7 +98,7 @@ type devstatCollector struct {
bytesDesc *prometheus.Desc bytesDesc *prometheus.Desc
transfersDesc *prometheus.Desc transfersDesc *prometheus.Desc
blocksDesc *prometheus.Desc blocksDesc *prometheus.Desc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -106,7 +106,7 @@ func init() {
} }
// NewDevstatCollector returns a new Collector exposing Device stats. // NewDevstatCollector returns a new Collector exposing Device stats.
func NewDevstatCollector(logger *slog.Logger) (Collector, error) { func NewDevstatCollector(logger log.Logger) (Collector, error) {
return &devstatCollector{ return &devstatCollector{
bytesDesc: prometheus.NewDesc( bytesDesc: prometheus.NewDesc(
prometheus.BuildFQName(namespace, devstatSubsystem, "bytes_total"), prometheus.BuildFQName(namespace, devstatSubsystem, "bytes_total"),

View file

@ -19,10 +19,10 @@ package collector
import ( import (
"errors" "errors"
"fmt" "fmt"
"log/slog"
"sync" "sync"
"unsafe" "unsafe"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -43,7 +43,7 @@ type devstatCollector struct {
duration typedDesc duration typedDesc
busyTime typedDesc busyTime typedDesc
blocks typedDesc blocks typedDesc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -51,7 +51,7 @@ func init() {
} }
// NewDevstatCollector returns a new Collector exposing Device stats. // NewDevstatCollector returns a new Collector exposing Device stats.
func NewDevstatCollector(logger *slog.Logger) (Collector, error) { func NewDevstatCollector(logger log.Logger) (Collector, error) {
return &devstatCollector{ return &devstatCollector{
devinfo: &C.struct_devinfo{}, devinfo: &C.struct_devinfo{},
bytes: typedDesc{prometheus.NewDesc( bytes: typedDesc{prometheus.NewDesc(

View file

@ -1,145 +0,0 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nodiskstats
// +build !nodiskstats
package collector
import (
"fmt"
"log/slog"
"github.com/power-devops/perfstat"
"github.com/prometheus/client_golang/prometheus"
)
const diskstatsDefaultIgnoredDevices = ""
type diskstatsCollector struct {
rbytes typedDesc
wbytes typedDesc
time typedDesc
bsize typedDesc
qdepth typedDesc
rserv typedDesc
wserv typedDesc
xfers typedDesc
xrate typedDesc
deviceFilter deviceFilter
logger *slog.Logger
tickPerSecond float64
}
func init() {
registerCollector("diskstats", defaultEnabled, NewDiskstatsCollector)
}
// NewDiskstatsCollector returns a new Collector exposing disk device stats.
func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) {
ticks, err := tickPerSecond()
if err != nil {
return nil, err
}
deviceFilter, err := newDiskstatsDeviceFilter(logger)
if err != nil {
return nil, fmt.Errorf("failed to parse device filter flags: %w", err)
}
return &diskstatsCollector{
rbytes: typedDesc{readBytesDesc, prometheus.CounterValue},
wbytes: typedDesc{writtenBytesDesc, prometheus.CounterValue},
time: typedDesc{ioTimeSecondsDesc, prometheus.CounterValue},
bsize: typedDesc{
prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "block_size_bytes"),
"Size of the block device in bytes.",
diskLabelNames, nil,
),
prometheus.GaugeValue,
},
qdepth: typedDesc{
prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "queue_depth"),
"Number of requests in the queue.",
diskLabelNames, nil,
),
prometheus.GaugeValue,
},
rserv: typedDesc{
prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "read_time_seconds_total"),
"The total time spent servicing read requests.",
diskLabelNames, nil,
),
prometheus.CounterValue,
},
wserv: typedDesc{
prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "write_time_seconds_total"),
"The total time spent servicing write requests.",
diskLabelNames, nil,
),
prometheus.CounterValue,
},
xfers: typedDesc{
prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "transfers_total"),
"The total number of transfers to/from disk.",
diskLabelNames, nil,
),
prometheus.CounterValue,
},
xrate: typedDesc{
prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "transfers_to_disk_total"),
"The total number of transfers from disk.",
diskLabelNames, nil,
),
prometheus.CounterValue,
},
deviceFilter: deviceFilter,
logger: logger,
tickPerSecond: ticks,
}, nil
}
func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
stats, err := perfstat.DiskStat()
if err != nil {
return err
}
for _, stat := range stats {
if c.deviceFilter.ignored(stat.Name) {
continue
}
ch <- c.rbytes.mustNewConstMetric(float64(stat.Rblks*512), stat.Name)
ch <- c.wbytes.mustNewConstMetric(float64(stat.Wblks*512), stat.Name)
ch <- c.time.mustNewConstMetric(float64(stat.Time)/float64(c.tickPerSecond), stat.Name)
ch <- c.bsize.mustNewConstMetric(float64(stat.BSize), stat.Name)
ch <- c.qdepth.mustNewConstMetric(float64(stat.QDepth), stat.Name)
ch <- c.rserv.mustNewConstMetric(float64(stat.Rserv)/1e9, stat.Name)
ch <- c.wserv.mustNewConstMetric(float64(stat.Wserv)/1e9, stat.Name)
ch <- c.xfers.mustNewConstMetric(float64(stat.Xfers), stat.Name)
ch <- c.xrate.mustNewConstMetric(float64(stat.XRate), stat.Name)
}
return nil
}

View file

@ -11,18 +11,19 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !nodiskstats && (openbsd || linux || darwin || aix) //go:build !nodiskstats && (openbsd || linux || darwin)
// +build !nodiskstats // +build !nodiskstats
// +build openbsd linux darwin aix // +build openbsd linux darwin
package collector package collector
import ( import (
"errors" "errors"
"log/slog"
"github.com/alecthomas/kingpin/v2" "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
) )
const ( const (
@ -32,20 +33,9 @@ const (
var ( var (
diskLabelNames = []string{"device"} diskLabelNames = []string{"device"}
diskstatsDeviceExcludeSet bool diskstatsDeviceExclude = kingpin.Flag("collector.diskstats.device-exclude", "Regexp of diskstats devices to exclude (mutually exclusive to device-include).").Default(diskstatsDefaultIgnoredDevices).String()
diskstatsDeviceExclude = kingpin.Flag( oldDiskstatsDeviceExclude = kingpin.Flag("collector.diskstats.ignored-devices", "DEPRECATED: Use collector.diskstats.device-exclude").String()
"collector.diskstats.device-exclude", diskstatsDeviceInclude = kingpin.Flag("collector.diskstats.device-include", "Regexp of diskstats devices to include (mutually exclusive to device-exclude).").String()
"Regexp of diskstats devices to exclude (mutually exclusive to device-include).",
).Default(diskstatsDefaultIgnoredDevices).PreAction(func(c *kingpin.ParseContext) error {
diskstatsDeviceExcludeSet = true
return nil
}).String()
oldDiskstatsDeviceExclude = kingpin.Flag(
"collector.diskstats.ignored-devices",
"DEPRECATED: Use collector.diskstats.device-exclude",
).Hidden().String()
diskstatsDeviceInclude = kingpin.Flag("collector.diskstats.device-include", "Regexp of diskstats devices to include (mutually exclusive to device-exclude).").String()
readsCompletedDesc = prometheus.NewDesc( readsCompletedDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, diskSubsystem, "reads_completed_total"), prometheus.BuildFQName(namespace, diskSubsystem, "reads_completed_total"),
@ -92,10 +82,10 @@ var (
) )
) )
func newDiskstatsDeviceFilter(logger *slog.Logger) (deviceFilter, error) { func newDiskstatsDeviceFilter(logger log.Logger) (deviceFilter, error) {
if *oldDiskstatsDeviceExclude != "" { if *oldDiskstatsDeviceExclude != "" {
if !diskstatsDeviceExcludeSet { if *diskstatsDeviceExclude == "" {
logger.Warn("--collector.diskstats.ignored-devices is DEPRECATED and will be removed in 2.0.0, use --collector.diskstats.device-exclude") level.Warn(logger).Log("msg", "--collector.diskstats.ignored-devices is DEPRECATED and will be removed in 2.0.0, use --collector.diskstats.device-exclude")
*diskstatsDeviceExclude = *oldDiskstatsDeviceExclude *diskstatsDeviceExclude = *oldDiskstatsDeviceExclude
} else { } else {
return deviceFilter{}, errors.New("--collector.diskstats.ignored-devices and --collector.diskstats.device-exclude are mutually exclusive") return deviceFilter{}, errors.New("--collector.diskstats.ignored-devices and --collector.diskstats.device-exclude are mutually exclusive")
@ -107,11 +97,11 @@ func newDiskstatsDeviceFilter(logger *slog.Logger) (deviceFilter, error) {
} }
if *diskstatsDeviceExclude != "" { if *diskstatsDeviceExclude != "" {
logger.Info("Parsed flag --collector.diskstats.device-exclude", "flag", *diskstatsDeviceExclude) level.Info(logger).Log("msg", "Parsed flag --collector.diskstats.device-exclude", "flag", *diskstatsDeviceExclude)
} }
if *diskstatsDeviceInclude != "" { if *diskstatsDeviceInclude != "" {
logger.Info("Parsed Flag --collector.diskstats.device-include", "flag", *diskstatsDeviceInclude) level.Info(logger).Log("msg", "Parsed Flag --collector.diskstats.device-include", "flag", *diskstatsDeviceInclude)
} }
return newDeviceFilter(*diskstatsDeviceExclude, *diskstatsDeviceInclude), nil return newDeviceFilter(*diskstatsDeviceExclude, *diskstatsDeviceInclude), nil
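Note: the PreAction hook in the newer flag definition above lets the collector tell whether --collector.diskstats.device-exclude was set explicitly on the command line, instead of inferring it from an empty value as the older code does. A minimal sketch of the same pattern with a made-up flag name, using the kingpin/v2 API seen above:

package main

import (
	"fmt"

	"github.com/alecthomas/kingpin/v2"
)

var (
	excludeSet bool
	exclude    = kingpin.Flag("device-exclude", "Regexp of devices to exclude.").
		Default(`^(ram|loop)\d+$`).
		PreAction(func(*kingpin.ParseContext) error {
			// Runs only when the flag appears on the command line.
			excludeSet = true
			return nil
		}).String()
)

func main() {
	kingpin.Parse()
	fmt.Println("value:", *exclude, "explicitly set:", excludeSet)
}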

View file

@ -18,8 +18,8 @@ package collector
import ( import (
"fmt" "fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/lufia/iostat" "github.com/lufia/iostat"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -35,7 +35,7 @@ type diskstatsCollector struct {
descs []typedDescFunc descs []typedDescFunc
deviceFilter deviceFilter deviceFilter deviceFilter
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -43,7 +43,7 @@ func init() {
} }
// NewDiskstatsCollector returns a new Collector exposing disk device stats. // NewDiskstatsCollector returns a new Collector exposing disk device stats.
func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) { func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
var diskLabelNames = []string{"device"} var diskLabelNames = []string{"device"}
deviceFilter, err := newDiskstatsDeviceFilter(logger) deviceFilter, err := newDiskstatsDeviceFilter(logger)

View file

@ -19,11 +19,12 @@ package collector
import ( import (
"bufio" "bufio"
"fmt" "fmt"
"log/slog"
"os" "os"
"strconv" "strconv"
"strings" "strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/blockdevice" "github.com/prometheus/procfs/blockdevice"
) )
@ -35,7 +36,7 @@ const (
// See also https://www.kernel.org/doc/Documentation/block/stat.txt // See also https://www.kernel.org/doc/Documentation/block/stat.txt
unixSectorSize = 512.0 unixSectorSize = 512.0
diskstatsDefaultIgnoredDevices = "^(z?ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$" diskstatsDefaultIgnoredDevices = "^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$"
// See udevadm(8). // See udevadm(8).
udevDevicePropertyPrefix = "E:" udevDevicePropertyPrefix = "E:"
@ -62,7 +63,6 @@ const (
udevIDRevision = "ID_REVISION" udevIDRevision = "ID_REVISION"
udevIDSerialShort = "ID_SERIAL_SHORT" udevIDSerialShort = "ID_SERIAL_SHORT"
udevIDWWN = "ID_WWN" udevIDWWN = "ID_WWN"
udevSCSIIdentSerial = "SCSI_IDENT_SERIAL"
) )
type typedFactorDesc struct { type typedFactorDesc struct {
@ -84,7 +84,7 @@ type diskstatsCollector struct {
filesystemInfoDesc typedFactorDesc filesystemInfoDesc typedFactorDesc
deviceMapperInfoDesc typedFactorDesc deviceMapperInfoDesc typedFactorDesc
ataDescs map[string]typedFactorDesc ataDescs map[string]typedFactorDesc
logger *slog.Logger logger log.Logger
getUdevDeviceProperties func(uint32, uint32) (udevInfo, error) getUdevDeviceProperties func(uint32, uint32) (udevInfo, error)
} }
@ -94,7 +94,7 @@ func init() {
// NewDiskstatsCollector returns a new Collector exposing disk device stats. // NewDiskstatsCollector returns a new Collector exposing disk device stats.
// Docs from https://www.kernel.org/doc/Documentation/iostats.txt // Docs from https://www.kernel.org/doc/Documentation/iostats.txt
func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) { func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
var diskLabelNames = []string{"device"} var diskLabelNames = []string{"device"}
fs, err := blockdevice.NewFS(*procPath, *sysPath) fs, err := blockdevice.NewFS(*procPath, *sysPath)
if err != nil { if err != nil {
@ -112,7 +112,7 @@ func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) {
infoDesc: typedFactorDesc{ infoDesc: typedFactorDesc{
desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "info"), desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "info"),
"Info of /sys/block/<block_device>.", "Info of /sys/block/<block_device>.",
[]string{"device", "major", "minor", "path", "wwn", "model", "serial", "revision", "rotational"}, []string{"device", "major", "minor", "path", "wwn", "model", "serial", "revision"},
nil, nil,
), valueType: prometheus.GaugeValue, ), valueType: prometheus.GaugeValue,
}, },
@ -261,7 +261,7 @@ func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) {
// Only enable getting device properties from udev if the directory is readable. // Only enable getting device properties from udev if the directory is readable.
if stat, err := os.Stat(*udevDataPath); err != nil || !stat.IsDir() { if stat, err := os.Stat(*udevDataPath); err != nil || !stat.IsDir() {
logger.Error("Failed to open directory, disabling udev device properties", "path", *udevDataPath) level.Error(logger).Log("msg", "Failed to open directory, disabling udev device properties", "path", *udevDataPath)
} else { } else {
collector.getUdevDeviceProperties = getUdevDeviceProperties collector.getUdevDeviceProperties = getUdevDeviceProperties
} }
@ -283,21 +283,7 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
info, err := getUdevDeviceProperties(stats.MajorNumber, stats.MinorNumber) info, err := getUdevDeviceProperties(stats.MajorNumber, stats.MinorNumber)
if err != nil { if err != nil {
c.logger.Debug("Failed to parse udev info", "err", err) level.Debug(c.logger).Log("msg", "Failed to parse udev info", "err", err)
}
// This is usually the serial printed on the disk label.
serial := info[udevSCSIIdentSerial]
// If it's undefined, fallback to ID_SERIAL_SHORT instead.
if serial == "" {
serial = info[udevIDSerialShort]
}
queueStats, err := c.fs.SysBlockDeviceQueueStats(dev)
// Block Device Queue stats may not exist for all devices.
if err != nil && !os.IsNotExist(err) {
c.logger.Debug("Failed to get block device queue stats", "device", dev, "err", err)
} }
ch <- c.infoDesc.mustNewConstMetric(1.0, dev, ch <- c.infoDesc.mustNewConstMetric(1.0, dev,
@ -306,9 +292,8 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
info[udevIDPath], info[udevIDPath],
info[udevIDWWN], info[udevIDWWN],
info[udevIDModel], info[udevIDModel],
serial, info[udevIDSerialShort],
info[udevIDRevision], info[udevIDRevision],
strconv.FormatUint(queueStats.Rotational, 2),
) )
statCount := stats.IoStatsCount - 3 // Total diskstats record count, less MajorNumber, MinorNumber and DeviceName statCount := stats.IoStatsCount - 3 // Total diskstats record count, less MajorNumber, MinorNumber and DeviceName
@ -361,14 +346,14 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
for attr, desc := range c.ataDescs { for attr, desc := range c.ataDescs {
str, ok := info[attr] str, ok := info[attr]
if !ok { if !ok {
c.logger.Debug("Udev attribute does not exist", "attribute", attr) level.Debug(c.logger).Log("msg", "Udev attribute does not exist", "attribute", attr)
continue continue
} }
if value, err := strconv.ParseFloat(str, 64); err == nil { if value, err := strconv.ParseFloat(str, 64); err == nil {
ch <- desc.mustNewConstMetric(value, dev) ch <- desc.mustNewConstMetric(value, dev)
} else { } else {
c.logger.Error("Failed to parse ATA value", "err", err) level.Error(c.logger).Log("msg", "Failed to parse ATA value", "err", err)
} }
} }
} }
@ -398,9 +383,15 @@ func getUdevDeviceProperties(major, minor uint32) (udevInfo, error) {
line = strings.TrimPrefix(line, udevDevicePropertyPrefix) line = strings.TrimPrefix(line, udevDevicePropertyPrefix)
/* TODO: After we drop support for Go 1.17, the condition below can be simplified to:
if name, value, found := strings.Cut(line, "="); found { if name, value, found := strings.Cut(line, "="); found {
info[name] = value info[name] = value
} }
*/
if fields := strings.SplitN(line, "=", 2); len(fields) == 2 {
info[fields[0]] = fields[1]
}
} }
return info, nil return info, nil
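Note: the udev data files read above contain one record per line, and only the "E:"-prefixed lines carry the KEY=VALUE device properties that feed the disk info labels and ATA metrics. A self-contained sketch of that parsing step (the sample input is made up; the real files live under the *udevDataPath directory configured above):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// parseUdevProperties extracts "E:KEY=VALUE" properties, mirroring the loop above.
func parseUdevProperties(data string) map[string]string {
	info := map[string]string{}
	scanner := bufio.NewScanner(strings.NewReader(data))
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, "E:") {
			continue
		}
		line = strings.TrimPrefix(line, "E:")
		if name, value, found := strings.Cut(line, "="); found {
			info[name] = value
		}
	}
	return info
}

func main() {
	sample := "S:disk/by-id/wwn-0x500000000000abcd\nE:ID_MODEL=Example_SSD\nE:ID_SERIAL_SHORT=ABC123\n"
	fmt.Println(parseUdevProperties(sample))
	// map[ID_MODEL:Example_SSD ID_SERIAL_SHORT:ABC123]
}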

View file

@ -11,18 +11,15 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !nodiskstats
// +build !nodiskstats
package collector package collector
import ( import (
"fmt" "fmt"
"io" "os"
"log/slog"
"strings" "strings"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/client_golang/prometheus/testutil"
) )
@ -39,7 +36,7 @@ func (c testDiskStatsCollector) Describe(ch chan<- *prometheus.Desc) {
prometheus.DescribeByCollect(c, ch) prometheus.DescribeByCollect(c, ch)
} }
func NewTestDiskStatsCollector(logger *slog.Logger) (prometheus.Collector, error) { func NewTestDiskStatsCollector(logger log.Logger) (prometheus.Collector, error) {
dsc, err := NewDiskstatsCollector(logger) dsc, err := NewDiskstatsCollector(logger)
if err != nil { if err != nil {
return testDiskStatsCollector{}, err return testDiskStatsCollector{}, err
@ -53,7 +50,7 @@ func TestDiskStats(t *testing.T) {
*sysPath = "fixtures/sys" *sysPath = "fixtures/sys"
*procPath = "fixtures/proc" *procPath = "fixtures/proc"
*udevDataPath = "fixtures/udev/data" *udevDataPath = "fixtures/udev/data"
*diskstatsDeviceExclude = "^(z?ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$" *diskstatsDeviceExclude = "^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$"
testcase := `# HELP node_disk_ata_rotation_rate_rpm ATA disk rotation rate in RPMs (0 for SSDs). testcase := `# HELP node_disk_ata_rotation_rate_rpm ATA disk rotation rate in RPMs (0 for SSDs).
# TYPE node_disk_ata_rotation_rate_rpm gauge # TYPE node_disk_ata_rotation_rate_rpm gauge
node_disk_ata_rotation_rate_rpm{device="sda"} 7200 node_disk_ata_rotation_rate_rpm{device="sda"} 7200
@ -113,21 +110,21 @@ node_disk_flush_requests_time_seconds_total{device="sdc"} 1.944
node_disk_flush_requests_total{device="sdc"} 1555 node_disk_flush_requests_total{device="sdc"} 1555
# HELP node_disk_info Info of /sys/block/<block_device>. # HELP node_disk_info Info of /sys/block/<block_device>.
# TYPE node_disk_info gauge # TYPE node_disk_info gauge
node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",rotational="0",serial="",wwn=""} 1 node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",serial="",wwn=""} 1
node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="",wwn=""} 1 node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="",wwn=""} 1 node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1
node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",rotational="0",serial="",wwn=""} 1 node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1
node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",rotational="0",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1 node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1
node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",rotational="1",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1 node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1
node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",rotational="0",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1 node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1
node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",rotational="0",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1 node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1
node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",rotational="0",serial="AAAABBBBCCCC1",wwn=""} 1 node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",serial="AAAABBBBCCCC1",wwn=""} 1
node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",rotational="0",serial="",wwn=""} 1 node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",serial="",wwn=""} 1
# HELP node_disk_io_now The number of I/Os currently in progress. # HELP node_disk_io_now The number of I/Os currently in progress.
# TYPE node_disk_io_now gauge # TYPE node_disk_io_now gauge
node_disk_io_now{device="dm-0"} 0 node_disk_io_now{device="dm-0"} 0
@ -317,10 +314,10 @@ node_disk_written_bytes_total{device="sr0"} 0
node_disk_written_bytes_total{device="vda"} 1.0938236928e+11 node_disk_written_bytes_total{device="vda"} 1.0938236928e+11
` `
logger := slog.New(slog.NewTextHandler(io.Discard, nil)) logger := log.NewLogfmtLogger(os.Stderr)
collector, err := NewDiskstatsCollector(logger) collector, err := NewDiskstatsCollector(logger)
if err != nil { if err != nil {
t.Fatal(err) panic(err)
} }
c, err := NewTestDiskStatsCollector(logger) c, err := NewTestDiskStatsCollector(logger)
if err != nil { if err != nil {

View file

@ -11,16 +11,16 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !nodiskstats && !amd64 //go:build openbsd && !amd64 && !nodiskstats
// +build !nodiskstats,!amd64 // +build openbsd,!amd64,!nodiskstats
package collector package collector
import ( import (
"fmt" "fmt"
"log/slog"
"unsafe" "unsafe"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
@ -41,7 +41,7 @@ type diskstatsCollector struct {
time typedDesc time typedDesc
deviceFilter deviceFilter deviceFilter deviceFilter
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -49,7 +49,7 @@ func init() {
} }
// NewDiskstatsCollector returns a new Collector exposing disk device stats. // NewDiskstatsCollector returns a new Collector exposing disk device stats.
func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) { func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
deviceFilter, err := newDiskstatsDeviceFilter(logger) deviceFilter, err := newDiskstatsDeviceFilter(logger)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse device filter flags: %w", err) return nil, fmt.Errorf("failed to parse device filter flags: %w", err)

View file

@ -18,9 +18,9 @@ package collector
import ( import (
"fmt" "fmt"
"log/slog"
"unsafe" "unsafe"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
@ -52,7 +52,7 @@ type diskstatsCollector struct {
time typedDesc time typedDesc
deviceFilter deviceFilter deviceFilter deviceFilter
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -60,7 +60,7 @@ func init() {
} }
// NewDiskstatsCollector returns a new Collector exposing disk device stats. // NewDiskstatsCollector returns a new Collector exposing disk device stats.
func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) { func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
deviceFilter, err := newDiskstatsDeviceFilter(logger) deviceFilter, err := newDiskstatsDeviceFilter(logger)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse device filter flags: %w", err) return nil, fmt.Errorf("failed to parse device filter flags: %w", err)

View file

@ -19,10 +19,11 @@ package collector
import ( import (
"errors" "errors"
"fmt" "fmt"
"log/slog"
"os" "os"
"strings" "strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs" "github.com/prometheus/procfs/sysfs"
) )
@ -37,7 +38,7 @@ func init() {
} }
// NewDMICollector returns a new Collector exposing DMI information. // NewDMICollector returns a new Collector exposing DMI information.
func NewDMICollector(logger *slog.Logger) (Collector, error) { func NewDMICollector(logger log.Logger) (Collector, error) {
fs, err := sysfs.NewFS(*sysPath) fs, err := sysfs.NewFS(*sysPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err) return nil, fmt.Errorf("failed to open sysfs: %w", err)
@ -46,7 +47,7 @@ func NewDMICollector(logger *slog.Logger) (Collector, error) {
dmi, err := fs.DMIClass() dmi, err := fs.DMIClass()
if err != nil { if err != nil {
if errors.Is(err, os.ErrNotExist) { if errors.Is(err, os.ErrNotExist) {
logger.Debug("Platform does not support Desktop Management Interface (DMI) information", "err", err) level.Debug(logger).Log("msg", "Platform does not support Desktop Management Interface (DMI) information", "err", err)
dmi = &sysfs.DMIClass{} dmi = &sysfs.DMIClass{}
} else { } else {
return nil, fmt.Errorf("failed to read Desktop Management Interface (DMI) information: %w", err) return nil, fmt.Errorf("failed to read Desktop Management Interface (DMI) information: %w", err)

View file

@ -20,11 +20,12 @@ import (
"bufio" "bufio"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"os" "os"
"strconv" "strconv"
"strings" "strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -78,14 +79,14 @@ type drbdCollector struct {
numerical map[string]drbdNumericalMetric numerical map[string]drbdNumericalMetric
stringPair map[string]drbdStringPairMetric stringPair map[string]drbdStringPairMetric
connected *prometheus.Desc connected *prometheus.Desc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
registerCollector("drbd", defaultDisabled, newDRBDCollector) registerCollector("drbd", defaultDisabled, newDRBDCollector)
} }
func newDRBDCollector(logger *slog.Logger) (Collector, error) { func newDRBDCollector(logger log.Logger) (Collector, error) {
return &drbdCollector{ return &drbdCollector{
numerical: map[string]drbdNumericalMetric{ numerical: map[string]drbdNumericalMetric{
"ns": newDRBDNumericalMetric( "ns": newDRBDNumericalMetric(
@ -190,7 +191,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error {
file, err := os.Open(statsFile) file, err := os.Open(statsFile)
if err != nil { if err != nil {
if errors.Is(err, os.ErrNotExist) { if errors.Is(err, os.ErrNotExist) {
c.logger.Debug("stats file does not exist, skipping", "file", statsFile, "err", err) level.Debug(c.logger).Log("msg", "stats file does not exist, skipping", "file", statsFile, "err", err)
return ErrNoData return ErrNoData
} }
@ -207,7 +208,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error {
kv := strings.Split(field, ":") kv := strings.Split(field, ":")
if len(kv) != 2 { if len(kv) != 2 {
c.logger.Debug("skipping invalid key:value pair", "field", field) level.Debug(c.logger).Log("msg", "skipping invalid key:value pair", "field", field)
continue continue
} }
@ -273,7 +274,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error {
continue continue
} }
c.logger.Debug("unhandled key-value pair", "key", kv[0], "value", kv[1]) level.Debug(c.logger).Log("msg", "unhandled key-value pair", "key", kv[0], "value", kv[1])
} }
return scanner.Err() return scanner.Err()

View file

@ -18,8 +18,8 @@ package collector
import ( import (
"fmt" "fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs" "github.com/prometheus/procfs/sysfs"
) )
@ -30,7 +30,7 @@ const (
type drmCollector struct { type drmCollector struct {
fs sysfs.FS fs sysfs.FS
logger *slog.Logger logger log.Logger
CardInfo *prometheus.Desc CardInfo *prometheus.Desc
GPUBusyPercent *prometheus.Desc GPUBusyPercent *prometheus.Desc
MemoryGTTSize *prometheus.Desc MemoryGTTSize *prometheus.Desc
@ -46,7 +46,7 @@ func init() {
} }
// NewDrmCollector returns a new Collector exposing /sys/class/drm/card?/device stats. // NewDrmCollector returns a new Collector exposing /sys/class/drm/card?/device stats.
func NewDrmCollector(logger *slog.Logger) (Collector, error) { func NewDrmCollector(logger log.Logger) (Collector, error) {
fs, err := sysfs.NewFS(*sysPath) fs, err := sysfs.NewFS(*sysPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err) return nil, fmt.Errorf("failed to open sysfs: %w", err)

View file

@ -18,10 +18,10 @@ package collector
import ( import (
"fmt" "fmt"
"log/slog"
"path/filepath" "path/filepath"
"regexp" "regexp"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -39,7 +39,7 @@ type edacCollector struct {
ueCount *prometheus.Desc ueCount *prometheus.Desc
csRowCECount *prometheus.Desc csRowCECount *prometheus.Desc
csRowUECount *prometheus.Desc csRowUECount *prometheus.Desc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -47,7 +47,7 @@ func init() {
} }
// NewEdacCollector returns a new Collector exposing edac stats. // NewEdacCollector returns a new Collector exposing edac stats.
func NewEdacCollector(logger *slog.Logger) (Collector, error) { func NewEdacCollector(logger log.Logger) (Collector, error) {
return &edacCollector{ return &edacCollector{
ceCount: prometheus.NewDesc( ceCount: prometheus.NewDesc(
prometheus.BuildFQName(namespace, edacSubsystem, "correctable_errors_total"), prometheus.BuildFQName(namespace, edacSubsystem, "correctable_errors_total"),

View file

@ -18,8 +18,8 @@ package collector
import ( import (
"fmt" "fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs" "github.com/prometheus/procfs"
) )
@ -28,7 +28,7 @@ type entropyCollector struct {
fs procfs.FS fs procfs.FS
entropyAvail *prometheus.Desc entropyAvail *prometheus.Desc
entropyPoolSize *prometheus.Desc entropyPoolSize *prometheus.Desc
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -36,7 +36,7 @@ func init() {
} }
// NewEntropyCollector returns a new Collector exposing entropy stats. // NewEntropyCollector returns a new Collector exposing entropy stats.
func NewEntropyCollector(logger *slog.Logger) (Collector, error) { func NewEntropyCollector(logger log.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath) fs, err := procfs.NewFS(*procPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err) return nil, fmt.Errorf("failed to open procfs: %w", err)

View file

@ -23,7 +23,6 @@ package collector
import ( import (
"errors" "errors"
"fmt" "fmt"
"log/slog"
"os" "os"
"regexp" "regexp"
"sort" "sort"
@ -31,11 +30,13 @@ import (
"sync" "sync"
"syscall" "syscall"
"github.com/alecthomas/kingpin/v2" "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs" "github.com/prometheus/procfs/sysfs"
"github.com/safchain/ethtool" "github.com/safchain/ethtool"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
"gopkg.in/alecthomas/kingpin.v2"
) )
var ( var (
@ -78,13 +79,13 @@ type ethtoolCollector struct {
deviceFilter deviceFilter deviceFilter deviceFilter
infoDesc *prometheus.Desc infoDesc *prometheus.Desc
metricsPattern *regexp.Regexp metricsPattern *regexp.Regexp
logger *slog.Logger logger log.Logger
} }
// makeEthtoolCollector is the internal constructor for EthtoolCollector. // makeEthtoolCollector is the internal constructor for EthtoolCollector.
// This allows NewEthtoolTestCollector to override its .ethtool interface // This allows NewEthtoolTestCollector to override its .ethtool interface
// for testing. // for testing.
func makeEthtoolCollector(logger *slog.Logger) (*ethtoolCollector, error) { func makeEthtoolCollector(logger log.Logger) (*ethtoolCollector, error) {
fs, err := sysfs.NewFS(*sysPath) fs, err := sysfs.NewFS(*sysPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err) return nil, fmt.Errorf("failed to open sysfs: %w", err)
@ -95,16 +96,6 @@ func makeEthtoolCollector(logger *slog.Logger) (*ethtoolCollector, error) {
return nil, fmt.Errorf("failed to initialize ethtool library: %w", err) return nil, fmt.Errorf("failed to initialize ethtool library: %w", err)
} }
if *ethtoolDeviceInclude != "" {
logger.Info("Parsed flag --collector.ethtool.device-include", "flag", *ethtoolDeviceInclude)
}
if *ethtoolDeviceExclude != "" {
logger.Info("Parsed flag --collector.ethtool.device-exclude", "flag", *ethtoolDeviceExclude)
}
if *ethtoolIncludedMetrics != "" {
logger.Info("Parsed flag --collector.ethtool.metrics-include", "flag", *ethtoolIncludedMetrics)
}
// Pre-populate some common ethtool metrics. // Pre-populate some common ethtool metrics.
return &ethtoolCollector{ return &ethtoolCollector{
fs: fs, fs: fs,
@ -222,7 +213,7 @@ func buildEthtoolFQName(metric string) string {
} }
// NewEthtoolCollector returns a new Collector exposing ethtool stats. // NewEthtoolCollector returns a new Collector exposing ethtool stats.
func NewEthtoolCollector(logger *slog.Logger) (Collector, error) { func NewEthtoolCollector(logger log.Logger) (Collector, error) {
return makeEthtoolCollector(logger) return makeEthtoolCollector(logger)
} }
@ -285,84 +276,29 @@ func (c *ethtoolCollector) updateSpeeds(ch chan<- prometheus.Metric, prefix stri
duplex string duplex string
phy string phy string
}{ }{
unix.ETHTOOL_LINK_MODE_10baseT_Half_BIT: {10, half, "T"}, unix.ETHTOOL_LINK_MODE_10baseT_Half_BIT: {10, half, "T"},
unix.ETHTOOL_LINK_MODE_10baseT_Full_BIT: {10, full, "T"}, unix.ETHTOOL_LINK_MODE_10baseT_Full_BIT: {10, full, "T"},
unix.ETHTOOL_LINK_MODE_100baseT_Half_BIT: {100, half, "T"}, unix.ETHTOOL_LINK_MODE_100baseT_Half_BIT: {100, half, "T"},
unix.ETHTOOL_LINK_MODE_100baseT_Full_BIT: {100, full, "T"}, unix.ETHTOOL_LINK_MODE_100baseT_Full_BIT: {100, full, "T"},
unix.ETHTOOL_LINK_MODE_1000baseT_Half_BIT: {1000, half, "T"}, unix.ETHTOOL_LINK_MODE_1000baseT_Half_BIT: {1000, half, "T"},
unix.ETHTOOL_LINK_MODE_1000baseT_Full_BIT: {1000, full, "T"}, unix.ETHTOOL_LINK_MODE_1000baseT_Full_BIT: {1000, full, "T"},
unix.ETHTOOL_LINK_MODE_10000baseT_Full_BIT: {10000, full, "T"}, unix.ETHTOOL_LINK_MODE_10000baseT_Full_BIT: {10000, full, "T"},
unix.ETHTOOL_LINK_MODE_2500baseT_Full_BIT: {2500, full, "T"}, unix.ETHTOOL_LINK_MODE_2500baseT_Full_BIT: {2500, full, "T"},
unix.ETHTOOL_LINK_MODE_1000baseKX_Full_BIT: {1000, full, "KX"}, unix.ETHTOOL_LINK_MODE_1000baseKX_Full_BIT: {1000, full, "KX"},
unix.ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT: {10000, full, "KX4"}, unix.ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT: {10000, full, "KX4"},
unix.ETHTOOL_LINK_MODE_10000baseKR_Full_BIT: {10000, full, "KR"}, unix.ETHTOOL_LINK_MODE_10000baseKR_Full_BIT: {10000, full, "KR"},
unix.ETHTOOL_LINK_MODE_10000baseR_FEC_BIT: {10000, full, "R_FEC"}, unix.ETHTOOL_LINK_MODE_10000baseR_FEC_BIT: {10000, full, "R_FEC"},
unix.ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT: {20000, full, "MLD2"}, unix.ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT: {20000, full, "MLD2"},
unix.ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT: {20000, full, "KR2"}, unix.ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT: {20000, full, "KR2"},
unix.ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT: {40000, full, "KR4"}, unix.ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT: {40000, full, "KR4"},
unix.ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT: {40000, full, "CR4"}, unix.ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT: {40000, full, "CR4"},
unix.ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT: {40000, full, "SR4"}, unix.ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT: {40000, full, "SR4"},
unix.ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT: {40000, full, "LR4"}, unix.ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT: {40000, full, "LR4"},
unix.ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT: {56000, full, "KR4"}, unix.ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT: {56000, full, "KR4"},
unix.ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT: {56000, full, "CR4"}, unix.ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT: {56000, full, "CR4"},
unix.ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT: {56000, full, "SR4"}, unix.ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT: {56000, full, "SR4"},
unix.ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT: {56000, full, "LR4"}, unix.ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT: {56000, full, "LR4"},
unix.ETHTOOL_LINK_MODE_25000baseCR_Full_BIT: {25000, full, "CR"}, unix.ETHTOOL_LINK_MODE_25000baseCR_Full_BIT: {25000, full, "CR"},
unix.ETHTOOL_LINK_MODE_25000baseKR_Full_BIT: {25000, full, "KR"},
unix.ETHTOOL_LINK_MODE_25000baseSR_Full_BIT: {25000, full, "SR"},
unix.ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT: {50000, full, "CR2"},
unix.ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT: {50000, full, "KR2"},
unix.ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT: {100000, full, "KR4"},
unix.ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT: {100000, full, "SR4"},
unix.ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT: {100000, full, "CR4"},
unix.ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT: {100000, full, "R4_ER4"},
unix.ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT: {50000, full, "SR2"},
unix.ETHTOOL_LINK_MODE_1000baseX_Full_BIT: {1000, full, "X"},
unix.ETHTOOL_LINK_MODE_10000baseCR_Full_BIT: {10000, full, "CR"},
unix.ETHTOOL_LINK_MODE_10000baseSR_Full_BIT: {10000, full, "SR"},
unix.ETHTOOL_LINK_MODE_10000baseLR_Full_BIT: {10000, full, "LR"},
unix.ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT: {10000, full, "LRM"},
unix.ETHTOOL_LINK_MODE_10000baseER_Full_BIT: {10000, full, "ER"},
unix.ETHTOOL_LINK_MODE_5000baseT_Full_BIT: {5000, full, "T"},
unix.ETHTOOL_LINK_MODE_50000baseKR_Full_BIT: {50000, full, "KR"},
unix.ETHTOOL_LINK_MODE_50000baseSR_Full_BIT: {50000, full, "SR"},
unix.ETHTOOL_LINK_MODE_50000baseCR_Full_BIT: {50000, full, "CR"},
unix.ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT: {50000, full, "LR_ER_FR"},
unix.ETHTOOL_LINK_MODE_50000baseDR_Full_BIT: {50000, full, "DR"},
unix.ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT: {100000, full, "KR2"},
unix.ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT: {100000, full, "SR2"},
unix.ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT: {100000, full, "CR2"},
unix.ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT: {100000, full, "LR2_ER2_FR2"},
unix.ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT: {100000, full, "DR2"},
unix.ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT: {200000, full, "KR4"},
unix.ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT: {200000, full, "SR4"},
unix.ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT: {200000, full, "LR4_ER4_FR4"},
unix.ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT: {200000, full, "DR4"},
unix.ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT: {200000, full, "CR4"},
unix.ETHTOOL_LINK_MODE_100baseT1_Full_BIT: {100, full, "T1"},
unix.ETHTOOL_LINK_MODE_1000baseT1_Full_BIT: {1000, full, "T1"},
unix.ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT: {400000, full, "KR8"},
unix.ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT: {400000, full, "SR8"},
unix.ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT: {400000, full, "LR8_ER8_FR8"},
unix.ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT: {400000, full, "DR8"},
unix.ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT: {400000, full, "CR8"},
unix.ETHTOOL_LINK_MODE_100000baseKR_Full_BIT: {100000, full, "KR"},
unix.ETHTOOL_LINK_MODE_100000baseSR_Full_BIT: {100000, full, "SR"},
unix.ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT: {100000, full, "LR_ER_FR"},
unix.ETHTOOL_LINK_MODE_100000baseCR_Full_BIT: {100000, full, "CR"},
unix.ETHTOOL_LINK_MODE_100000baseDR_Full_BIT: {100000, full, "DR"},
unix.ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT: {200000, full, "KR2"},
unix.ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT: {200000, full, "SR2"},
unix.ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT: {200000, full, "LR2_ER2_FR2"},
unix.ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT: {200000, full, "DR2"},
unix.ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT: {200000, full, "CR2"},
unix.ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT: {400000, full, "KR4"},
unix.ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT: {400000, full, "SR4"},
unix.ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT: {400000, full, "LR4_ER4_FR4"},
unix.ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT: {400000, full, "DR4"},
unix.ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT: {400000, full, "CR4"},
unix.ETHTOOL_LINK_MODE_100baseFX_Half_BIT: {100, half, "FX"},
unix.ETHTOOL_LINK_MODE_100baseFX_Full_BIT: {100, full, "FX"},
} { } {
if linkModes&(1<<bit) != 0 { if linkModes&(1<<bit) != 0 {
ch <- prometheus.MustNewConstMetric(c.entry(linkMode), prometheus.GaugeValue, ch <- prometheus.MustNewConstMetric(c.entry(linkMode), prometheus.GaugeValue,
@ -372,10 +308,10 @@ func (c *ethtoolCollector) updateSpeeds(ch chan<- prometheus.Metric, prefix stri
} }
func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error { func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
netClass, err := c.fs.NetClassDevices() netClass, err := c.fs.NetClass()
if err != nil { if err != nil {
if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) { if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) {
c.logger.Debug("Could not read netclass file", "err", err) level.Debug(c.logger).Log("msg", "Could not read netclass file", "err", err)
return ErrNoData return ErrNoData
} }
return fmt.Errorf("could not get net class info: %w", err) return fmt.Errorf("could not get net class info: %w", err)
@ -385,7 +321,7 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
return fmt.Errorf("no network devices found") return fmt.Errorf("no network devices found")
} }
for _, device := range netClass { for device := range netClass {
var stats map[string]uint64 var stats map[string]uint64
var err error var err error
@ -404,12 +340,12 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
} else { } else {
if errno, ok := err.(syscall.Errno); ok { if errno, ok := err.(syscall.Errno); ok {
if err == unix.EOPNOTSUPP { if err == unix.EOPNOTSUPP {
c.logger.Debug("ethtool link info error", "err", err, "device", device, "errno", uint(errno)) level.Debug(c.logger).Log("msg", "ethtool link info error", "err", err, "device", device, "errno", uint(errno))
} else if errno != 0 { } else if errno != 0 {
c.logger.Error("ethtool link info error", "err", err, "device", device, "errno", uint(errno)) level.Error(c.logger).Log("msg", "ethtool link info error", "err", err, "device", device, "errno", uint(errno))
} }
} else { } else {
c.logger.Error("ethtool link info error", "err", err, "device", device) level.Error(c.logger).Log("msg", "ethtool link info error", "err", err, "device", device)
} }
} }
@ -421,12 +357,12 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
} else { } else {
if errno, ok := err.(syscall.Errno); ok { if errno, ok := err.(syscall.Errno); ok {
if err == unix.EOPNOTSUPP { if err == unix.EOPNOTSUPP {
c.logger.Debug("ethtool driver info error", "err", err, "device", device, "errno", uint(errno)) level.Debug(c.logger).Log("msg", "ethtool driver info error", "err", err, "device", device, "errno", uint(errno))
} else if errno != 0 { } else if errno != 0 {
c.logger.Error("ethtool driver info error", "err", err, "device", device, "errno", uint(errno)) level.Error(c.logger).Log("msg", "ethtool driver info error", "err", err, "device", device, "errno", uint(errno))
} }
} else { } else {
c.logger.Error("ethtool driver info error", "err", err, "device", device) level.Error(c.logger).Log("msg", "ethtool driver info error", "err", err, "device", device)
} }
} }
@ -437,39 +373,35 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil { if err != nil {
if errno, ok := err.(syscall.Errno); ok { if errno, ok := err.(syscall.Errno); ok {
if err == unix.EOPNOTSUPP { if err == unix.EOPNOTSUPP {
c.logger.Debug("ethtool stats error", "err", err, "device", device, "errno", uint(errno)) level.Debug(c.logger).Log("msg", "ethtool stats error", "err", err, "device", device, "errno", uint(errno))
} else if errno != 0 { } else if errno != 0 {
c.logger.Error("ethtool stats error", "err", err, "device", device, "errno", uint(errno)) level.Error(c.logger).Log("msg", "ethtool stats error", "err", err, "device", device, "errno", uint(errno))
} }
} else { } else {
c.logger.Error("ethtool stats error", "err", err, "device", device) level.Error(c.logger).Log("msg", "ethtool stats error", "err", err, "device", device)
} }
} }
if len(stats) == 0 { if stats == nil || len(stats) < 1 {
// No stats returned; device does not support ethtool stats. // No stats returned; device does not support ethtool stats.
continue continue
} }
// Sanitizing the metric names can lead to duplicate metric names. Therefore check for clashes beforehand. // Sanitizing the metric names can lead to duplicate metric names. Therefore check for clashes beforehand.
metricFQNames := make(map[string]string) metricFQNames := make(map[string]string)
renamedStats := make(map[string]uint64, len(stats))
for metric := range stats { for metric := range stats {
metricName := SanitizeMetricName(metric) if !c.metricsPattern.MatchString(metric) {
if !c.metricsPattern.MatchString(metricName) {
continue continue
} }
metricFQName := buildEthtoolFQName(metricName) metricFQName := buildEthtoolFQName(metric)
existingMetric, exists := metricFQNames[metricFQName] existingMetric, exists := metricFQNames[metricFQName]
if exists { if exists {
c.logger.Debug("dropping duplicate metric name", "device", device, level.Debug(c.logger).Log("msg", "dropping duplicate metric name", "device", device,
"metricFQName", metricFQName, "metric1", existingMetric, "metric2", metricName) "metricFQName", metricFQName, "metric1", existingMetric, "metric2", metric)
// Keep the metricName as "deleted" in the dict in case there are 3 duplicates. // Keep the metric as "deleted" in the dict in case there are 3 duplicates.
metricFQNames[metricFQName] = "" metricFQNames[metricFQName] = ""
} else { } else {
metricFQNames[metricFQName] = metricName metricFQNames[metricFQName] = metric
// Later we'll go look for the stat with the "sanitized" metric name, so we can copy it there already
renamedStats[metricName] = stats[metric]
} }
} }
@ -487,7 +419,7 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
continue continue
} }
val := renamedStats[metric] val := stats[metric]
// Check to see if this metric exists; if not then create it and store it in c.entries. // Check to see if this metric exists; if not then create it and store it in c.entries.
entry := c.entryWithCreate(metric, metricFQName) entry := c.entryWithCreate(metric, metricFQName)
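
The updateSpeeds table above maps ETHTOOL_LINK_MODE_*_BIT constants to a speed/duplex/PHY triple and reports a mode whenever the corresponding bit is set in the link-mode mask (linkModes & (1 << bit) != 0). A small standalone sketch of that bit test, reusing a few of the same constants from golang.org/x/sys/unix; the mask value here is made up for illustration:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical link-mode mask with two bits set; the real mask comes
	// from the ethtool link info ioctl.
	var linkModes uint64
	linkModes |= 1 << unix.ETHTOOL_LINK_MODE_1000baseT_Full_BIT
	linkModes |= 1 << unix.ETHTOOL_LINK_MODE_10000baseT_Full_BIT

	modes := map[uint]string{
		unix.ETHTOOL_LINK_MODE_1000baseT_Full_BIT:    "1000baseT/Full",
		unix.ETHTOOL_LINK_MODE_10000baseT_Full_BIT:   "10000baseT/Full",
		unix.ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT: "40000baseCR4/Full",
	}
	for bit, name := range modes {
		if linkModes&(1<<bit) != 0 {
			fmt.Println("supported:", name)
		}
	}
}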

View file

@ -11,16 +11,11 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !noethtool
// +build !noethtool
package collector package collector
import ( import (
"bufio" "bufio"
"fmt" "fmt"
"io"
"log/slog"
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
@ -28,6 +23,7 @@ import (
"syscall" "syscall"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/safchain/ethtool" "github.com/safchain/ethtool"
@ -50,7 +46,7 @@ func (c testEthtoolCollector) Describe(ch chan<- *prometheus.Desc) {
prometheus.DescribeByCollect(c, ch) prometheus.DescribeByCollect(c, ch)
} }
func NewTestEthtoolCollector(logger *slog.Logger) (prometheus.Collector, error) { func NewTestEthtoolCollector(logger log.Logger) (prometheus.Collector, error) {
dsc, err := NewEthtoolTestCollector(logger) dsc, err := NewEthtoolTestCollector(logger)
if err != nil { if err != nil {
return testEthtoolCollector{}, err return testEthtoolCollector{}, err
@ -212,18 +208,16 @@ func (e *EthtoolFixture) LinkInfo(intf string) (ethtool.EthtoolCmd, error) {
items := strings.Split(line, ": ") items := strings.Split(line, ": ")
if items[0] == "Supported pause frame use" { if items[0] == "Supported pause frame use" {
switch items[1] { if items[1] == "Symmetric" {
case "Symmetric":
res.Supported |= (1 << unix.ETHTOOL_LINK_MODE_Pause_BIT) res.Supported |= (1 << unix.ETHTOOL_LINK_MODE_Pause_BIT)
case "Receive-only": } else if items[1] == "Receive-only" {
res.Supported |= (1 << unix.ETHTOOL_LINK_MODE_Asym_Pause_BIT) res.Supported |= (1 << unix.ETHTOOL_LINK_MODE_Asym_Pause_BIT)
} }
} }
if items[0] == "Advertised pause frame use" { if items[0] == "Advertised pause frame use" {
switch items[1] { if items[1] == "Symmetric" {
case "Symmetric":
res.Advertising |= (1 << unix.ETHTOOL_LINK_MODE_Pause_BIT) res.Advertising |= (1 << unix.ETHTOOL_LINK_MODE_Pause_BIT)
case "Receive-only": } else if items[1] == "Receive-only" {
res.Advertising |= (1 << unix.ETHTOOL_LINK_MODE_Asym_Pause_BIT) res.Advertising |= (1 << unix.ETHTOOL_LINK_MODE_Asym_Pause_BIT)
} }
} }
@ -258,20 +252,19 @@ func (e *EthtoolFixture) LinkInfo(intf string) (ethtool.EthtoolCmd, error) {
return res, err return res, err
} }
func NewEthtoolTestCollector(logger *slog.Logger) (Collector, error) { func NewEthtoolTestCollector(logger log.Logger) (Collector, error) {
collector, err := makeEthtoolCollector(logger) collector, err := makeEthtoolCollector(logger)
if err != nil {
return nil, err
}
collector.ethtool = &EthtoolFixture{ collector.ethtool = &EthtoolFixture{
fixturePath: "fixtures/ethtool/", fixturePath: "fixtures/ethtool/",
} }
if err != nil {
return nil, err
}
return collector, nil return collector, nil
} }
func TestBuildEthtoolFQName(t *testing.T) { func TestBuildEthtoolFQName(t *testing.T) {
testcases := map[string]string{ testcases := map[string]string{
"port.rx_errors": "node_ethtool_port_received_errors",
"rx_errors": "node_ethtool_received_errors", "rx_errors": "node_ethtool_received_errors",
"Queue[0] AllocFails": "node_ethtool_queue_0_allocfails", "Queue[0] AllocFails": "node_ethtool_queue_0_allocfails",
"Tx LPI entry count": "node_ethtool_transmitted_lpi_entry_count", "Tx LPI entry count": "node_ethtool_transmitted_lpi_entry_count",
@ -295,9 +288,6 @@ node_ethtool_align_errors{device="eth0"} 0
# HELP node_ethtool_info A metric with a constant '1' value labeled by bus_info, device, driver, expansion_rom_version, firmware_version, version. # HELP node_ethtool_info A metric with a constant '1' value labeled by bus_info, device, driver, expansion_rom_version, firmware_version, version.
# TYPE node_ethtool_info gauge # TYPE node_ethtool_info gauge
node_ethtool_info{bus_info="0000:00:1f.6",device="eth0",driver="e1000e",expansion_rom_version="",firmware_version="0.5-4",version="5.11.0-22-generic"} 1 node_ethtool_info{bus_info="0000:00:1f.6",device="eth0",driver="e1000e",expansion_rom_version="",firmware_version="0.5-4",version="5.11.0-22-generic"} 1
# HELP node_ethtool_port_received_dropped Network interface port_rx_dropped
# TYPE node_ethtool_port_received_dropped untyped
node_ethtool_port_received_dropped{device="eth0"} 12028
# HELP node_ethtool_received_broadcast Network interface rx_broadcast # HELP node_ethtool_received_broadcast Network interface rx_broadcast
# TYPE node_ethtool_received_broadcast untyped # TYPE node_ethtool_received_broadcast untyped
node_ethtool_received_broadcast{device="eth0"} 5792 node_ethtool_received_broadcast{device="eth0"} 5792
@ -377,10 +367,10 @@ node_network_supported_speed_bytes{device="eth0",duplex="half",mode="10baseT"} 1
` `
*sysPath = "fixtures/sys" *sysPath = "fixtures/sys"
logger := slog.New(slog.NewTextHandler(io.Discard, nil)) logger := log.NewLogfmtLogger(os.Stderr)
collector, err := NewEthtoolTestCollector(logger) collector, err := NewEthtoolTestCollector(logger)
if err != nil { if err != nil {
t.Fatal(err) panic(err)
} }
c, err := NewTestEthtoolCollector(logger) c, err := NewTestEthtoolCollector(logger)
if err != nil { if err != nil {
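
TestBuildEthtoolFQName above pins the renaming rules: rx_errors becomes node_ethtool_received_errors, Queue[0] AllocFails becomes node_ethtool_queue_0_allocfails, and so on. The helper below is a reconstruction of those rules from the test cases alone (lowercase, squash punctuation and spaces to underscores, expand the rx/tx abbreviations); it is not the collector's actual buildEthtoolFQName implementation:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var nonAlnum = regexp.MustCompile(`[^a-z0-9]+`)

// ethtoolFQName approximates the mapping exercised by TestBuildEthtoolFQName.
func ethtoolFQName(stat string) string {
	s := strings.Trim(nonAlnum.ReplaceAllString(strings.ToLower(stat), "_"), "_")
	parts := strings.Split(s, "_")
	for i, p := range parts {
		switch p {
		case "rx":
			parts[i] = "received"
		case "tx":
			parts[i] = "transmitted"
		}
	}
	return "node_ethtool_" + strings.Join(parts, "_")
}

func main() {
	for _, in := range []string{"rx_errors", "Queue[0] AllocFails", "Tx LPI entry count"} {
		fmt.Printf("%-22s -> %s\n", in, ethtoolFQName(in))
	}
}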

View file

@ -18,13 +18,13 @@
package collector package collector
import ( import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"log/slog"
) )
type execCollector struct { type execCollector struct {
sysctls []bsdSysctl sysctls []bsdSysctl
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -32,7 +32,7 @@ func init() {
} }
// NewExecCollector returns a new Collector exposing system execution statistics. // NewExecCollector returns a new Collector exposing system execution statistics.
func NewExecCollector(logger *slog.Logger) (Collector, error) { func NewExecCollector(logger log.Logger) (Collector, error) {
// From sys/vm/vm_meter.c: // From sys/vm/vm_meter.c:
// All are of type CTLTYPE_UINT. // All are of type CTLTYPE_UINT.
// //

View file

@ -11,18 +11,18 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !nofibrechannel //go:build linux && !nofibrechannel
// +build !nofibrechannel // +build linux,!nofibrechannel
package collector package collector
import ( import (
"fmt" "fmt"
"log/slog"
"os" "os"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/node_exporter/collector/utils"
"github.com/prometheus/procfs/sysfs" "github.com/prometheus/procfs/sysfs"
) )
@ -31,7 +31,7 @@ const maxUint64 = ^uint64(0)
type fibrechannelCollector struct { type fibrechannelCollector struct {
fs sysfs.FS fs sysfs.FS
metricDescs map[string]*prometheus.Desc metricDescs map[string]*prometheus.Desc
logger *slog.Logger logger log.Logger
subsystem string subsystem string
} }
@ -40,7 +40,7 @@ func init() {
} }
// NewFibreChannelCollector returns a new Collector exposing FibreChannel stats. // NewFibreChannelCollector returns a new Collector exposing FibreChannel stats.
func NewFibreChannelCollector(logger *slog.Logger) (Collector, error) { func NewFibreChannelCollector(logger log.Logger) (Collector, error) {
var i fibrechannelCollector var i fibrechannelCollector
var err error var err error
@ -66,6 +66,18 @@ func NewFibreChannelCollector(logger *slog.Logger) (Collector, error) {
"rx_words_total": "Number of words received by host port", "rx_words_total": "Number of words received by host port",
"tx_frames_total": "Number of frames transmitted by host port", "tx_frames_total": "Number of frames transmitted by host port",
"link_failure_total": "Number of times the host port link has failed", "link_failure_total": "Number of times the host port link has failed",
"name": "Name of Fibre Channel HBA",
"speed": "Current operating speed",
"port_state": "Current port state",
"port_type": "Port type, what the port is connected to",
"symbolic_name": "Symbolic Name",
"node_name": "Node Name as hexadecimal string",
"port_id": "Port ID as string",
"port_name": "Port Name as hexadecimal string",
"fabric_name": "Fabric Name; 0 if PTP",
"dev_loss_tmo": "Device Loss Timeout in seconds",
"supported_classes": "The FC classes supported",
"supported_speeds": "The FC speeds supported",
} }
i.metricDescs = make(map[string]*prometheus.Desc) i.metricDescs = make(map[string]*prometheus.Desc)
@ -98,7 +110,7 @@ func (c *fibrechannelCollector) Update(ch chan<- prometheus.Metric) error {
hosts, err := c.fs.FibreChannelClass() hosts, err := c.fs.FibreChannelClass()
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
c.logger.Debug("fibrechannel statistics not found, skipping") level.Debug(c.logger).Log("msg", "fibrechannel statistics not found, skipping")
return ErrNoData return ErrNoData
} }
return fmt.Errorf("error obtaining FibreChannel class info: %s", err) return fmt.Errorf("error obtaining FibreChannel class info: %s", err)
@ -114,36 +126,23 @@ func (c *fibrechannelCollector) Update(ch chan<- prometheus.Metric) error {
infoValue := 1.0 infoValue := 1.0
// First push the Host values // First push the Host values
ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, utils.SafeDereference( ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, host.Name, host.Speed, host.PortState, host.PortType, host.PortID, host.PortName, host.FabricName, host.SymbolicName, host.SupportedClasses, host.SupportedSpeeds, host.DevLossTMO)
host.Name,
host.Speed,
host.PortState,
host.PortType,
host.PortID,
host.PortName,
host.FabricName,
host.SymbolicName,
host.SupportedClasses,
host.SupportedSpeeds,
host.DevLossTMO,
)...)
// Then the counters // Then the counters
// Note: `procfs` guarantees a safe dereference for these counters. c.pushCounter(ch, "dumped_frames_total", host.Counters.DumpedFrames, host.Name)
c.pushCounter(ch, "dumped_frames_total", *host.Counters.DumpedFrames, *host.Name) c.pushCounter(ch, "error_frames_total", host.Counters.ErrorFrames, host.Name)
c.pushCounter(ch, "error_frames_total", *host.Counters.ErrorFrames, *host.Name) c.pushCounter(ch, "invalid_crc_total", host.Counters.InvalidCRCCount, host.Name)
c.pushCounter(ch, "invalid_crc_total", *host.Counters.InvalidCRCCount, *host.Name) c.pushCounter(ch, "rx_frames_total", host.Counters.RXFrames, host.Name)
c.pushCounter(ch, "rx_frames_total", *host.Counters.RXFrames, *host.Name) c.pushCounter(ch, "rx_words_total", host.Counters.RXWords, host.Name)
c.pushCounter(ch, "rx_words_total", *host.Counters.RXWords, *host.Name) c.pushCounter(ch, "tx_frames_total", host.Counters.TXFrames, host.Name)
c.pushCounter(ch, "tx_frames_total", *host.Counters.TXFrames, *host.Name) c.pushCounter(ch, "tx_words_total", host.Counters.TXWords, host.Name)
c.pushCounter(ch, "tx_words_total", *host.Counters.TXWords, *host.Name) c.pushCounter(ch, "seconds_since_last_reset_total", host.Counters.SecondsSinceLastReset, host.Name)
c.pushCounter(ch, "seconds_since_last_reset_total", *host.Counters.SecondsSinceLastReset, *host.Name) c.pushCounter(ch, "invalid_tx_words_total", host.Counters.InvalidTXWordCount, host.Name)
c.pushCounter(ch, "invalid_tx_words_total", *host.Counters.InvalidTXWordCount, *host.Name) c.pushCounter(ch, "link_failure_total", host.Counters.LinkFailureCount, host.Name)
c.pushCounter(ch, "link_failure_total", *host.Counters.LinkFailureCount, *host.Name) c.pushCounter(ch, "loss_of_sync_total", host.Counters.LossOfSyncCount, host.Name)
c.pushCounter(ch, "loss_of_sync_total", *host.Counters.LossOfSyncCount, *host.Name) c.pushCounter(ch, "loss_of_signal_total", host.Counters.LossOfSignalCount, host.Name)
c.pushCounter(ch, "loss_of_signal_total", *host.Counters.LossOfSignalCount, *host.Name) c.pushCounter(ch, "nos_total", host.Counters.NosCount, host.Name)
c.pushCounter(ch, "nos_total", *host.Counters.NosCount, *host.Name) c.pushCounter(ch, "fcp_packet_aborts_total", host.Counters.FCPPacketAborts, host.Name)
c.pushCounter(ch, "fcp_packet_aborts_total", *host.Counters.FCPPacketAborts, *host.Name)
} }
return nil return nil
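
On master the FibreChannel host fields and counters come back from procfs as pointers, so the info labels go through utils.SafeDereference and the counters are dereferenced directly (the note above states procfs guarantees they are safe to dereference). The helper below is a hypothetical stand-in for that utility, shown only to illustrate turning optional string pointers into label values; the real signature lives in collector/utils and is not part of this diff:

package main

import "fmt"

// safeDeref converts optional string pointers into plain label values,
// substituting the empty string when a field is absent. Hypothetical
// stand-in for utils.SafeDereference.
func safeDeref(ptrs ...*string) []string {
	out := make([]string, 0, len(ptrs))
	for _, p := range ptrs {
		if p == nil {
			out = append(out, "")
			continue
		}
		out = append(out, *p)
	}
	return out
}

func main() {
	name, speed := "host0", "16 Gbit"
	var fabric *string // not reported by this HBA

	fmt.Printf("%q\n", safeDeref(&name, &speed, fabric)) // ["host0" "16 Gbit" ""]
}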

View file

@ -20,10 +20,10 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"strconv" "strconv"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -32,7 +32,7 @@ const (
) )
type fileFDStatCollector struct { type fileFDStatCollector struct {
logger *slog.Logger logger log.Logger
} }
func init() { func init() {
@ -40,7 +40,7 @@ func init() {
} }
// NewFileFDStatCollector returns a new Collector exposing file-nr stats. // NewFileFDStatCollector returns a new Collector exposing file-nr stats.
func NewFileFDStatCollector(logger *slog.Logger) (Collector, error) { func NewFileFDStatCollector(logger log.Logger) (Collector, error) {
return &fileFDStatCollector{logger}, nil return &fileFDStatCollector{logger}, nil
} }

View file

@ -11,9 +11,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !nofilefd
// +build !nofilefd
package collector package collector
import "testing" import "testing"

View file

@ -1,65 +0,0 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nofilesystem
// +build !nofilesystem
package collector
import (
"github.com/power-devops/perfstat"
)
const (
defMountPointsExcluded = "^/(dev|aha)($|/)"
defFSTypesExcluded = "^procfs$"
)
// Expose filesystem fullness.
func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
fsStat, err := perfstat.FileSystemStat()
if err != nil {
return nil, err
}
for _, stat := range fsStat {
if c.mountPointFilter.ignored(stat.MountPoint) {
c.logger.Debug("Ignoring mount point", "mountpoint", stat.MountPoint)
continue
}
fstype := stat.TypeString()
if c.fsTypeFilter.ignored(fstype) {
c.logger.Debug("Ignoring fs type", "type", fstype)
continue
}
ro := 0.0
if stat.Flags&perfstat.VFS_READONLY != 0 {
ro = 1.0
}
stats = append(stats, filesystemStats{
labels: filesystemLabels{
device: stat.Device,
mountPoint: stat.MountPoint,
fsType: fstype,
},
size: float64(stat.TotalBlocks * 512.0),
free: float64(stat.FreeBlocks * 512.0),
avail: float64(stat.FreeBlocks * 512.0), // AIX doesn't distinguish between free and available blocks.
files: float64(stat.TotalInodes),
filesFree: float64(stat.FreeInodes),
ro: ro,
})
}
return stats, nil
}

View file

@ -11,14 +11,17 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build dragonfly && !nofilesystem //go:build ((openbsd && !amd64) || darwin || dragonfly) && !nofilesystem
// +build dragonfly,!nofilesystem // +build openbsd,!amd64 darwin dragonfly
// +build !nofilesystem
package collector package collector
import ( import (
"errors" "errors"
"unsafe" "unsafe"
"github.com/go-kit/log/level"
) )
/* /*
@ -47,15 +50,15 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
stats = []filesystemStats{} stats = []filesystemStats{}
for i := 0; i < int(count); i++ { for i := 0; i < int(count); i++ {
mountpoint := C.GoString(&mnt[i].f_mntonname[0]) mountpoint := C.GoString(&mnt[i].f_mntonname[0])
if c.mountPointFilter.ignored(mountpoint) { if c.excludedMountPointsPattern.MatchString(mountpoint) {
c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint) level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint)
continue continue
} }
device := C.GoString(&mnt[i].f_mntfromname[0]) device := C.GoString(&mnt[i].f_mntfromname[0])
fstype := C.GoString(&mnt[i].f_fstypename[0]) fstype := C.GoString(&mnt[i].f_fstypename[0])
if c.fsTypeFilter.ignored(fstype) { if c.excludedFSTypesPattern.MatchString(fstype) {
c.logger.Debug("Ignoring fs type", "type", fstype) level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype)
continue continue
} }

View file

@ -11,19 +11,20 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !nofilesystem && (linux || freebsd || netbsd || openbsd || darwin || dragonfly || aix) //go:build !nofilesystem && (linux || freebsd || openbsd || darwin || dragonfly)
// +build !nofilesystem // +build !nofilesystem
// +build linux freebsd netbsd openbsd darwin dragonfly aix // +build linux freebsd openbsd darwin dragonfly
package collector package collector
import ( import (
"errors" "errors"
"fmt" "regexp"
"log/slog"
"github.com/alecthomas/kingpin/v2" "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
) )
// Arch-dependent implementation must define: // Arch-dependent implementation must define:
@ -36,7 +37,7 @@ var (
mountPointsExcludeSet bool mountPointsExcludeSet bool
mountPointsExclude = kingpin.Flag( mountPointsExclude = kingpin.Flag(
"collector.filesystem.mount-points-exclude", "collector.filesystem.mount-points-exclude",
"Regexp of mount points to exclude for filesystem collector. (mutually exclusive to mount-points-include)", "Regexp of mount points to exclude for filesystem collector.",
).Default(defMountPointsExcluded).PreAction(func(c *kingpin.ParseContext) error { ).Default(defMountPointsExcluded).PreAction(func(c *kingpin.ParseContext) error {
mountPointsExcludeSet = true mountPointsExcludeSet = true
return nil return nil
@ -45,15 +46,11 @@ var (
"collector.filesystem.ignored-mount-points", "collector.filesystem.ignored-mount-points",
"Regexp of mount points to ignore for filesystem collector.", "Regexp of mount points to ignore for filesystem collector.",
).Hidden().String() ).Hidden().String()
mountPointsInclude = kingpin.Flag(
"collector.filesystem.mount-points-include",
"Regexp of mount points to include for filesystem collector. (mutually exclusive to mount-points-exclude)",
).String()
fsTypesExcludeSet bool fsTypesExcludeSet bool
fsTypesExclude = kingpin.Flag( fsTypesExclude = kingpin.Flag(
"collector.filesystem.fs-types-exclude", "collector.filesystem.fs-types-exclude",
"Regexp of filesystem types to exclude for filesystem collector. (mutually exclusive to fs-types-include)", "Regexp of filesystem types to exclude for filesystem collector.",
).Default(defFSTypesExcluded).PreAction(func(c *kingpin.ParseContext) error { ).Default(defFSTypesExcluded).PreAction(func(c *kingpin.ParseContext) error {
fsTypesExcludeSet = true fsTypesExcludeSet = true
return nil return nil
@ -62,34 +59,27 @@ var (
"collector.filesystem.ignored-fs-types", "collector.filesystem.ignored-fs-types",
"Regexp of filesystem types to ignore for filesystem collector.", "Regexp of filesystem types to ignore for filesystem collector.",
).Hidden().String() ).Hidden().String()
fsTypesInclude = kingpin.Flag(
"collector.filesystem.fs-types-include",
"Regexp of filesystem types to exclude for filesystem collector. (mutually exclusive to fs-types-exclude)",
).String()
filesystemLabelNames = []string{"device", "mountpoint", "fstype", "device_error"} filesystemLabelNames = []string{"device", "mountpoint", "fstype"}
) )
type filesystemCollector struct { type filesystemCollector struct {
mountPointFilter deviceFilter excludedMountPointsPattern *regexp.Regexp
fsTypeFilter deviceFilter excludedFSTypesPattern *regexp.Regexp
sizeDesc, freeDesc, availDesc *prometheus.Desc sizeDesc, freeDesc, availDesc *prometheus.Desc
filesDesc, filesFreeDesc *prometheus.Desc filesDesc, filesFreeDesc *prometheus.Desc
purgeableDesc *prometheus.Desc
roDesc, deviceErrorDesc *prometheus.Desc roDesc, deviceErrorDesc *prometheus.Desc
mountInfoDesc *prometheus.Desc logger log.Logger
logger *slog.Logger
} }
type filesystemLabels struct { type filesystemLabels struct {
device, mountPoint, fsType, options, deviceError, major, minor string device, mountPoint, fsType, options string
} }
type filesystemStats struct { type filesystemStats struct {
labels filesystemLabels labels filesystemLabels
size, free, avail float64 size, free, avail float64
files, filesFree float64 files, filesFree float64
purgeable float64
ro, deviceError float64 ro, deviceError float64
} }
@ -98,8 +88,30 @@ func init() {
} }
// NewFilesystemCollector returns a new Collector exposing filesystems stats. // NewFilesystemCollector returns a new Collector exposing filesystems stats.
func NewFilesystemCollector(logger *slog.Logger) (Collector, error) { func NewFilesystemCollector(logger log.Logger) (Collector, error) {
const subsystem = "filesystem" if *oldMountPointsExcluded != "" {
if !mountPointsExcludeSet {
level.Warn(logger).Log("msg", "--collector.filesystem.ignored-mount-points is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.mount-points-exclude")
*mountPointsExclude = *oldMountPointsExcluded
} else {
return nil, errors.New("--collector.filesystem.ignored-mount-points and --collector.filesystem.mount-points-exclude are mutually exclusive")
}
}
if *oldFSTypesExcluded != "" {
if !fsTypesExcludeSet {
level.Warn(logger).Log("msg", "--collector.filesystem.ignored-fs-types is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.fs-types-exclude")
*fsTypesExclude = *oldFSTypesExcluded
} else {
return nil, errors.New("--collector.filesystem.ignored-fs-types and --collector.filesystem.fs-types-exclude are mutually exclusive")
}
}
subsystem := "filesystem"
level.Info(logger).Log("msg", "Parsed flag --collector.filesystem.mount-points-exclude", "flag", *mountPointsExclude)
mountPointPattern := regexp.MustCompile(*mountPointsExclude)
level.Info(logger).Log("msg", "Parsed flag --collector.filesystem.fs-types-exclude", "flag", *fsTypesExclude)
filesystemsTypesPattern := regexp.MustCompile(*fsTypesExclude)
sizeDesc := prometheus.NewDesc( sizeDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "size_bytes"), prometheus.BuildFQName(namespace, subsystem, "size_bytes"),
@ -131,12 +143,6 @@ func NewFilesystemCollector(logger *slog.Logger) (Collector, error) {
filesystemLabelNames, nil, filesystemLabelNames, nil,
) )
purgeableDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "purgeable_bytes"),
"Filesystem space available including purgeable space (MacOS specific).",
filesystemLabelNames, nil,
)
roDesc := prometheus.NewDesc( roDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "readonly"), prometheus.BuildFQName(namespace, subsystem, "readonly"),
"Filesystem read-only status.", "Filesystem read-only status.",
@ -149,36 +155,17 @@ func NewFilesystemCollector(logger *slog.Logger) (Collector, error) {
filesystemLabelNames, nil, filesystemLabelNames, nil,
) )
mountInfoDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "mount_info"),
"Filesystem mount information.",
[]string{"device", "major", "minor", "mountpoint"},
nil,
)
mountPointFilter, err := newMountPointsFilter(logger)
if err != nil {
return nil, fmt.Errorf("unable to parse mount points filter flags: %w", err)
}
fsTypeFilter, err := newFSTypeFilter(logger)
if err != nil {
return nil, fmt.Errorf("unable to parse fs types filter flags: %w", err)
}
return &filesystemCollector{ return &filesystemCollector{
mountPointFilter: mountPointFilter, excludedMountPointsPattern: mountPointPattern,
fsTypeFilter: fsTypeFilter, excludedFSTypesPattern: filesystemsTypesPattern,
sizeDesc: sizeDesc, sizeDesc: sizeDesc,
freeDesc: freeDesc, freeDesc: freeDesc,
availDesc: availDesc, availDesc: availDesc,
filesDesc: filesDesc, filesDesc: filesDesc,
filesFreeDesc: filesFreeDesc, filesFreeDesc: filesFreeDesc,
purgeableDesc: purgeableDesc, roDesc: roDesc,
roDesc: roDesc, deviceErrorDesc: deviceErrorDesc,
deviceErrorDesc: deviceErrorDesc, logger: logger,
mountInfoDesc: mountInfoDesc,
logger: logger,
}, nil }, nil
} }
@ -197,105 +184,36 @@ func (c *filesystemCollector) Update(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.deviceErrorDesc, prometheus.GaugeValue, c.deviceErrorDesc, prometheus.GaugeValue,
s.deviceError, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, s.deviceError, s.labels.device, s.labels.mountPoint, s.labels.fsType,
) )
ch <- prometheus.MustNewConstMetric(
c.roDesc, prometheus.GaugeValue,
s.ro, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError,
)
if s.deviceError > 0 { if s.deviceError > 0 {
continue continue
} }
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.sizeDesc, prometheus.GaugeValue, c.sizeDesc, prometheus.GaugeValue,
s.size, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, s.size, s.labels.device, s.labels.mountPoint, s.labels.fsType,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.freeDesc, prometheus.GaugeValue, c.freeDesc, prometheus.GaugeValue,
s.free, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, s.free, s.labels.device, s.labels.mountPoint, s.labels.fsType,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.availDesc, prometheus.GaugeValue, c.availDesc, prometheus.GaugeValue,
s.avail, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, s.avail, s.labels.device, s.labels.mountPoint, s.labels.fsType,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.filesDesc, prometheus.GaugeValue, c.filesDesc, prometheus.GaugeValue,
s.files, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, s.files, s.labels.device, s.labels.mountPoint, s.labels.fsType,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.filesFreeDesc, prometheus.GaugeValue, c.filesFreeDesc, prometheus.GaugeValue,
s.filesFree, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError, s.filesFree, s.labels.device, s.labels.mountPoint, s.labels.fsType,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.mountInfoDesc, prometheus.GaugeValue, c.roDesc, prometheus.GaugeValue,
1.0, s.labels.device, s.labels.major, s.labels.minor, s.labels.mountPoint, s.ro, s.labels.device, s.labels.mountPoint, s.labels.fsType,
) )
if s.purgeable >= 0 {
ch <- prometheus.MustNewConstMetric(
c.purgeableDesc, prometheus.GaugeValue,
s.purgeable, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError,
)
}
} }
return nil return nil
} }
func newMountPointsFilter(logger *slog.Logger) (deviceFilter, error) {
if *oldMountPointsExcluded != "" {
if !mountPointsExcludeSet {
logger.Warn("--collector.filesystem.ignored-mount-points is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.mount-points-exclude")
*mountPointsExclude = *oldMountPointsExcluded
} else {
return deviceFilter{}, errors.New("--collector.filesystem.ignored-mount-points and --collector.filesystem.mount-points-exclude are mutually exclusive")
}
}
if *mountPointsInclude != "" && !mountPointsExcludeSet {
logger.Debug("mount-points-exclude flag not set when mount-points-include flag is set, assuming include is desired")
*mountPointsExclude = ""
}
if *mountPointsExclude != "" && *mountPointsInclude != "" {
return deviceFilter{}, errors.New("--collector.filesystem.mount-points-exclude and --collector.filesystem.mount-points-include are mutually exclusive")
}
if *mountPointsExclude != "" {
logger.Info("Parsed flag --collector.filesystem.mount-points-exclude", "flag", *mountPointsExclude)
}
if *mountPointsInclude != "" {
logger.Info("Parsed flag --collector.filesystem.mount-points-include", "flag", *mountPointsInclude)
}
return newDeviceFilter(*mountPointsExclude, *mountPointsInclude), nil
}
func newFSTypeFilter(logger *slog.Logger) (deviceFilter, error) {
if *oldFSTypesExcluded != "" {
if !fsTypesExcludeSet {
logger.Warn("--collector.filesystem.ignored-fs-types is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.fs-types-exclude")
*fsTypesExclude = *oldFSTypesExcluded
} else {
return deviceFilter{}, errors.New("--collector.filesystem.ignored-fs-types and --collector.filesystem.fs-types-exclude are mutually exclusive")
}
}
if *fsTypesInclude != "" && !fsTypesExcludeSet {
logger.Debug("fs-types-exclude flag not set when fs-types-include flag is set, assuming include is desired")
*fsTypesExclude = ""
}
if *fsTypesExclude != "" && *fsTypesInclude != "" {
return deviceFilter{}, errors.New("--collector.filesystem.fs-types-exclude and --collector.filesystem.fs-types-include are mutually exclusive")
}
if *fsTypesExclude != "" {
logger.Info("Parsed flag --collector.filesystem.fs-types-exclude", "flag", *fsTypesExclude)
}
if *fsTypesInclude != "" {
logger.Info("Parsed flag --collector.filesystem.fs-types-include", "flag", *fsTypesInclude)
}
return newDeviceFilter(*fsTypesExclude, *fsTypesInclude), nil
}
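
On master the raw exclude regexps are replaced by paired include/exclude flags feeding a deviceFilter: setting both flags of a pair is rejected, and a mount point or fs type is skipped when it matches the exclude pattern or fails to match a configured include pattern. The filter type itself is defined elsewhere in the package, so the sketch below is an assumption about its shape rather than the repo's implementation:

package main

import (
	"fmt"
	"regexp"
)

// deviceFilter mirrors the exclude/include semantics used by the filesystem
// (and ethtool) collectors: ignore a name when it matches the exclude
// pattern, or when an include pattern is set and the name does not match it.
type deviceFilter struct {
	ignorePattern *regexp.Regexp
	acceptPattern *regexp.Regexp
}

func newDeviceFilter(ignoredPattern, acceptPattern string) deviceFilter {
	var f deviceFilter
	if ignoredPattern != "" {
		f.ignorePattern = regexp.MustCompile(ignoredPattern)
	}
	if acceptPattern != "" {
		f.acceptPattern = regexp.MustCompile(acceptPattern)
	}
	return f
}

func (f deviceFilter) ignored(name string) bool {
	return (f.ignorePattern != nil && f.ignorePattern.MatchString(name)) ||
		(f.acceptPattern != nil && !f.acceptPattern.MatchString(name))
}

func main() {
	mounts := newDeviceFilter("^/(dev|proc|run|sys)($|/)", "")
	for _, mp := range []string{"/", "/run/user/1000", "/var/lib/docker"} {
		fmt.Println(mp, "ignored:", mounts.ignored(mp))
	}
}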

View file

@ -17,6 +17,7 @@
package collector package collector
import ( import (
"github.com/go-kit/log/level"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
@ -39,20 +40,20 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
stats := []filesystemStats{} stats := []filesystemStats{}
for _, fs := range buf { for _, fs := range buf {
mountpoint := unix.ByteSliceToString(fs.Mntonname[:]) mountpoint := unix.ByteSliceToString(fs.Mntonname[:])
if c.mountPointFilter.ignored(mountpoint) { if c.excludedMountPointsPattern.MatchString(mountpoint) {
c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint) level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint)
continue continue
} }
device := unix.ByteSliceToString(fs.Mntfromname[:]) device := unix.ByteSliceToString(fs.Mntfromname[:])
fstype := unix.ByteSliceToString(fs.Fstypename[:]) fstype := unix.ByteSliceToString(fs.Fstypename[:])
if c.fsTypeFilter.ignored(fstype) { if c.excludedFSTypesPattern.MatchString(fstype) {
c.logger.Debug("Ignoring fs type", "type", fstype) level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype)
continue continue
} }
if (fs.Flags & unix.MNT_IGNORE) != 0 { if (fs.Flags & unix.MNT_IGNORE) != 0 {
c.logger.Debug("Ignoring mount flagged as ignore", "mountpoint", mountpoint) level.Debug(c.logger).Log("msg", "Ignoring mount flagged as ignore", "mountpoint", mountpoint)
continue continue
} }

View file

@ -21,14 +21,15 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/alecthomas/kingpin/v2" "github.com/go-kit/log"
"github.com/go-kit/log/level"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
"gopkg.in/alecthomas/kingpin.v2"
) )
const ( const (
@ -39,9 +40,6 @@ const (
var mountTimeout = kingpin.Flag("collector.filesystem.mount-timeout", var mountTimeout = kingpin.Flag("collector.filesystem.mount-timeout",
"how long to wait for a mount to respond before marking it as stale"). "how long to wait for a mount to respond before marking it as stale").
Hidden().Default("5s").Duration() Hidden().Default("5s").Duration()
var statWorkerCount = kingpin.Flag("collector.filesystem.stat-workers",
"how many stat calls to process simultaneously").
Hidden().Default("4").Int()
var stuckMounts = make(map[string]struct{}) var stuckMounts = make(map[string]struct{})
var stuckMountsMtx = &sync.Mutex{} var stuckMountsMtx = &sync.Mutex{}
@ -52,111 +50,78 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
return nil, err return nil, err
} }
stats := []filesystemStats{} stats := []filesystemStats{}
labelChan := make(chan filesystemLabels) for _, labels := range mps {
statChan := make(chan filesystemStats) if c.excludedMountPointsPattern.MatchString(labels.mountPoint) {
wg := sync.WaitGroup{} level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", labels.mountPoint)
continue
workerCount := *statWorkerCount
if workerCount < 1 {
workerCount = 1
}
for i := 0; i < workerCount; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for labels := range labelChan {
statChan <- c.processStat(labels)
}
}()
}
go func() {
for _, labels := range mps {
if c.mountPointFilter.ignored(labels.mountPoint) {
c.logger.Debug("Ignoring mount point", "mountpoint", labels.mountPoint)
continue
}
if c.fsTypeFilter.ignored(labels.fsType) {
c.logger.Debug("Ignoring fs type", "type", labels.fsType)
continue
}
stuckMountsMtx.Lock()
if _, ok := stuckMounts[labels.mountPoint]; ok {
labels.deviceError = "mountpoint timeout"
stats = append(stats, filesystemStats{
labels: labels,
deviceError: 1,
})
c.logger.Debug("Mount point is in an unresponsive state", "mountpoint", labels.mountPoint)
stuckMountsMtx.Unlock()
continue
}
stuckMountsMtx.Unlock()
labelChan <- labels
} }
close(labelChan) if c.excludedFSTypesPattern.MatchString(labels.fsType) {
wg.Wait() level.Debug(c.logger).Log("msg", "Ignoring fs", "type", labels.fsType)
close(statChan) continue
}() }
stuckMountsMtx.Lock()
if _, ok := stuckMounts[labels.mountPoint]; ok {
stats = append(stats, filesystemStats{
labels: labels,
deviceError: 1,
})
level.Debug(c.logger).Log("msg", "Mount point is in an unresponsive state", "mountpoint", labels.mountPoint)
stuckMountsMtx.Unlock()
continue
}
stuckMountsMtx.Unlock()
for stat := range statChan { // The success channel is used to tell the "watcher" that the stat
stats = append(stats, stat) // finished successfully. The channel is closed on success.
success := make(chan struct{})
go stuckMountWatcher(labels.mountPoint, success, c.logger)
buf := new(unix.Statfs_t)
err = unix.Statfs(rootfsFilePath(labels.mountPoint), buf)
stuckMountsMtx.Lock()
close(success)
// If the mount has been marked as stuck, unmark it and log it's recovery.
if _, ok := stuckMounts[labels.mountPoint]; ok {
level.Debug(c.logger).Log("msg", "Mount point has recovered, monitoring will resume", "mountpoint", labels.mountPoint)
delete(stuckMounts, labels.mountPoint)
}
stuckMountsMtx.Unlock()
if err != nil {
stats = append(stats, filesystemStats{
labels: labels,
deviceError: 1,
})
level.Debug(c.logger).Log("msg", "Error on statfs() system call", "rootfs", rootfsFilePath(labels.mountPoint), "err", err)
continue
}
var ro float64
for _, option := range strings.Split(labels.options, ",") {
if option == "ro" {
ro = 1
break
}
}
stats = append(stats, filesystemStats{
labels: labels,
size: float64(buf.Blocks) * float64(buf.Bsize),
free: float64(buf.Bfree) * float64(buf.Bsize),
avail: float64(buf.Bavail) * float64(buf.Bsize),
files: float64(buf.Files),
filesFree: float64(buf.Ffree),
ro: ro,
})
} }
return stats, nil return stats, nil
} }
func (c *filesystemCollector) processStat(labels filesystemLabels) filesystemStats {
var ro float64
for _, option := range strings.Split(labels.options, ",") {
if option == "ro" {
ro = 1
break
}
}
success := make(chan struct{})
go stuckMountWatcher(labels.mountPoint, success, c.logger)
buf := new(unix.Statfs_t)
err := unix.Statfs(rootfsFilePath(labels.mountPoint), buf)
stuckMountsMtx.Lock()
close(success)
// If the mount has been marked as stuck, unmark it and log its recovery.
if _, ok := stuckMounts[labels.mountPoint]; ok {
c.logger.Debug("Mount point has recovered, monitoring will resume", "mountpoint", labels.mountPoint)
delete(stuckMounts, labels.mountPoint)
}
stuckMountsMtx.Unlock()
if err != nil {
labels.deviceError = err.Error()
c.logger.Debug("Error on statfs() system call", "rootfs", rootfsFilePath(labels.mountPoint), "err", err)
return filesystemStats{
labels: labels,
deviceError: 1,
ro: ro,
}
}
return filesystemStats{
labels: labels,
size: float64(buf.Blocks) * float64(buf.Bsize),
free: float64(buf.Bfree) * float64(buf.Bsize),
avail: float64(buf.Bavail) * float64(buf.Bsize),
files: float64(buf.Files),
filesFree: float64(buf.Ffree),
ro: ro,
}
}
// stuckMountWatcher listens on the given success channel and if the channel closes // stuckMountWatcher listens on the given success channel and if the channel closes
// then the watcher does nothing. If instead the timeout is reached, the // then the watcher does nothing. If instead the timeout is reached, the
// mount point that is being watched is marked as stuck. // mount point that is being watched is marked as stuck.
func stuckMountWatcher(mountPoint string, success chan struct{}, logger *slog.Logger) { func stuckMountWatcher(mountPoint string, success chan struct{}, logger log.Logger) {
mountCheckTimer := time.NewTimer(*mountTimeout) mountCheckTimer := time.NewTimer(*mountTimeout)
defer mountCheckTimer.Stop() defer mountCheckTimer.Stop()
select { select {
@ -169,19 +134,19 @@ func stuckMountWatcher(mountPoint string, success chan struct{}, logger *slog.Lo
case <-success: case <-success:
// Success came in just after the timeout was reached, don't label the mount as stuck // Success came in just after the timeout was reached, don't label the mount as stuck
default: default:
logger.Debug("Mount point timed out, it is being labeled as stuck and will not be monitored", "mountpoint", mountPoint) level.Debug(logger).Log("msg", "Mount point timed out, it is being labeled as stuck and will not be monitored", "mountpoint", mountPoint)
stuckMounts[mountPoint] = struct{}{} stuckMounts[mountPoint] = struct{}{}
} }
stuckMountsMtx.Unlock() stuckMountsMtx.Unlock()
} }
} }
func mountPointDetails(logger *slog.Logger) ([]filesystemLabels, error) { func mountPointDetails(logger log.Logger) ([]filesystemLabels, error) {
file, err := os.Open(procFilePath("1/mountinfo")) file, err := os.Open(procFilePath("1/mounts"))
if errors.Is(err, os.ErrNotExist) { if errors.Is(err, os.ErrNotExist) {
// Fallback to `/proc/self/mountinfo` if `/proc/1/mountinfo` is missing due to hidepid. // Fallback to `/proc/mounts` if `/proc/1/mounts` is missing due to hidepid.
logger.Debug("Reading root mounts failed, falling back to self mounts", "err", err) level.Debug(logger).Log("msg", "Reading root mounts failed, falling back to system mounts", "err", err)
file, err = os.Open(procFilePath("self/mountinfo")) file, err = os.Open(procFilePath("mounts"))
} }
if err != nil { if err != nil {
return nil, err return nil, err
@ -198,34 +163,20 @@ func parseFilesystemLabels(r io.Reader) ([]filesystemLabels, error) {
for scanner.Scan() { for scanner.Scan() {
parts := strings.Fields(scanner.Text()) parts := strings.Fields(scanner.Text())
if len(parts) < 10 { if len(parts) < 4 {
return nil, fmt.Errorf("malformed mount point information: %q", scanner.Text()) return nil, fmt.Errorf("malformed mount point information: %q", scanner.Text())
} }
major, minor := 0, 0
_, err := fmt.Sscanf(parts[2], "%d:%d", &major, &minor)
if err != nil {
return nil, fmt.Errorf("malformed mount point information: %q", scanner.Text())
}
m := 5
for parts[m+1] != "-" {
m++
}
// Ensure we handle the translation of \040 and \011 // Ensure we handle the translation of \040 and \011
// as per fstab(5). // as per fstab(5).
parts[4] = strings.ReplaceAll(parts[4], "\\040", " ") parts[1] = strings.Replace(parts[1], "\\040", " ", -1)
parts[4] = strings.ReplaceAll(parts[4], "\\011", "\t") parts[1] = strings.Replace(parts[1], "\\011", "\t", -1)
filesystems = append(filesystems, filesystemLabels{ filesystems = append(filesystems, filesystemLabels{
device: parts[m+3], device: parts[0],
mountPoint: rootfsStripPrefix(parts[4]), mountPoint: rootfsStripPrefix(parts[1]),
fsType: parts[m+2], fsType: parts[2],
options: parts[5], options: parts[3],
major: fmt.Sprint(major),
minor: fmt.Sprint(minor),
deviceError: "",
}) })
} }
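
Master reads /proc/1/mountinfo (falling back to self/mountinfo) rather than /proc/1/mounts: mountinfo adds the major:minor device number and a variable-length list of optional fields terminated by "-" before the fstype and source, and the parser above scans forward to that separator while undoing the \040 and \011 escapes described in fstab(5). A standalone sketch of parsing one such record along the same lines; the sample line is made up:

package main

import (
	"fmt"
	"strings"
)

// parseMountinfoLine extracts the fields the filesystem collector needs from
// a single /proc/<pid>/mountinfo record: field 3 is major:minor, field 5 is
// the mount point, field 6 the mount options, and the fstype and source
// follow the "-" separator that ends the optional fields.
func parseMountinfoLine(line string) (device, mountPoint, fsType, options, major, minor string, err error) {
	parts := strings.Fields(line)
	if len(parts) < 10 {
		return "", "", "", "", "", "", fmt.Errorf("malformed mount point information: %q", line)
	}
	var maj, min int
	if _, err := fmt.Sscanf(parts[2], "%d:%d", &maj, &min); err != nil {
		return "", "", "", "", "", "", fmt.Errorf("malformed mount point information: %q", line)
	}
	m := 5
	for parts[m+1] != "-" {
		m++
	}
	// Translate the \040 (space) and \011 (tab) escapes, per fstab(5).
	mp := strings.ReplaceAll(parts[4], "\\040", " ")
	mp = strings.ReplaceAll(mp, "\\011", "\t")
	return parts[m+3], mp, parts[m+2], parts[5], fmt.Sprint(maj), fmt.Sprint(min), nil
}

func main() {
	line := "36 25 8:1 / /mnt/data rw,relatime shared:20 - ext4 /dev/sda1 rw"
	dev, mp, fs, opts, maj, min, err := parseMountinfoLine(line)
	if err != nil {
		panic(err)
	}
	fmt.Println(dev, mp, fs, opts, maj, min)
}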

View file

@@ -11,18 +11,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//go:build !nofilesystem
-// +build !nofilesystem
 package collector
 import (
-    "io"
-    "log/slog"
+    "github.com/go-kit/log"
     "strings"
     "testing"
-    "github.com/alecthomas/kingpin/v2"
+    kingpin "gopkg.in/alecthomas/kingpin.v2"
 )
 func Test_parseFilesystemLabelsError(t *testing.T) {
@@ -83,23 +79,15 @@ func TestMountPointDetails(t *testing.T) {
         "/var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore] bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk": "",
     }
-    filesystems, err := mountPointDetails(slog.New(slog.NewTextHandler(io.Discard, nil)))
+    filesystems, err := mountPointDetails(log.NewNopLogger())
     if err != nil {
         t.Log(err)
     }
-    foundSet := map[string]bool{}
     for _, fs := range filesystems {
         if _, ok := expected[fs.mountPoint]; !ok {
             t.Errorf("Got unexpected %s", fs.mountPoint)
         }
-        foundSet[fs.mountPoint] = true
-    }
-    for mountPoint := range expected {
-        if _, ok := foundSet[mountPoint]; !ok {
-            t.Errorf("Expected %s, got nothing", mountPoint)
-        }
     }
 }
@@ -112,7 +100,7 @@ func TestMountsFallback(t *testing.T) {
         "/": "",
     }
-    filesystems, err := mountPointDetails(slog.New(slog.NewTextHandler(io.Discard, nil)))
+    filesystems, err := mountPointDetails(log.NewNopLogger())
     if err != nil {
         t.Log(err)
     }
@@ -140,7 +128,7 @@ func TestPathRootfs(t *testing.T) {
         "/sys/fs/cgroup": "",
     }
-    filesystems, err := mountPointDetails(slog.New(slog.NewTextHandler(io.Discard, nil)))
+    filesystems, err := mountPointDetails(log.NewNopLogger())
     if err != nil {
         t.Log(err)
     }
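
The substantive change in these tests is the logger handed to mountPointDetails: go-kit's log.NewNopLogger() gives way to a log/slog logger that writes to io.Discard. A minimal sketch of that pattern (the helper and test names are illustrative, not from the repository):

    package collector

    import (
        "io"
        "log/slog"
        "testing"
    )

    // newDiscardLogger builds a *slog.Logger whose records are thrown away,
    // the log/slog counterpart of go-kit's log.NewNopLogger for tests.
    func newDiscardLogger() *slog.Logger {
        return slog.New(slog.NewTextHandler(io.Discard, nil))
    }

    func TestDiscardLoggerSketch(t *testing.T) {
        logger := newDiscardLogger()
        logger.Debug("this record goes nowhere", "key", "value")
    }

On Go 1.24 and later, slog.New(slog.DiscardHandler) should serve the same purpose with less ceremony.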

View file

@@ -1,114 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build darwin && !nofilesystem
// +build darwin,!nofilesystem
package collector
/*
#cgo CFLAGS: -x objective-c
#cgo LDFLAGS: -framework Foundation
#import <Foundation/Foundation.h>
Float64 purgeable(char *path) {
Float64 value = -1.0f;
@autoreleasepool {
NSError *error = nil;
NSString *str = [NSString stringWithUTF8String:path];
NSURL *fileURL = [[NSURL alloc] initFileURLWithPath:str];
NSDictionary *results = [fileURL resourceValuesForKeys:@[NSURLVolumeAvailableCapacityForImportantUsageKey] error:&error];
if (results) {
CFNumberRef tmp = CFDictionaryGetValue((CFDictionaryRef)results, NSURLVolumeAvailableCapacityForImportantUsageKey);
if (tmp != NULL) {
CFNumberGetValue(tmp, kCFNumberFloat64Type, &value);
}
}
[fileURL release];
}
return value;
}
*/
import "C"
import (
"errors"
"unsafe"
)
/*
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
#include <stdio.h>
*/
import "C"
const (
defMountPointsExcluded = "^/(dev)($|/)"
defFSTypesExcluded = "^devfs$"
readOnly = 0x1 // MNT_RDONLY
)
// Expose filesystem fullness.
func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
var mntbuf *C.struct_statfs
count := C.getmntinfo(&mntbuf, C.MNT_NOWAIT)
if count == 0 {
return nil, errors.New("getmntinfo() failed")
}
mnt := (*[1 << 20]C.struct_statfs)(unsafe.Pointer(mntbuf))
stats = []filesystemStats{}
for i := 0; i < int(count); i++ {
mountpoint := C.GoString(&mnt[i].f_mntonname[0])
if c.mountPointFilter.ignored(mountpoint) {
c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := C.GoString(&mnt[i].f_mntfromname[0])
fstype := C.GoString(&mnt[i].f_fstypename[0])
if c.fsTypeFilter.ignored(fstype) {
c.logger.Debug("Ignoring fs type", "type", fstype)
continue
}
var ro float64
if (mnt[i].f_flags & readOnly) != 0 {
ro = 1
}
mountpointCString := C.CString(mountpoint)
defer C.free(unsafe.Pointer(mountpointCString))
stats = append(stats, filesystemStats{
labels: filesystemLabels{
device: device,
mountPoint: rootfsStripPrefix(mountpoint),
fsType: fstype,
},
size: float64(mnt[i].f_blocks) * float64(mnt[i].f_bsize),
free: float64(mnt[i].f_bfree) * float64(mnt[i].f_bsize),
avail: float64(mnt[i].f_bavail) * float64(mnt[i].f_bsize),
files: float64(mnt[i].f_files),
filesFree: float64(mnt[i].f_ffree),
purgeable: float64(C.purgeable(mountpointCString)),
ro: ro,
})
}
return stats, nil
}

View file

@@ -1,132 +0,0 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nofilesystem
// +build !nofilesystem
package collector
import (
"fmt"
"syscall"
"unsafe"
"golang.org/x/sys/unix"
)
const (
defMountPointsExcluded = "^/(dev)($|/)"
defFSTypesExcluded = "^(kernfs|procfs|ptyfs|fdesc)$"
_VFS_NAMELEN = 32
_VFS_MNAMELEN = 1024
)
/*
* Go uses the NetBSD 9 ABI and thus syscall.SYS_GETVFSSTAT is compat_90_getvfsstat.
* We have to declare struct statvfs90 because it is not included in the unix package.
* See NetBSD/src/sys/compat/sys/statvfs.h.
*/
type statvfs90 struct {
F_flag uint
F_bsize uint
F_frsize uint
F_iosize uint
F_blocks uint64
F_bfree uint64
F_bavail uint64
F_bresvd uint64
F_files uint64
F_ffree uint64
F_favail uint64
F_fresvd uint64
F_syncreads uint64
F_syncwrites uint64
F_asyncreads uint64
F_asyncwrites uint64
F_fsidx [2]uint32
F_fsid uint32
F_namemax uint
F_owner uint32
F_spare [4]uint32
F_fstypename [_VFS_NAMELEN]byte
F_mntonname [_VFS_MNAMELEN]byte
F_mntfromname [_VFS_MNAMELEN]byte
cgo_pad [4]byte
}
func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
var mnt []statvfs90
if syscall.SYS_GETVFSSTAT != 356 /* compat_90_getvfsstat */ {
/*
* Catch if golang ever updates to newer ABI and bail.
*/
return nil, fmt.Errorf("getvfsstat: ABI mismatch")
}
for {
r1, _, errno := syscall.Syscall(syscall.SYS_GETVFSSTAT, uintptr(0), 0, unix.ST_NOWAIT)
if errno != 0 {
return nil, fmt.Errorf("getvfsstat: %s", string(errno))
}
mnt = make([]statvfs90, r1, r1)
r2, _, errno := syscall.Syscall(syscall.SYS_GETVFSSTAT, uintptr(unsafe.Pointer(&mnt[0])), unsafe.Sizeof(mnt[0])*r1, unix.ST_NOWAIT /* ST_NOWAIT */)
if errno != 0 {
return nil, fmt.Errorf("getvfsstat: %s", string(errno))
}
if r1 == r2 {
break
}
}
stats = []filesystemStats{}
for _, v := range mnt {
mountpoint := unix.ByteSliceToString(v.F_mntonname[:])
if c.mountPointFilter.ignored(mountpoint) {
c.logger.Debug("msg", "Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := unix.ByteSliceToString(v.F_mntfromname[:])
fstype := unix.ByteSliceToString(v.F_fstypename[:])
if c.fsTypeFilter.ignored(fstype) {
c.logger.Debug("msg", "Ignoring fs type", "type", fstype)
continue
}
var ro float64
if (v.F_flag & unix.MNT_RDONLY) != 0 {
ro = 1
}
stats = append(stats, filesystemStats{
labels: filesystemLabels{
device: device,
mountPoint: mountpoint,
fsType: fstype,
},
size: float64(v.F_blocks) * float64(v.F_bsize),
free: float64(v.F_bfree) * float64(v.F_bsize),
avail: float64(v.F_bavail) * float64(v.F_bsize),
files: float64(v.F_files),
filesFree: float64(v.F_ffree),
ro: ro,
})
}
return stats, nil
}
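
The two getvfsstat calls above follow a common size-then-fill pattern: ask the kernel how many entries exist, allocate a buffer of that size, fill it, and retry whenever the count changed in between (for example because a filesystem was mounted mid-scrape). A generic sketch of that loop, detached from the syscall (all names here are illustrative):

    package main

    import "fmt"

    // fetchAll asks countFn for the current number of entries, allocates a
    // buffer, has fillFn populate it, and retries whenever the two counts
    // disagree, which is the same loop the collector runs around compat_90_getvfsstat.
    func fetchAll[T any](countFn func() int, fillFn func([]T) int) []T {
        for {
            n := countFn()
            buf := make([]T, n)
            if fillFn(buf) == n {
                return buf
            }
            // The set of entries changed between the two calls; start over.
        }
    }

    func main() {
        mounts := []string{"/", "/home", "/tmp"}
        out := fetchAll(
            func() int { return len(mounts) },
            func(buf []string) int { return copy(buf, mounts) },
        )
        fmt.Println(out) // [/ /home /tmp]
    }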

View file

@@ -11,12 +11,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//go:build !nofilesystem
-// +build !nofilesystem
+//go:build openbsd && !nofilesystem
+// +build openbsd,!nofilesystem
 package collector
 import (
+    "github.com/go-kit/log/level"
     "golang.org/x/sys/unix"
 )
@@ -40,16 +41,16 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
     stats = []filesystemStats{}
     for _, v := range mnt {
-        mountpoint := unix.ByteSliceToString(v.F_mntonname[:])
-        if c.mountPointFilter.ignored(mountpoint) {
-            c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
+        mountpoint := string(v.F_mntonname[:])
+        if c.excludedMountPointsPattern.MatchString(mountpoint) {
+            level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint)
             continue
         }
-        device := unix.ByteSliceToString(v.F_mntfromname[:])
-        fstype := unix.ByteSliceToString(v.F_fstypename[:])
-        if c.fsTypeFilter.ignored(fstype) {
-            c.logger.Debug("Ignoring fs type", "type", fstype)
+        device := string(v.F_mntfromname[:])
+        fstype := string(v.F_fstypename[:])
+        if c.excludedFSTypesPattern.MatchString(fstype) {
+            level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype)
             continue
         }
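
On the master side, unix.ByteSliceToString replaces the plain string(v.F_mntonname[:]) conversion because these statfs strings are fixed-size, NUL-padded C arrays: a plain string conversion keeps the trailing NUL bytes, which can then leak into metric label values. A small self-contained illustration (the local helper mimics what unix.ByteSliceToString does):

    package main

    import (
        "bytes"
        "fmt"
    )

    // byteSliceToString mimics golang.org/x/sys/unix.ByteSliceToString:
    // it truncates at the first NUL byte instead of keeping the padding.
    func byteSliceToString(b []byte) string {
        if i := bytes.IndexByte(b, 0); i != -1 {
            b = b[:i]
        }
        return string(b)
    }

    func main() {
        var mntonname [16]byte // stand-in for the fixed-size F_mntonname field
        copy(mntonname[:], "/home")

        fmt.Printf("%q\n", string(mntonname[:]))            // "/home" plus eleven \x00 padding bytes
        fmt.Printf("%q\n", byteSliceToString(mntonname[:])) // "/home"
    }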

File diff suppressed because it is too large

View file

@@ -1,291 +0,0 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_buddyinfo_blocks Count of free blocks according to size.
# TYPE node_buddyinfo_blocks gauge
node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759
node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381
node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572
node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093
node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3
node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791
node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185
node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475
node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530
node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2
node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194
node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567
node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45
node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102
node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12
node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4
node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0
# HELP node_disk_read_errors_total The total number of read errors.
# TYPE node_disk_read_errors_total counter
node_disk_read_errors_total{device="disk0"} 0
# HELP node_disk_read_retries_total The total number of read retries.
# TYPE node_disk_read_retries_total counter
node_disk_read_retries_total{device="disk0"} 0
# HELP node_disk_write_errors_total The total number of write errors.
# TYPE node_disk_write_errors_total counter
node_disk_write_errors_total{device="disk0"} 0
# HELP node_disk_write_retries_total The total number of write retries.
# TYPE node_disk_write_retries_total counter
node_disk_write_retries_total{device="disk0"} 0
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_memory_swap_total_bytes Memory information field swap_total_bytes.
# TYPE node_memory_swap_total_bytes gauge
node_memory_swap_total_bytes 0
# HELP node_memory_swap_used_bytes Memory information field swap_used_bytes.
# TYPE node_memory_swap_used_bytes gauge
node_memory_swap_used_bytes 0
# HELP node_memory_total_bytes Memory information field total_bytes.
# TYPE node_memory_total_bytes gauge
node_memory_total_bytes 7.516192768e+09
# HELP node_network_noproto_total Network device statistic noproto.
# TYPE node_network_noproto_total counter
node_network_noproto_total{device="lo0"} 0
# HELP node_network_receive_drop_total Network device statistic receive_drop.
# TYPE node_network_receive_drop_total counter
node_network_receive_drop_total{device="lo0"} 0
# HELP node_network_receive_errs_total Network device statistic receive_errs.
# TYPE node_network_receive_errs_total counter
node_network_receive_errs_total{device="lo0"} 0
# HELP node_network_receive_packets_total Network device statistic receive_packets.
# TYPE node_network_receive_packets_total counter
# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes.
# TYPE node_network_transmit_bytes_total counter
# HELP node_network_transmit_colls_total Network device statistic transmit_colls.
# TYPE node_network_transmit_colls_total counter
node_network_transmit_colls_total{device="lo0"} 0
# HELP node_network_transmit_errs_total Network device statistic transmit_errs.
# TYPE node_network_transmit_errs_total counter
node_network_transmit_errs_total{device="lo0"} 0
# HELP node_network_transmit_packets_total Network device statistic transmit_packets.
# TYPE node_network_transmit_packets_total counter
# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id.
# TYPE node_os_info gauge
node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1
# HELP node_os_version Metric containing the major.minor part of the OS version.
# TYPE node_os_version gauge
node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="boottime"} 1
node_scrape_collector_success{collector="buddyinfo"} 1
node_scrape_collector_success{collector="cpu"} 1
node_scrape_collector_success{collector="diskstats"} 1
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="meminfo"} 1
node_scrape_collector_success{collector="netdev"} 1
node_scrape_collector_success{collector="os"} 1
node_scrape_collector_success{collector="powersupplyclass"} 1
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="thermal"} 0
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="xfrm"} 1
# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime_seconds gauge
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
# HELP node_xfrm_acquire_error_packets_total State hasnt been fully acquired before use
# TYPE node_xfrm_acquire_error_packets_total counter
node_xfrm_acquire_error_packets_total 24532
# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed
# TYPE node_xfrm_fwd_hdr_error_packets_total counter
node_xfrm_fwd_hdr_error_packets_total 6654
# HELP node_xfrm_in_buffer_error_packets_total No buffer is left
# TYPE node_xfrm_in_buffer_error_packets_total counter
node_xfrm_in_buffer_error_packets_total 2
# HELP node_xfrm_in_error_packets_total All errors not matched by other
# TYPE node_xfrm_in_error_packets_total counter
node_xfrm_in_error_packets_total 1
# HELP node_xfrm_in_hdr_error_packets_total Header error
# TYPE node_xfrm_in_hdr_error_packets_total counter
node_xfrm_in_hdr_error_packets_total 4
# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found
# TYPE node_xfrm_in_no_pols_packets_total counter
node_xfrm_in_no_pols_packets_total 65432
# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong
# TYPE node_xfrm_in_no_states_packets_total counter
node_xfrm_in_no_states_packets_total 3
# HELP node_xfrm_in_pol_block_packets_total Policy discards
# TYPE node_xfrm_in_pol_block_packets_total counter
node_xfrm_in_pol_block_packets_total 100
# HELP node_xfrm_in_pol_error_packets_total Policy error
# TYPE node_xfrm_in_pol_error_packets_total counter
node_xfrm_in_pol_error_packets_total 10000
# HELP node_xfrm_in_state_expired_packets_total State is expired
# TYPE node_xfrm_in_state_expired_packets_total counter
node_xfrm_in_state_expired_packets_total 7
# HELP node_xfrm_in_state_invalid_packets_total State is invalid
# TYPE node_xfrm_in_state_invalid_packets_total counter
node_xfrm_in_state_invalid_packets_total 55555
# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch
# TYPE node_xfrm_in_state_mismatch_packets_total counter
node_xfrm_in_state_mismatch_packets_total 23451
# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_in_state_mode_error_packets_total counter
node_xfrm_in_state_mode_error_packets_total 100
# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong
# TYPE node_xfrm_in_state_proto_error_packets_total counter
node_xfrm_in_state_proto_error_packets_total 40
# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window
# TYPE node_xfrm_in_state_seq_error_packets_total counter
node_xfrm_in_state_seq_error_packets_total 6000
# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong
# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter
node_xfrm_in_tmpl_mismatch_packets_total 51
# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error
# TYPE node_xfrm_out_bundle_check_error_packets_total counter
node_xfrm_out_bundle_check_error_packets_total 555
# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error
# TYPE node_xfrm_out_bundle_gen_error_packets_total counter
node_xfrm_out_bundle_gen_error_packets_total 43321
# HELP node_xfrm_out_error_packets_total All errors which is not matched others
# TYPE node_xfrm_out_error_packets_total counter
node_xfrm_out_error_packets_total 1e+06
# HELP node_xfrm_out_no_states_packets_total No state is found
# TYPE node_xfrm_out_no_states_packets_total counter
node_xfrm_out_no_states_packets_total 869
# HELP node_xfrm_out_pol_block_packets_total Policy discards
# TYPE node_xfrm_out_pol_block_packets_total counter
node_xfrm_out_pol_block_packets_total 43456
# HELP node_xfrm_out_pol_dead_packets_total Policy is dead
# TYPE node_xfrm_out_pol_dead_packets_total counter
node_xfrm_out_pol_dead_packets_total 7656
# HELP node_xfrm_out_pol_error_packets_total Policy error
# TYPE node_xfrm_out_pol_error_packets_total counter
node_xfrm_out_pol_error_packets_total 1454
# HELP node_xfrm_out_state_expired_packets_total State is expired
# TYPE node_xfrm_out_state_expired_packets_total counter
node_xfrm_out_state_expired_packets_total 565
# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired
# TYPE node_xfrm_out_state_invalid_packets_total counter
node_xfrm_out_state_invalid_packets_total 28765
# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_out_state_mode_error_packets_total counter
node_xfrm_out_state_mode_error_packets_total 8
# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error
# TYPE node_xfrm_out_state_proto_error_packets_total counter
node_xfrm_out_state_proto_error_packets_total 4542
# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow
# TYPE node_xfrm_out_state_seq_error_packets_total counter
node_xfrm_out_state_seq_error_packets_total 543
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10
# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_2 untyped
testmetric1_2{foo="baz"} 20
# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_1 untyped
testmetric2_1{foo="bar"} 30
# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_2 untyped
testmetric2_2{foo="baz"} 40

View file

@@ -1,251 +0,0 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_buddyinfo_blocks Count of free blocks according to size.
# TYPE node_buddyinfo_blocks gauge
node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759
node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381
node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572
node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093
node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3
node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791
node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185
node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475
node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530
node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2
node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194
node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567
node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45
node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102
node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12
node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4
node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_network_receive_drop_total Network device statistic receive_drop.
# TYPE node_network_receive_drop_total counter
node_network_receive_drop_total{device="lo0"} 0
# HELP node_network_receive_errs_total Network device statistic receive_errs.
# TYPE node_network_receive_errs_total counter
node_network_receive_errs_total{device="lo0"} 0
# HELP node_network_receive_packets_total Network device statistic receive_packets.
# TYPE node_network_receive_packets_total counter
# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes.
# TYPE node_network_transmit_bytes_total counter
# HELP node_network_transmit_drop_total Network device statistic transmit_drop.
# TYPE node_network_transmit_drop_total counter
node_network_transmit_drop_total{device="lo0"} 0
# HELP node_network_transmit_errs_total Network device statistic transmit_errs.
# TYPE node_network_transmit_errs_total counter
node_network_transmit_errs_total{device="lo0"} 0
# HELP node_network_transmit_packets_total Network device statistic transmit_packets.
# TYPE node_network_transmit_packets_total counter
# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id.
# TYPE node_os_info gauge
node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1
# HELP node_os_version Metric containing the major.minor part of the OS version.
# TYPE node_os_version gauge
node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="boottime"} 1
node_scrape_collector_success{collector="buddyinfo"} 1
node_scrape_collector_success{collector="cpu"} 1
node_scrape_collector_success{collector="exec"} 1
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="meminfo"} 0
node_scrape_collector_success{collector="netdev"} 1
node_scrape_collector_success{collector="os"} 1
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="xfrm"} 1
# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime_seconds gauge
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
# HELP node_xfrm_acquire_error_packets_total State hasnt been fully acquired before use
# TYPE node_xfrm_acquire_error_packets_total counter
node_xfrm_acquire_error_packets_total 24532
# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed
# TYPE node_xfrm_fwd_hdr_error_packets_total counter
node_xfrm_fwd_hdr_error_packets_total 6654
# HELP node_xfrm_in_buffer_error_packets_total No buffer is left
# TYPE node_xfrm_in_buffer_error_packets_total counter
node_xfrm_in_buffer_error_packets_total 2
# HELP node_xfrm_in_error_packets_total All errors not matched by other
# TYPE node_xfrm_in_error_packets_total counter
node_xfrm_in_error_packets_total 1
# HELP node_xfrm_in_hdr_error_packets_total Header error
# TYPE node_xfrm_in_hdr_error_packets_total counter
node_xfrm_in_hdr_error_packets_total 4
# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found
# TYPE node_xfrm_in_no_pols_packets_total counter
node_xfrm_in_no_pols_packets_total 65432
# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong
# TYPE node_xfrm_in_no_states_packets_total counter
node_xfrm_in_no_states_packets_total 3
# HELP node_xfrm_in_pol_block_packets_total Policy discards
# TYPE node_xfrm_in_pol_block_packets_total counter
node_xfrm_in_pol_block_packets_total 100
# HELP node_xfrm_in_pol_error_packets_total Policy error
# TYPE node_xfrm_in_pol_error_packets_total counter
node_xfrm_in_pol_error_packets_total 10000
# HELP node_xfrm_in_state_expired_packets_total State is expired
# TYPE node_xfrm_in_state_expired_packets_total counter
node_xfrm_in_state_expired_packets_total 7
# HELP node_xfrm_in_state_invalid_packets_total State is invalid
# TYPE node_xfrm_in_state_invalid_packets_total counter
node_xfrm_in_state_invalid_packets_total 55555
# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch
# TYPE node_xfrm_in_state_mismatch_packets_total counter
node_xfrm_in_state_mismatch_packets_total 23451
# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_in_state_mode_error_packets_total counter
node_xfrm_in_state_mode_error_packets_total 100
# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong
# TYPE node_xfrm_in_state_proto_error_packets_total counter
node_xfrm_in_state_proto_error_packets_total 40
# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window
# TYPE node_xfrm_in_state_seq_error_packets_total counter
node_xfrm_in_state_seq_error_packets_total 6000
# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong
# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter
node_xfrm_in_tmpl_mismatch_packets_total 51
# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error
# TYPE node_xfrm_out_bundle_check_error_packets_total counter
node_xfrm_out_bundle_check_error_packets_total 555
# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error
# TYPE node_xfrm_out_bundle_gen_error_packets_total counter
node_xfrm_out_bundle_gen_error_packets_total 43321
# HELP node_xfrm_out_error_packets_total All errors which is not matched others
# TYPE node_xfrm_out_error_packets_total counter
node_xfrm_out_error_packets_total 1e+06
# HELP node_xfrm_out_no_states_packets_total No state is found
# TYPE node_xfrm_out_no_states_packets_total counter
node_xfrm_out_no_states_packets_total 869
# HELP node_xfrm_out_pol_block_packets_total Policy discards
# TYPE node_xfrm_out_pol_block_packets_total counter
node_xfrm_out_pol_block_packets_total 43456
# HELP node_xfrm_out_pol_dead_packets_total Policy is dead
# TYPE node_xfrm_out_pol_dead_packets_total counter
node_xfrm_out_pol_dead_packets_total 7656
# HELP node_xfrm_out_pol_error_packets_total Policy error
# TYPE node_xfrm_out_pol_error_packets_total counter
node_xfrm_out_pol_error_packets_total 1454
# HELP node_xfrm_out_state_expired_packets_total State is expired
# TYPE node_xfrm_out_state_expired_packets_total counter
node_xfrm_out_state_expired_packets_total 565
# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired
# TYPE node_xfrm_out_state_invalid_packets_total counter
node_xfrm_out_state_invalid_packets_total 28765
# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_out_state_mode_error_packets_total counter
node_xfrm_out_state_mode_error_packets_total 8
# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error
# TYPE node_xfrm_out_state_proto_error_packets_total counter
node_xfrm_out_state_proto_error_packets_total 4542
# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow
# TYPE node_xfrm_out_state_seq_error_packets_total counter
node_xfrm_out_state_seq_error_packets_total 543
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10
# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_2 untyped
testmetric1_2{foo="baz"} 20
# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_1 untyped
testmetric2_1{foo="bar"} 30
# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_2 untyped
testmetric2_2{foo="baz"} 40

View file

@@ -1,287 +0,0 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_buddyinfo_blocks Count of free blocks according to size.
# TYPE node_buddyinfo_blocks gauge
node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759
node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381
node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572
node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093
node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3
node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791
node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185
node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475
node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530
node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2
node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194
node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567
node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45
node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102
node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12
node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4
node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_memory_swap_in_bytes_total Bytes paged in from swap devices
# TYPE node_memory_swap_in_bytes_total counter
node_memory_swap_in_bytes_total 0
# HELP node_memory_swap_out_bytes_total Bytes paged out to swap devices
# TYPE node_memory_swap_out_bytes_total counter
node_memory_swap_out_bytes_total 0
# HELP node_memory_swap_size_bytes Total swap memory size
# TYPE node_memory_swap_size_bytes gauge
node_memory_swap_size_bytes 1.073741824e+09
# HELP node_memory_swap_used_bytes Currently allocated swap
# TYPE node_memory_swap_used_bytes gauge
node_memory_swap_used_bytes 0
# HELP node_memory_user_wired_bytes Locked in memory by user, mlock, etc
# TYPE node_memory_user_wired_bytes gauge
node_memory_user_wired_bytes 0
# HELP node_netisr_bindthreads netisr threads bound to CPUs
# TYPE node_netisr_bindthreads gauge
node_netisr_bindthreads 0
# HELP node_netisr_defaultqlimit netisr default queue limit
# TYPE node_netisr_defaultqlimit gauge
node_netisr_defaultqlimit 256
# HELP node_netisr_maxprot netisr maximum protocols
# TYPE node_netisr_maxprot gauge
node_netisr_maxprot 16
# HELP node_netisr_maxqlimit netisr maximum queue limit
# TYPE node_netisr_maxqlimit gauge
node_netisr_maxqlimit 10240
# HELP node_netisr_maxthreads netisr maximum thread count
# TYPE node_netisr_maxthreads gauge
node_netisr_maxthreads 1
# HELP node_netisr_numthreads netisr current thread count
# TYPE node_netisr_numthreads gauge
node_netisr_numthreads 1
# HELP node_network_receive_drop_total Network device statistic receive_drop.
# TYPE node_network_receive_drop_total counter
node_network_receive_drop_total{device="lo0"} 0
# HELP node_network_receive_errs_total Network device statistic receive_errs.
# TYPE node_network_receive_errs_total counter
node_network_receive_errs_total{device="lo0"} 0
# HELP node_network_receive_packets_total Network device statistic receive_packets.
# TYPE node_network_receive_packets_total counter
# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes.
# TYPE node_network_transmit_bytes_total counter
# HELP node_network_transmit_drop_total Network device statistic transmit_drop.
# TYPE node_network_transmit_drop_total counter
node_network_transmit_drop_total{device="lo0"} 0
# HELP node_network_transmit_errs_total Network device statistic transmit_errs.
# TYPE node_network_transmit_errs_total counter
node_network_transmit_errs_total{device="lo0"} 0
# HELP node_network_transmit_packets_total Network device statistic transmit_packets.
# TYPE node_network_transmit_packets_total counter
# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id.
# TYPE node_os_info gauge
node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1
# HELP node_os_version Metric containing the major.minor part of the OS version.
# TYPE node_os_version gauge
node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="boottime"} 1
node_scrape_collector_success{collector="buddyinfo"} 1
node_scrape_collector_success{collector="cpu"} 1
node_scrape_collector_success{collector="exec"} 1
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="meminfo"} 1
node_scrape_collector_success{collector="netdev"} 1
node_scrape_collector_success{collector="netisr"} 1
node_scrape_collector_success{collector="netstat"} 1
node_scrape_collector_success{collector="os"} 1
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="xfrm"} 1
node_scrape_collector_success{collector="zfs"} 1
# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime_seconds gauge
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
# HELP node_xfrm_acquire_error_packets_total State hasn't been fully acquired before use
# TYPE node_xfrm_acquire_error_packets_total counter
node_xfrm_acquire_error_packets_total 24532
# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed
# TYPE node_xfrm_fwd_hdr_error_packets_total counter
node_xfrm_fwd_hdr_error_packets_total 6654
# HELP node_xfrm_in_buffer_error_packets_total No buffer is left
# TYPE node_xfrm_in_buffer_error_packets_total counter
node_xfrm_in_buffer_error_packets_total 2
# HELP node_xfrm_in_error_packets_total All errors not matched by other
# TYPE node_xfrm_in_error_packets_total counter
node_xfrm_in_error_packets_total 1
# HELP node_xfrm_in_hdr_error_packets_total Header error
# TYPE node_xfrm_in_hdr_error_packets_total counter
node_xfrm_in_hdr_error_packets_total 4
# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found
# TYPE node_xfrm_in_no_pols_packets_total counter
node_xfrm_in_no_pols_packets_total 65432
# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong
# TYPE node_xfrm_in_no_states_packets_total counter
node_xfrm_in_no_states_packets_total 3
# HELP node_xfrm_in_pol_block_packets_total Policy discards
# TYPE node_xfrm_in_pol_block_packets_total counter
node_xfrm_in_pol_block_packets_total 100
# HELP node_xfrm_in_pol_error_packets_total Policy error
# TYPE node_xfrm_in_pol_error_packets_total counter
node_xfrm_in_pol_error_packets_total 10000
# HELP node_xfrm_in_state_expired_packets_total State is expired
# TYPE node_xfrm_in_state_expired_packets_total counter
node_xfrm_in_state_expired_packets_total 7
# HELP node_xfrm_in_state_invalid_packets_total State is invalid
# TYPE node_xfrm_in_state_invalid_packets_total counter
node_xfrm_in_state_invalid_packets_total 55555
# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch
# TYPE node_xfrm_in_state_mismatch_packets_total counter
node_xfrm_in_state_mismatch_packets_total 23451
# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_in_state_mode_error_packets_total counter
node_xfrm_in_state_mode_error_packets_total 100
# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong
# TYPE node_xfrm_in_state_proto_error_packets_total counter
node_xfrm_in_state_proto_error_packets_total 40
# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window
# TYPE node_xfrm_in_state_seq_error_packets_total counter
node_xfrm_in_state_seq_error_packets_total 6000
# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong
# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter
node_xfrm_in_tmpl_mismatch_packets_total 51
# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error
# TYPE node_xfrm_out_bundle_check_error_packets_total counter
node_xfrm_out_bundle_check_error_packets_total 555
# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error
# TYPE node_xfrm_out_bundle_gen_error_packets_total counter
node_xfrm_out_bundle_gen_error_packets_total 43321
# HELP node_xfrm_out_error_packets_total All errors which is not matched others
# TYPE node_xfrm_out_error_packets_total counter
node_xfrm_out_error_packets_total 1e+06
# HELP node_xfrm_out_no_states_packets_total No state is found
# TYPE node_xfrm_out_no_states_packets_total counter
node_xfrm_out_no_states_packets_total 869
# HELP node_xfrm_out_pol_block_packets_total Policy discards
# TYPE node_xfrm_out_pol_block_packets_total counter
node_xfrm_out_pol_block_packets_total 43456
# HELP node_xfrm_out_pol_dead_packets_total Policy is dead
# TYPE node_xfrm_out_pol_dead_packets_total counter
node_xfrm_out_pol_dead_packets_total 7656
# HELP node_xfrm_out_pol_error_packets_total Policy error
# TYPE node_xfrm_out_pol_error_packets_total counter
node_xfrm_out_pol_error_packets_total 1454
# HELP node_xfrm_out_state_expired_packets_total State is expired
# TYPE node_xfrm_out_state_expired_packets_total counter
node_xfrm_out_state_expired_packets_total 565
# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired
# TYPE node_xfrm_out_state_invalid_packets_total counter
node_xfrm_out_state_invalid_packets_total 28765
# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_out_state_mode_error_packets_total counter
node_xfrm_out_state_mode_error_packets_total 8
# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error
# TYPE node_xfrm_out_state_proto_error_packets_total counter
node_xfrm_out_state_proto_error_packets_total 4542
# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow
# TYPE node_xfrm_out_state_seq_error_packets_total counter
node_xfrm_out_state_seq_error_packets_total 543
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10
# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_2 untyped
testmetric1_2{foo="baz"} 20
# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_1 untyped
testmetric2_1{foo="bar"} 30
# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_2 untyped
testmetric2_2{foo="baz"} 40

View file

@ -1,209 +0,0 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_memory_swap_size_bytes Memory information field swap_size_bytes.
# TYPE node_memory_swap_size_bytes gauge
node_memory_swap_size_bytes 6.442426368e+09
# HELP node_memory_swap_used_bytes Memory information field swap_used_bytes.
# TYPE node_memory_swap_used_bytes gauge
node_memory_swap_used_bytes 0
# HELP node_memory_swapped_in_pages_bytes_total Memory information field swapped_in_pages_bytes_total.
# TYPE node_memory_swapped_in_pages_bytes_total counter
node_memory_swapped_in_pages_bytes_total 0
# HELP node_memory_swapped_out_pages_bytes_total Memory information field swapped_out_pages_bytes_total.
# TYPE node_memory_swapped_out_pages_bytes_total counter
node_memory_swapped_out_pages_bytes_total 0
# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id.
# TYPE node_os_info gauge
node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1
# HELP node_os_version Metric containing the major.minor part of the OS version.
# TYPE node_os_version gauge
node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="boottime"} 1
node_scrape_collector_success{collector="cpu"} 0
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="meminfo"} 1
node_scrape_collector_success{collector="os"} 1
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="xfrm"} 1
# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime_seconds gauge
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
# HELP node_xfrm_acquire_error_packets_total State hasn't been fully acquired before use
# TYPE node_xfrm_acquire_error_packets_total counter
node_xfrm_acquire_error_packets_total 24532
# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed
# TYPE node_xfrm_fwd_hdr_error_packets_total counter
node_xfrm_fwd_hdr_error_packets_total 6654
# HELP node_xfrm_in_buffer_error_packets_total No buffer is left
# TYPE node_xfrm_in_buffer_error_packets_total counter
node_xfrm_in_buffer_error_packets_total 2
# HELP node_xfrm_in_error_packets_total All errors not matched by other
# TYPE node_xfrm_in_error_packets_total counter
node_xfrm_in_error_packets_total 1
# HELP node_xfrm_in_hdr_error_packets_total Header error
# TYPE node_xfrm_in_hdr_error_packets_total counter
node_xfrm_in_hdr_error_packets_total 4
# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found
# TYPE node_xfrm_in_no_pols_packets_total counter
node_xfrm_in_no_pols_packets_total 65432
# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong
# TYPE node_xfrm_in_no_states_packets_total counter
node_xfrm_in_no_states_packets_total 3
# HELP node_xfrm_in_pol_block_packets_total Policy discards
# TYPE node_xfrm_in_pol_block_packets_total counter
node_xfrm_in_pol_block_packets_total 100
# HELP node_xfrm_in_pol_error_packets_total Policy error
# TYPE node_xfrm_in_pol_error_packets_total counter
node_xfrm_in_pol_error_packets_total 10000
# HELP node_xfrm_in_state_expired_packets_total State is expired
# TYPE node_xfrm_in_state_expired_packets_total counter
node_xfrm_in_state_expired_packets_total 7
# HELP node_xfrm_in_state_invalid_packets_total State is invalid
# TYPE node_xfrm_in_state_invalid_packets_total counter
node_xfrm_in_state_invalid_packets_total 55555
# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch
# TYPE node_xfrm_in_state_mismatch_packets_total counter
node_xfrm_in_state_mismatch_packets_total 23451
# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_in_state_mode_error_packets_total counter
node_xfrm_in_state_mode_error_packets_total 100
# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong
# TYPE node_xfrm_in_state_proto_error_packets_total counter
node_xfrm_in_state_proto_error_packets_total 40
# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window
# TYPE node_xfrm_in_state_seq_error_packets_total counter
node_xfrm_in_state_seq_error_packets_total 6000
# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong
# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter
node_xfrm_in_tmpl_mismatch_packets_total 51
# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error
# TYPE node_xfrm_out_bundle_check_error_packets_total counter
node_xfrm_out_bundle_check_error_packets_total 555
# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error
# TYPE node_xfrm_out_bundle_gen_error_packets_total counter
node_xfrm_out_bundle_gen_error_packets_total 43321
# HELP node_xfrm_out_error_packets_total All errors which is not matched others
# TYPE node_xfrm_out_error_packets_total counter
node_xfrm_out_error_packets_total 1e+06
# HELP node_xfrm_out_no_states_packets_total No state is found
# TYPE node_xfrm_out_no_states_packets_total counter
node_xfrm_out_no_states_packets_total 869
# HELP node_xfrm_out_pol_block_packets_total Policy discards
# TYPE node_xfrm_out_pol_block_packets_total counter
node_xfrm_out_pol_block_packets_total 43456
# HELP node_xfrm_out_pol_dead_packets_total Policy is dead
# TYPE node_xfrm_out_pol_dead_packets_total counter
node_xfrm_out_pol_dead_packets_total 7656
# HELP node_xfrm_out_pol_error_packets_total Policy error
# TYPE node_xfrm_out_pol_error_packets_total counter
node_xfrm_out_pol_error_packets_total 1454
# HELP node_xfrm_out_state_expired_packets_total State is expired
# TYPE node_xfrm_out_state_expired_packets_total counter
node_xfrm_out_state_expired_packets_total 565
# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired
# TYPE node_xfrm_out_state_invalid_packets_total counter
node_xfrm_out_state_invalid_packets_total 28765
# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_out_state_mode_error_packets_total counter
node_xfrm_out_state_mode_error_packets_total 8
# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error
# TYPE node_xfrm_out_state_proto_error_packets_total counter
node_xfrm_out_state_proto_error_packets_total 4542
# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow
# TYPE node_xfrm_out_state_seq_error_packets_total counter
node_xfrm_out_state_seq_error_packets_total 543
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10
# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_2 untyped
testmetric1_2{foo="baz"} 20
# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_1 untyped
testmetric2_1{foo="bar"} 30
# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_2 untyped
testmetric2_2{foo="baz"} 40

View file

@ -1,276 +0,0 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_buddyinfo_blocks Count of free blocks according to size.
# TYPE node_buddyinfo_blocks gauge
node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759
node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381
node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572
node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093
node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3
node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791
node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185
node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475
node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530
node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2
node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194
node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567
node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45
node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102
node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12
node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4
node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_memory_swap_size_bytes Memory information field swap_size_bytes.
# TYPE node_memory_swap_size_bytes gauge
node_memory_swap_size_bytes 6.693941248e+09
# HELP node_memory_swap_used_bytes Memory information field swap_used_bytes.
# TYPE node_memory_swap_used_bytes gauge
node_memory_swap_used_bytes 0
# HELP node_memory_swapped_in_pages_bytes_total Memory information field swapped_in_pages_bytes_total.
# TYPE node_memory_swapped_in_pages_bytes_total counter
node_memory_swapped_in_pages_bytes_total 0
# HELP node_memory_swapped_out_pages_bytes_total Memory information field swapped_out_pages_bytes_total.
# TYPE node_memory_swapped_out_pages_bytes_total counter
node_memory_swapped_out_pages_bytes_total 0
# HELP node_network_noproto_total Network device statistic noproto.
# TYPE node_network_noproto_total counter
node_network_noproto_total{device="lo0"} 0
node_network_noproto_total{device="pflog0"} 0
# HELP node_network_receive_drop_total Network device statistic receive_drop.
# TYPE node_network_receive_drop_total counter
node_network_receive_drop_total{device="lo0"} 0
node_network_receive_drop_total{device="pflog0"} 0
# HELP node_network_receive_errs_total Network device statistic receive_errs.
# TYPE node_network_receive_errs_total counter
node_network_receive_errs_total{device="lo0"} 0
node_network_receive_errs_total{device="pflog0"} 0
# HELP node_network_receive_packets_total Network device statistic receive_packets.
# TYPE node_network_receive_packets_total counter
# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes.
# TYPE node_network_transmit_bytes_total counter
# HELP node_network_transmit_colls_total Network device statistic transmit_colls.
# TYPE node_network_transmit_colls_total counter
node_network_transmit_colls_total{device="lo0"} 0
node_network_transmit_colls_total{device="pflog0"} 0
# HELP node_network_transmit_drop_total Network device statistic transmit_drop.
# TYPE node_network_transmit_drop_total counter
node_network_transmit_drop_total{device="lo0"} 0
node_network_transmit_drop_total{device="pflog0"} 0
# HELP node_network_transmit_errs_total Network device statistic transmit_errs.
# TYPE node_network_transmit_errs_total counter
node_network_transmit_errs_total{device="lo0"} 0
node_network_transmit_errs_total{device="pflog0"} 0
# HELP node_network_transmit_packets_total Network device statistic transmit_packets.
# TYPE node_network_transmit_packets_total counter
# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id.
# TYPE node_os_info gauge
node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1
# HELP node_os_version Metric containing the major.minor part of the OS version.
# TYPE node_os_version gauge
node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="boottime"} 1
node_scrape_collector_success{collector="buddyinfo"} 1
node_scrape_collector_success{collector="cpu"} 1
node_scrape_collector_success{collector="diskstats"} 1
node_scrape_collector_success{collector="interrupts"} 1
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="meminfo"} 1
node_scrape_collector_success{collector="netdev"} 1
node_scrape_collector_success{collector="os"} 1
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="xfrm"} 1
# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime_seconds gauge
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
# HELP node_xfrm_acquire_error_packets_total State hasn't been fully acquired before use
# TYPE node_xfrm_acquire_error_packets_total counter
node_xfrm_acquire_error_packets_total 24532
# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed
# TYPE node_xfrm_fwd_hdr_error_packets_total counter
node_xfrm_fwd_hdr_error_packets_total 6654
# HELP node_xfrm_in_buffer_error_packets_total No buffer is left
# TYPE node_xfrm_in_buffer_error_packets_total counter
node_xfrm_in_buffer_error_packets_total 2
# HELP node_xfrm_in_error_packets_total All errors not matched by other
# TYPE node_xfrm_in_error_packets_total counter
node_xfrm_in_error_packets_total 1
# HELP node_xfrm_in_hdr_error_packets_total Header error
# TYPE node_xfrm_in_hdr_error_packets_total counter
node_xfrm_in_hdr_error_packets_total 4
# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found
# TYPE node_xfrm_in_no_pols_packets_total counter
node_xfrm_in_no_pols_packets_total 65432
# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong
# TYPE node_xfrm_in_no_states_packets_total counter
node_xfrm_in_no_states_packets_total 3
# HELP node_xfrm_in_pol_block_packets_total Policy discards
# TYPE node_xfrm_in_pol_block_packets_total counter
node_xfrm_in_pol_block_packets_total 100
# HELP node_xfrm_in_pol_error_packets_total Policy error
# TYPE node_xfrm_in_pol_error_packets_total counter
node_xfrm_in_pol_error_packets_total 10000
# HELP node_xfrm_in_state_expired_packets_total State is expired
# TYPE node_xfrm_in_state_expired_packets_total counter
node_xfrm_in_state_expired_packets_total 7
# HELP node_xfrm_in_state_invalid_packets_total State is invalid
# TYPE node_xfrm_in_state_invalid_packets_total counter
node_xfrm_in_state_invalid_packets_total 55555
# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch
# TYPE node_xfrm_in_state_mismatch_packets_total counter
node_xfrm_in_state_mismatch_packets_total 23451
# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_in_state_mode_error_packets_total counter
node_xfrm_in_state_mode_error_packets_total 100
# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong
# TYPE node_xfrm_in_state_proto_error_packets_total counter
node_xfrm_in_state_proto_error_packets_total 40
# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window
# TYPE node_xfrm_in_state_seq_error_packets_total counter
node_xfrm_in_state_seq_error_packets_total 6000
# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong
# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter
node_xfrm_in_tmpl_mismatch_packets_total 51
# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error
# TYPE node_xfrm_out_bundle_check_error_packets_total counter
node_xfrm_out_bundle_check_error_packets_total 555
# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error
# TYPE node_xfrm_out_bundle_gen_error_packets_total counter
node_xfrm_out_bundle_gen_error_packets_total 43321
# HELP node_xfrm_out_error_packets_total All errors which is not matched others
# TYPE node_xfrm_out_error_packets_total counter
node_xfrm_out_error_packets_total 1e+06
# HELP node_xfrm_out_no_states_packets_total No state is found
# TYPE node_xfrm_out_no_states_packets_total counter
node_xfrm_out_no_states_packets_total 869
# HELP node_xfrm_out_pol_block_packets_total Policy discards
# TYPE node_xfrm_out_pol_block_packets_total counter
node_xfrm_out_pol_block_packets_total 43456
# HELP node_xfrm_out_pol_dead_packets_total Policy is dead
# TYPE node_xfrm_out_pol_dead_packets_total counter
node_xfrm_out_pol_dead_packets_total 7656
# HELP node_xfrm_out_pol_error_packets_total Policy error
# TYPE node_xfrm_out_pol_error_packets_total counter
node_xfrm_out_pol_error_packets_total 1454
# HELP node_xfrm_out_state_expired_packets_total State is expired
# TYPE node_xfrm_out_state_expired_packets_total counter
node_xfrm_out_state_expired_packets_total 565
# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired
# TYPE node_xfrm_out_state_invalid_packets_total counter
node_xfrm_out_state_invalid_packets_total 28765
# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_out_state_mode_error_packets_total counter
node_xfrm_out_state_mode_error_packets_total 8
# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error
# TYPE node_xfrm_out_state_proto_error_packets_total counter
node_xfrm_out_state_proto_error_packets_total 4542
# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow
# TYPE node_xfrm_out_state_seq_error_packets_total counter
node_xfrm_out_state_seq_error_packets_total 543
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10
# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_2 untyped
testmetric1_2{foo="baz"} 20
# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_1 untyped
testmetric2_1{foo="bar"} 30
# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_2 untyped
testmetric2_2{foo="baz"} 40

View file

@ -1,234 +0,0 @@
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_buddyinfo_blocks Count of free blocks according to size.
# TYPE node_buddyinfo_blocks gauge
node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759
node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381
node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572
node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093
node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3
node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791
node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185
node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475
node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530
node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2
node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194
node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567
node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45
node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102
node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12
node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4
node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0
node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0
node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1
node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0
node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id.
# TYPE node_os_info gauge
node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1
# HELP node_os_version Metric containing the major.minor part of the OS version.
# TYPE node_os_version gauge
node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="boottime"} 1
node_scrape_collector_success{collector="buddyinfo"} 1
node_scrape_collector_success{collector="cpu"} 0
node_scrape_collector_success{collector="cpufreq"} 1
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="os"} 1
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="xfrm"} 1
node_scrape_collector_success{collector="zfs"} 0
# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime_seconds gauge
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
# HELP node_xfrm_acquire_error_packets_total State hasn't been fully acquired before use
# TYPE node_xfrm_acquire_error_packets_total counter
node_xfrm_acquire_error_packets_total 24532
# HELP node_xfrm_fwd_hdr_error_packets_total Forward routing of a packet is not allowed
# TYPE node_xfrm_fwd_hdr_error_packets_total counter
node_xfrm_fwd_hdr_error_packets_total 6654
# HELP node_xfrm_in_buffer_error_packets_total No buffer is left
# TYPE node_xfrm_in_buffer_error_packets_total counter
node_xfrm_in_buffer_error_packets_total 2
# HELP node_xfrm_in_error_packets_total All errors not matched by other
# TYPE node_xfrm_in_error_packets_total counter
node_xfrm_in_error_packets_total 1
# HELP node_xfrm_in_hdr_error_packets_total Header error
# TYPE node_xfrm_in_hdr_error_packets_total counter
node_xfrm_in_hdr_error_packets_total 4
# HELP node_xfrm_in_no_pols_packets_total No policy is found for states e.g. Inbound SAs are correct but no SP is found
# TYPE node_xfrm_in_no_pols_packets_total counter
node_xfrm_in_no_pols_packets_total 65432
# HELP node_xfrm_in_no_states_packets_total No state is found i.e. Either inbound SPI, address, or IPsec protocol at SA is wrong
# TYPE node_xfrm_in_no_states_packets_total counter
node_xfrm_in_no_states_packets_total 3
# HELP node_xfrm_in_pol_block_packets_total Policy discards
# TYPE node_xfrm_in_pol_block_packets_total counter
node_xfrm_in_pol_block_packets_total 100
# HELP node_xfrm_in_pol_error_packets_total Policy error
# TYPE node_xfrm_in_pol_error_packets_total counter
node_xfrm_in_pol_error_packets_total 10000
# HELP node_xfrm_in_state_expired_packets_total State is expired
# TYPE node_xfrm_in_state_expired_packets_total counter
node_xfrm_in_state_expired_packets_total 7
# HELP node_xfrm_in_state_invalid_packets_total State is invalid
# TYPE node_xfrm_in_state_invalid_packets_total counter
node_xfrm_in_state_invalid_packets_total 55555
# HELP node_xfrm_in_state_mismatch_packets_total State has mismatch option e.g. UDP encapsulation type is mismatch
# TYPE node_xfrm_in_state_mismatch_packets_total counter
node_xfrm_in_state_mismatch_packets_total 23451
# HELP node_xfrm_in_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_in_state_mode_error_packets_total counter
node_xfrm_in_state_mode_error_packets_total 100
# HELP node_xfrm_in_state_proto_error_packets_total Transformation protocol specific error e.g. SA key is wrong
# TYPE node_xfrm_in_state_proto_error_packets_total counter
node_xfrm_in_state_proto_error_packets_total 40
# HELP node_xfrm_in_state_seq_error_packets_total Sequence error i.e. Sequence number is out of window
# TYPE node_xfrm_in_state_seq_error_packets_total counter
node_xfrm_in_state_seq_error_packets_total 6000
# HELP node_xfrm_in_tmpl_mismatch_packets_total No matching template for states e.g. Inbound SAs are correct but SP rule is wrong
# TYPE node_xfrm_in_tmpl_mismatch_packets_total counter
node_xfrm_in_tmpl_mismatch_packets_total 51
# HELP node_xfrm_out_bundle_check_error_packets_total Bundle check error
# TYPE node_xfrm_out_bundle_check_error_packets_total counter
node_xfrm_out_bundle_check_error_packets_total 555
# HELP node_xfrm_out_bundle_gen_error_packets_total Bundle generation error
# TYPE node_xfrm_out_bundle_gen_error_packets_total counter
node_xfrm_out_bundle_gen_error_packets_total 43321
# HELP node_xfrm_out_error_packets_total All errors which is not matched others
# TYPE node_xfrm_out_error_packets_total counter
node_xfrm_out_error_packets_total 1e+06
# HELP node_xfrm_out_no_states_packets_total No state is found
# TYPE node_xfrm_out_no_states_packets_total counter
node_xfrm_out_no_states_packets_total 869
# HELP node_xfrm_out_pol_block_packets_total Policy discards
# TYPE node_xfrm_out_pol_block_packets_total counter
node_xfrm_out_pol_block_packets_total 43456
# HELP node_xfrm_out_pol_dead_packets_total Policy is dead
# TYPE node_xfrm_out_pol_dead_packets_total counter
node_xfrm_out_pol_dead_packets_total 7656
# HELP node_xfrm_out_pol_error_packets_total Policy error
# TYPE node_xfrm_out_pol_error_packets_total counter
node_xfrm_out_pol_error_packets_total 1454
# HELP node_xfrm_out_state_expired_packets_total State is expired
# TYPE node_xfrm_out_state_expired_packets_total counter
node_xfrm_out_state_expired_packets_total 565
# HELP node_xfrm_out_state_invalid_packets_total State is invalid, perhaps expired
# TYPE node_xfrm_out_state_invalid_packets_total counter
node_xfrm_out_state_invalid_packets_total 28765
# HELP node_xfrm_out_state_mode_error_packets_total Transformation mode specific error
# TYPE node_xfrm_out_state_mode_error_packets_total counter
node_xfrm_out_state_mode_error_packets_total 8
# HELP node_xfrm_out_state_proto_error_packets_total Transformation protocol specific error
# TYPE node_xfrm_out_state_proto_error_packets_total counter
node_xfrm_out_state_proto_error_packets_total 4542
# HELP node_xfrm_out_state_seq_error_packets_total Sequence error i.e. Sequence number overflow
# TYPE node_xfrm_out_state_seq_error_packets_total counter
node_xfrm_out_state_seq_error_packets_total 543
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10
# HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_2 untyped
testmetric1_2{foo="baz"} 20
# HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_1 untyped
testmetric2_1{foo="bar"} 30
# HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom
# TYPE testmetric2_2 untyped
testmetric2_2{foo="baz"} 40

File diff suppressed because it is too large

View file

@ -4,7 +4,6 @@ NIC statistics:
rx_packets: 1260062 rx_packets: 1260062
tx_errors: 0 tx_errors: 0
rx_errors: 0 rx_errors: 0
port.rx_dropped: 12028
rx_missed: 401 rx_missed: 401
align_errors: 0 align_errors: 0
tx_single_collisions: 0 tx_single_collisions: 0

View file

@ -1,31 +0,0 @@
24 29 0:22 / /sys rw,nosuid,nodev,noexec,relatime shared:7 - sysfs sysfs rw
25 29 0:23 / /proc rw,nosuid,nodev,noexec,relatime shared:13 - proc proc rw
26 29 0:5 / /dev rw,nosuid,relatime shared:2 - devtmpfs udev rw,size=7978892k,nr_inodes=1994723,mode=755
27 26 0:24 / /dev/pts rw,nosuid,noexec,relatime shared:3 - devpts devpts rw,gid=5,mode=620,ptmxmode=000
28 29 0:25 / /run rw,nosuid,relatime shared:5 - tmpfs tmpfs rw,size=1617716k,mode=755
29 1 259:2 / / rw,relatime shared:1 - ext4 /dev/dm-2 errors=remount-ro,data=ordered
30 24 0:6 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:8 - securityfs securityfs rw
31 26 0:26 / /dev/shm rw,nosuid,nodev shared:4 - tmpfs tmpfs rw,inode64
32 28 0:27 / /run/lock rw,nosuid,nodev,noexec,relatime shared:6 - tmpfs tmpfs rw,size=5120k
33 24 0:28 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:9 - tmpfs tmpfs ro,mode=755
34 31 0:24 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd
35 32 0:25 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:11 - pstore pstore rw
36 33 0:26 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuset
37 34 0:27 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,cpu,cpuacct
38 35 0:28 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,devices
39 36 0:29 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,freezer
40 37 0:30 / /sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,net_cls,net_prio
41 38 0:31 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,blkio
42 39 0:32 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:20 - cgroup cgroup rw,perf_event
43 40 0:33 / /proc/sys/fs/binfmt_misc rw,relatime shared:21 - systemd-1 autofs rw,fd=22,pgrp=1,timeout=300,minproto=5,maxproto=5,direct
44 41 0:34 / /dev/mqueue rw,relatime shared:22 - mqueue mqueue rw
45 42 0:35 / /sys/kernel/debug rw,relatime shared:23 - debugfs debugfs rw
46 43 0:36 / /dev/hugepages rw,relatime shared:24 - hugetlbfs hugetlbfs rw
47 44 0:37 / /sys/fs/fuse/connections rw,relatime shared:25 - fusectl fusectl rw
48 45 260:3 / /boot rw,relatime shared:92 - ext2 /dev/sda3 rw
49 46 0:39 / /run/rpc_pipefs rw,relatime shared:27 - rpc_pipefs rpc_pipefs rw
265 37 0:41 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime shared:94 - binfmt_misc binfmt_misc rw
3002 28 0:79 / /run/user/1000 rw,nosuid,nodev,relatime shared:1225 - tmpfs tmpfs rw,size=1603436k,nr_inodes=400859,mode=700,uid=1000,gid=1000
3147 3002 0:81 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:1290 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
3148 3003 260:0 / /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\040bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk rw,relatime shared:31 - ext4 /dev/sda rw,data=ordered
3149 3004 260:0 / /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\011bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk rw,relatime shared:32 - ext4 /dev/sda rw,data=ordered
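The fixture above uses the /proc/self/mountinfo layout: a fixed set of fields, optional fields, a lone "-" separator, then filesystem type, source, and super options. A minimal standalone Go sketch of splitting such lines (illustrative only; node_exporter itself goes through the github.com/prometheus/procfs library):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		// Optional fields end at the lone "-" separator; filesystem type,
		// source and super options follow it.
		parts := strings.SplitN(s.Text(), " - ", 2)
		if len(parts) != 2 {
			continue
		}
		pre := strings.Fields(parts[0])
		post := strings.Fields(parts[1])
		if len(pre) < 5 || len(post) < 2 {
			continue
		}
		mountPoint, fsType, source := pre[4], post[0], post[1]
		fmt.Printf("%s on %s type %s\n", source, mountPoint, fsType)
	}
}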

View file

@@ -0,0 +1,32 @@
rootfs / rootfs rw 0 0
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
udev /dev devtmpfs rw,relatime,size=10240k,nr_inodes=1008585,mode=755 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,nosuid,relatime,size=1617716k,mode=755 0 0
/dev/dm-2 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=22,pgrp=1,timeout=300,minproto=5,maxproto=5,direct 0 0
mqueue /dev/mqueue mqueue rw,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,relatime 0 0
fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0
/dev/sda3 /boot ext2 rw,relatime 0 0
rpc_pipefs /run/rpc_pipefs rpc_pipefs rw,relatime 0 0
binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,relatime 0 0
tmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=808860k,mode=700,uid=1000,gid=1000 0 0
gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
/dev/sda /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\040bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk ext4 rw,relatime,data=ordered 0 0
/dev/sda /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\011bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk ext4 rw,relatime,data=ordered 0 0

View file

@@ -1,61 +0,0 @@
CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7
10: 3287008667 3310445093 3301386305 3273132897 3368262064 3641875466 3360412019 3225020442 GICv3 27 Level arch_timer
14: 7815 0 0 4 0 0 0 0 GICv3 37 Level ttyS0
17: 0 0 0 0 0 0 0 0 GICv3 48 Edge ACPI:Ged
18: 0 0 0 0 0 0 0 0 GICv3 49 Edge ACPI:Ged
19: 0 0 0 0 0 0 0 0 GICv3 50 Edge ACPI:Ged
20: 0 0 0 0 0 0 0 0 GICv3 51 Edge ACPI:Ged
21: 0 0 0 0 0 0 0 0 GICv3 52 Edge ACPI:Ged
22: 0 0 0 0 0 0 0 0 GICv3 53 Edge ACPI:Ged
23: 0 0 0 0 0 0 0 0 GICv3 54 Edge ACPI:Ged
24: 0 0 0 0 0 0 0 0 GICv3 55 Edge ACPI:Ged
25: 0 0 0 0 0 0 0 0 GICv3 56 Edge ACPI:Ged
26: 0 0 0 0 0 0 0 0 GICv3 57 Edge ACPI:Ged
27: 0 0 0 0 0 0 0 0 GICv3 58 Edge ACPI:Ged
28: 0 0 0 0 0 0 0 0 GICv3 59 Edge ACPI:Ged
29: 0 0 0 0 0 0 0 0 GICv3 60 Edge ACPI:Ged
30: 0 0 0 0 0 0 0 0 GICv3 61 Edge ACPI:Ged
31: 0 0 0 0 0 0 0 0 GICv3 62 Edge ACPI:Ged
32: 0 0 0 0 0 0 0 0 GICv3 63 Edge ACPI:Ged
33: 0 0 0 0 0 0 0 0 GICv3 64 Edge ACPI:Ged
34: 0 0 0 0 0 0 0 0 GICv3 65 Edge ACPI:Ged
35: 0 0 0 0 0 0 0 0 GICv3 66 Edge ACPI:Ged
36: 0 0 0 0 0 0 0 0 GICv3 67 Edge ACPI:Ged
37: 0 0 0 0 0 0 0 0 GICv3 68 Edge ACPI:Ged
38: 0 0 0 0 0 0 0 0 GICv3 69 Edge ACPI:Ged
39: 0 0 0 0 0 0 0 0 GICv3 70 Edge ACPI:Ged
40: 0 0 0 0 0 0 0 0 GICv3 71 Edge ACPI:Ged
41: 0 0 0 0 0 0 0 0 GICv3 72 Edge ACPI:Ged
42: 0 0 0 0 0 0 0 0 GICv3 73 Edge ACPI:Ged
43: 0 0 0 0 0 0 0 0 GICv3 74 Edge ACPI:Ged
44: 0 0 0 0 0 0 0 0 GICv3 75 Edge ACPI:Ged
45: 0 0 0 0 0 0 0 0 GICv3 76 Edge ACPI:Ged
46: 0 0 0 0 0 0 0 0 GICv3 77 Edge ACPI:Ged
47: 0 0 0 0 0 0 0 0 GICv3 78 Edge ACPI:Ged
48: 0 0 0 0 0 0 0 0 GICv3 79 Edge ACPI:Ged
49: 0 0 0 0 0 0 0 0 GICv3 23 Level arm-pmu
50: 0 0 0 0 0 0 0 0 ARMH0061:00 3 Edge ACPI:Event
51: 13 0 0 20 4 0 0 0 ITS-MSI 65536 Edge nvme0q0
52: 0 9 0 0 0 5 20 0 ITS-MSI 507904 Edge nvme1q0
53: 129969327 0 0 0 0 0 0 0 ITS-MSI 65537 Edge nvme0q1
54: 0 0 0 0 126913956 0 0 0 ITS-MSI 65538 Edge nvme0q2
55: 0 199619844 0 0 0 0 0 0 ITS-MSI 507905 Edge nvme1q1
56: 0 0 0 0 0 198494086 0 0 ITS-MSI 507906 Edge nvme1q2
57: 0 0 51 0 0 32479308 0 0 ITS-MSI 81920 Edge ena-mgmnt@pci:0000:00:05.0
58: 0 0 1195697946 437 0 0 0 0 ITS-MSI 81921 Edge eth0-Tx-Rx-0
59: 0 0 0 2709937608 1619 0 0 0 ITS-MSI 81922 Edge eth0-Tx-Rx-1
60: 0 1457922109 0 0 0 71 0 0 ITS-MSI 81923 Edge eth0-Tx-Rx-2
61: 2052879736 0 0 0 0 0 124 0 ITS-MSI 81924 Edge eth0-Tx-Rx-3
62: 0 0 0 0 0 0 2268695629 1530 ITS-MSI 81925 Edge eth0-Tx-Rx-4
63: 50 0 0 0 0 0 0 1997799253 ITS-MSI 81926 Edge eth0-Tx-Rx-5
64: 0 48 0 0 1238622585 0 0 0 ITS-MSI 81927 Edge eth0-Tx-Rx-6
65: 0 0 47 0 0 0 0 1574978449 ITS-MSI 81928 Edge eth0-Tx-Rx-7
IPI0:2768808080 2844211768 2878602432 2730576120 2723524623 3349096412 2717389879 2154252810 Rescheduling interrupts
IPI1: 357815098 213258177 153713187 132890624 124746406 123498004 122386326 120728639 Function call interrupts
IPI2: 0 0 0 0 0 0 0 0 CPU stop interrupts
IPI3: 0 0 0 0 0 0 0 0 CPU stop (for crash dump) interrupts
IPI4: 0 0 0 0 0 0 0 0 Timer broadcast interrupts
IPI5: 0 0 0 0 0 0 0 0 IRQ work interrupts
IPI6: 0 0 0 0 0 0 0 0 CPU wake-up interrupts
Err: 0

View file

@@ -1,4 +1,4 @@
TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed EmbryonicRsts PruneCalled RcvPruned OfoPruned OutOfWindowIcmps LockDroppedIcmps ArpFilter TW TWRecycled TWKilled PAWSPassive PAWSActive PAWSEstab DelayedACKs DelayedACKLocked DelayedACKLost ListenOverflows ListenDrops TCPPrequeued TCPDirectCopyFromBacklog TCPDirectCopyFromPrequeue TCPPrequeueDropped TCPHPHits TCPHPHitsToUser TCPPureAcks TCPHPAcks TCPRenoRecovery TCPSackRecovery TCPSACKReneging TCPFACKReorder TCPSACKReorder TCPRenoReorder TCPTSReorder TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo TCPLoss TCPLostRetransmit TCPRenoFailures TCPSackFailures TCPLossFailures TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans TCPTimeouts TCPRenoRecoveryFail TCPSackRecoveryFail TCPSchedulerFailed TCPRcvCollapsed TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv TCPAbortOnData TCPAbortOnClose TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger TCPAbortFailed TCPMemoryPressures TCPSACKDiscard TCPDSACKIgnoredOld TCPDSACKIgnoredNoUndo TCPSpuriousRTOs TCPMD5NotFound TCPMD5Unexpected TCPSackShifted TCPSackMerged TCPSackShiftFallback TCPBacklogDrop TCPMinTTLDrop TCPDeferAcceptDrop IPReversePathFilter TCPTimeWaitOverflow TCPReqQFullDoCookies TCPReqQFullDrop TCPChallengeACK TCPSYNChallenge TCPOFOQueue TCPRcvQDrop TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed EmbryonicRsts PruneCalled RcvPruned OfoPruned OutOfWindowIcmps LockDroppedIcmps ArpFilter TW TWRecycled TWKilled PAWSPassive PAWSActive PAWSEstab DelayedACKs DelayedACKLocked DelayedACKLost ListenOverflows ListenDrops TCPPrequeued TCPDirectCopyFromBacklog TCPDirectCopyFromPrequeue TCPPrequeueDropped TCPHPHits TCPHPHitsToUser TCPPureAcks TCPHPAcks TCPRenoRecovery TCPSackRecovery TCPSACKReneging TCPFACKReorder TCPSACKReorder TCPRenoReorder TCPTSReorder TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo TCPLoss TCPLostRetransmit TCPRenoFailures TCPSackFailures TCPLossFailures TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans TCPTimeouts TCPRenoRecoveryFail TCPSackRecoveryFail TCPSchedulerFailed TCPRcvCollapsed TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv TCPAbortOnData TCPAbortOnClose TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger TCPAbortFailed TCPMemoryPressures TCPSACKDiscard TCPDSACKIgnoredOld TCPDSACKIgnoredNoUndo TCPSpuriousRTOs TCPMD5NotFound TCPMD5Unexpected TCPSackShifted TCPSackMerged TCPSackShiftFallback TCPBacklogDrop TCPMinTTLDrop TCPDeferAcceptDrop IPReversePathFilter TCPTimeWaitOverflow TCPReqQFullDoCookies TCPReqQFullDrop TCPChallengeACK TCPSYNChallenge
TcpExt: 0 0 2 0 0 0 0 0 0 0 388812 0 0 0 0 6 102471 17 9 0 0 80568 0 168808 0 4471289 26 1433940 3744565 0 1 0 0 0 0 0 0 0 0 48 0 0 0 1 0 1 0 1 115 0 0 0 0 9 0 5 0 41 4 0 0 0 0 0 0 0 1 0 0 0 0 2 5 0 0 0 0 0 0 0 2 2 42 131 TcpExt: 0 0 2 0 0 0 0 0 0 0 388812 0 0 0 0 6 102471 17 9 0 0 80568 0 168808 0 4471289 26 1433940 3744565 0 1 0 0 0 0 0 0 0 0 48 0 0 0 1 0 1 0 1 115 0 0 0 0 9 0 5 0 41 4 0 0 0 0 0 0 0 1 0 0 0 0 2 5 0 0 0 0 0 0 0 2 2
IpExt: InNoRoutes InTruncatedPkts InMcastPkts OutMcastPkts InBcastPkts OutBcastPkts InOctets OutOctets InMcastOctets OutMcastOctets InBcastOctets OutBcastOctets IpExt: InNoRoutes InTruncatedPkts InMcastPkts OutMcastPkts InBcastPkts OutBcastPkts InOctets OutOctets InMcastOctets OutMcastOctets InBcastOctets OutBcastOctets
IpExt: 0 0 0 0 0 0 6286396970 2786264347 0 0 0 0 IpExt: 0 0 0 0 0 0 6286396970 2786264347 0 0 0 0
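The TcpExt/IpExt fixture follows the /proc/net/netstat convention of paired lines: one line of field names, then one line of values with the same prefix. A minimal Go sketch of zipping the two (illustrative, not the collector's actual code):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("/proc/net/netstat")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		names := strings.Fields(s.Text())
		if !s.Scan() {
			break
		}
		values := strings.Fields(s.Text())
		if len(names) == 0 || len(values) == 0 {
			continue
		}
		// names[0] and values[0] are both the protocol prefix, e.g. "TcpExt:".
		proto := strings.TrimSuffix(names[0], ":")
		for i := 1; i < len(names) && i < len(values); i++ {
			fmt.Printf("%s.%s = %s\n", proto, names[i], values[i])
		}
	}
}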

View file

@@ -2,4 +2,4 @@ net 70 70 69 45
rpc 1218785755 374636 1218815394 rpc 1218785755 374636 1218815394
proc2 18 16 57 74 52 71 73 45 86 0 52 83 61 17 53 50 23 70 82 proc2 18 16 57 74 52 71 73 45 86 0 52 83 61 17 53 50 23 70 82
proc3 22 0 1061909262 48906 4077635 117661341 5 29391916 2570425 2993289 590 0 0 7815 15 1130 0 3983 92385 13332 2 1 23729 proc3 22 0 1061909262 48906 4077635 117661341 5 29391916 2570425 2993289 590 0 0 7815 15 1130 0 3983 92385 13332 2 1 23729
proc4 48 98 51 54 83 85 23 24 1 28 73 68 83 12 84 39 68 59 58 88 29 74 69 96 21 84 15 53 86 54 66 56 97 36 49 32 85 81 11 58 32 67 13 28 35 1 90 26 0 proc4 48 98 51 54 83 85 23 24 1 28 73 68 83 12 84 39 68 59 58 88 29 74 69 96 21 84 15 53 86 54 66 56 97 36 49 32 85 81 11 58 32 67 13 28 35 90 1 26 0

View file

@@ -9,4 +9,3 @@ proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
proc4 2 2 10853 proc4 2 2 10853
proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
wdeleg_getattr 15

View file

@@ -1,28 +0,0 @@
XfrmInError 1
XfrmInBufferError 2
XfrmInHdrError 4
XfrmInNoStates 3
XfrmInStateProtoError 40
XfrmInStateModeError 100
XfrmInStateSeqError 6000
XfrmInStateExpired 7
XfrmInStateMismatch 23451
XfrmInStateInvalid 55555
XfrmInTmplMismatch 51
XfrmInNoPols 65432
XfrmInPolBlock 100
XfrmInPolError 10000
XfrmOutError 1000000
XfrmOutBundleGenError 43321
XfrmOutBundleCheckError 555
XfrmOutNoStates 869
XfrmOutStateProtoError 4542
XfrmOutStateModeError 8
XfrmOutStateSeqError 543
XfrmOutStateExpired 565
XfrmOutPolBlock 43456
XfrmOutPolDead 7656
XfrmOutPolError 1454
XfrmFwdHdrError 6654
XfrmOutStateInvalid 28765
XfrmAcquireError 24532
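This fixture is the raw /proc/net/xfrm_stat input behind the node_xfrm_* counters listed at the top of this section; each line is simply a counter name and a value. A minimal Go sketch of reading it (standalone and illustrative):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	f, err := os.Open("/proc/net/xfrm_stat")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	counters := map[string]uint64{}
	s := bufio.NewScanner(f)
	for s.Scan() {
		fields := strings.Fields(s.Text()) // e.g. "XfrmOutNoStates 869"
		if len(fields) != 2 {
			continue
		}
		v, err := strconv.ParseUint(fields[1], 10, 64)
		if err != nil {
			continue
		}
		counters[fields[0]] = v
	}
	fmt.Println(counters["XfrmOutNoStates"])
}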

View file

@@ -1,2 +1 @@
some avg10=0.00 avg60=0.00 avg300=0.00 total=14036781 some avg10=0.00 avg60=0.00 avg300=0.00 total=14036781
full avg10=0.00 avg60=0.00 avg300=0.00 total=0

View file

@@ -1 +0,0 @@
full avg10=0.00 avg60=0.00 avg300=0.00 total=8494
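The two pressure fixtures above use the /proc/pressure/* format: a "some" or "full" line followed by key=value pairs, with total counted in microseconds of stall time. A minimal Go sketch of decoding one line (illustrative only):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePSILine splits a single pressure line into its kind and numeric fields.
func parsePSILine(line string) (kind string, fields map[string]float64) {
	parts := strings.Fields(line)
	kind = parts[0] // "some" or "full"
	fields = map[string]float64{}
	for _, kv := range parts[1:] {
		k, v, ok := strings.Cut(kv, "=")
		if !ok {
			continue
		}
		n, err := strconv.ParseFloat(v, 64)
		if err != nil {
			continue
		}
		fields[k] = n
	}
	return kind, fields
}

func main() {
	kind, f := parsePSILine("some avg10=0.00 avg60=0.00 avg300=0.00 total=14036781")
	fmt.Println(kind, f["total"]) // total is microseconds of stalled time
}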

View file

@@ -1,11 +0,0 @@
CPU0 CPU1
HI: 7 1
TIMER: 424191 108342
NET_TX: 2301 2430
NET_RX: 43066 104508
BLOCK: 23776 24115
IRQ_POLL: 0 0
TASKLET: 372 1899
SCHED: 378895 152852
HRTIMER: 40 346
RCU: 155929 146631
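The softirqs fixture is a header row naming the CPU columns followed by one row of per-CPU counts per softirq type. A minimal standalone Go sketch of reading it (not the exporter's own parser):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	f, err := os.Open("/proc/softirqs")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	s.Scan() // first line lists the CPU columns, e.g. "CPU0 CPU1"
	cpus := strings.Fields(s.Text())
	for s.Scan() {
		fields := strings.Fields(s.Text())
		if len(fields) < 2 {
			continue
		}
		name := strings.TrimSuffix(fields[0], ":")
		for i, raw := range fields[1:] {
			if i >= len(cpus) {
				break
			}
			v, _ := strconv.ParseUint(raw, 10, 64)
			fmt.Printf("softirq %s on %s = %d\n", name, cpus[i], v)
		}
	}
}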

View file

@@ -1,94 +1,93 @@
6 1 0x01 91 4368 5266997922 97951858082072 6 1 0x01 91 4368 5266997922 97951858082072
name type data name type data
anon_evictable_data 4 0 hits 4 8772612
anon_evictable_metadata 4 0 misses 4 604635
anon_size 4 1917440
arc_loaned_bytes 4 0
arc_meta_limit 4 6275982336
arc_meta_max 4 449286096
arc_meta_min 4 16777216
arc_meta_used 4 308103632
arc_need_free 4 0
arc_no_grow 4 0
arc_prune 4 0
arc_sys_free 4 261496832
arc_tempreserve 4 0
c 4 1643208777
c_max 4 8367976448
c_min 4 33554432
data_size 4 1295836160
deleted 4 60403
demand_data_hits 4 7221032 demand_data_hits 4 7221032
demand_data_misses 4 73300 demand_data_misses 4 73300
demand_metadata_hits 4 1464353 demand_metadata_hits 4 1464353
demand_metadata_misses 4 498170 demand_metadata_misses 4 498170
duplicate_buffers 4 0
duplicate_buffers_size 4 0
duplicate_reads 4 0
evict_l2_cached 4 0
evict_l2_eligible 4 8992514560
evict_l2_ineligible 4 992552448
evict_l2_skip 4 0
evict_not_enough 4 680
evict_skip 4 2265729
hash_chain_max 4 3
hash_chains 4 412
hash_collisions 4 50564
hash_elements 4 42359
hash_elements_max 4 88245
hdr_size 4 16361080
hits 4 8772612
l2_abort_lowmem 4 0
l2_asize 4 0
l2_cdata_free_on_write 4 0
l2_cksum_bad 4 0
l2_compress_failures 4 0
l2_compress_successes 4 0
l2_compress_zeros 4 0
l2_evict_l1cached 4 0
l2_evict_lock_retry 4 0
l2_evict_reading 4 0
l2_feeds 4 0
l2_free_on_write 4 0
l2_hdr_size 4 0
l2_hits 4 0
l2_io_error 4 0
l2_misses 4 0
l2_read_bytes 4 0
l2_rw_clash 4 0
l2_size 4 0
l2_write_bytes 4 0
l2_writes_done 4 0
l2_writes_error 4 0
l2_writes_lock_retry 4 0
l2_writes_sent 4 0
memory_available_bytes 3 -922337203685477580
memory_direct_count 4 542
memory_indirect_count 4 3006
memory_throttle_count 4 0
metadata_size 4 175298560
mfu_evictable_data 4 1017613824
mfu_evictable_metadata 4 9163776
mfu_ghost_evictable_data 4 96731136
mfu_ghost_evictable_metadata 4 8205312
mfu_ghost_hits 4 821
mfu_ghost_size 4 104936448
mfu_hits 4 7829854
mfu_size 4 1066623488
misses 4 604635
mru_evictable_data 4 278091264
mru_evictable_metadata 4 18606592
mru_ghost_evictable_data 4 883765248
mru_ghost_evictable_metadata 4 115962880
mru_ghost_hits 4 21100
mru_ghost_size 4 999728128
mru_hits 4 855535
mru_size 4 402593792
mutex_miss 4 2
other_size 4 116443992
p 4 516395305
prefetch_data_hits 4 3615 prefetch_data_hits 4 3615
prefetch_data_misses 4 17094 prefetch_data_misses 4 17094
prefetch_metadata_hits 4 83612 prefetch_metadata_hits 4 83612
prefetch_metadata_misses 4 16071 prefetch_metadata_misses 4 16071
mru_hits 4 855535
mru_ghost_hits 4 21100
mfu_hits 4 7829854
mfu_ghost_hits 4 821
deleted 4 60403
mutex_miss 4 2
evict_skip 4 2265729
evict_not_enough 4 680
evict_l2_cached 4 0
evict_l2_eligible 4 8992514560
evict_l2_ineligible 4 992552448
evict_l2_skip 4 0
hash_elements 4 42359
hash_elements_max 4 88245
hash_collisions 4 50564
hash_chains 4 412
hash_chain_max 4 3
p 4 516395305
c 4 1643208777
c_min 4 33554432
c_max 4 8367976448
size 4 1603939792 size 4 1603939792
hdr_size 4 16361080
data_size 4 1295836160
metadata_size 4 175298560
other_size 4 116443992
anon_size 4 1917440
anon_evictable_data 4 0
anon_evictable_metadata 4 0
mru_size 4 402593792
mru_evictable_data 4 278091264
mru_evictable_metadata 4 18606592
mru_ghost_size 4 999728128
mru_ghost_evictable_data 4 883765248
mru_ghost_evictable_metadata 4 115962880
mfu_size 4 1066623488
mfu_evictable_data 4 1017613824
mfu_evictable_metadata 4 9163776
mfu_ghost_size 4 104936448
mfu_ghost_evictable_data 4 96731136
mfu_ghost_evictable_metadata 4 8205312
l2_hits 4 0
l2_misses 4 0
l2_feeds 4 0
l2_rw_clash 4 0
l2_read_bytes 4 0
l2_write_bytes 4 0
l2_writes_sent 4 0
l2_writes_done 4 0
l2_writes_error 4 0
l2_writes_lock_retry 4 0
l2_evict_lock_retry 4 0
l2_evict_reading 4 0
l2_evict_l1cached 4 0
l2_free_on_write 4 0
l2_cdata_free_on_write 4 0
l2_abort_lowmem 4 0
l2_cksum_bad 4 0
l2_io_error 4 0
l2_size 4 0
l2_asize 4 0
l2_hdr_size 4 0
l2_compress_successes 4 0
l2_compress_zeros 4 0
l2_compress_failures 4 0
memory_throttle_count 4 0
duplicate_buffers 4 0
duplicate_buffers_size 4 0
duplicate_reads 4 0
memory_direct_count 4 542
memory_indirect_count 4 3006
arc_no_grow 4 0
arc_tempreserve 4 0
arc_loaned_bytes 4 0
arc_prune 4 0
arc_meta_used 4 308103632
arc_meta_limit 4 6275982336
arc_meta_max 4 449286096
arc_meta_min 4 16777216
arc_need_free 4 0
arc_sys_free 4 261496832
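The arcstats fixture is a ZFS kstat file: two header lines, then name/type/value triples, where type 4 entries are unsigned 64-bit counters. A minimal Go sketch of loading it into a map (hypothetical, standalone code):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	f, err := os.Open("/proc/spl/kstat/zfs/arcstats")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	stats := map[string]uint64{}
	s := bufio.NewScanner(f)
	for i := 0; s.Scan(); i++ {
		if i < 2 {
			continue // skip the kstat header and the "name type data" row
		}
		fields := strings.Fields(s.Text())
		if len(fields) != 3 {
			continue
		}
		// Type 4 values are unsigned 64-bit counters; other types, such as the
		// signed memory_available_bytes entry, fail ParseUint and are skipped.
		v, err := strconv.ParseUint(fields[2], 10, 64)
		if err != nil {
			continue
		}
		stats[fields[0]] = v
	}
	fmt.Println("ARC size:", stats["size"], "hits:", stats["hits"])
}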

View file

@@ -1 +0,0 @@
SUSPENDED

View file

@@ -1,3 +0,0 @@
12 3 0x00 1 80 79205351707403 395818011156865
nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt
1884160 3206144 22 132 7155162 104112268 79210489694949 24168078 104112268 79210489849220 0 0

View file

@@ -1,9 +0,0 @@
23 1 0x01 7 2160 221578688875 6665999035587
name type data
dataset_name 7 pool3
writes 4 0
nwritten 4 0
reads 4 0
nread 4 0
nunlinks 4 0
nunlinked 4 0

View file

@@ -1,9 +0,0 @@
24 1 0x01 7 2160 221611904716 7145015038451
name type data
dataset_name 7 pool3/dataset with space
writes 4 4
nwritten 4 12302
reads 4 2
nread 4 28
nunlinks 4 3
nunlinked 4 3
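The per-dataset objset kstats above share the same triple layout, with one wrinkle the "dataset with space" fixture exists to exercise: the dataset_name value can contain spaces, so everything after the type column belongs to the value. A brief Go sketch (hypothetical path, illustrative only):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("/proc/spl/kstat/zfs/pool3/objset-0x18")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		fields := strings.Fields(s.Text())
		if len(fields) < 3 {
			continue
		}
		// Rejoin everything after the type column so names with spaces survive.
		name, value := fields[0], strings.Join(fields[2:], " ")
		if name == "dataset_name" {
			fmt.Println("dataset:", value) // e.g. "pool3/dataset with space"
		}
	}
}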

View file

@@ -1 +0,0 @@
ONLINE

File diff suppressed because it is too large

View file

@@ -8,4 +8,4 @@ node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_different_help
node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_different_help/b.prom"} 1 node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_different_help/b.prom"} 1
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge # TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 1 node_textfile_scrape_error 0

View file

@@ -1,529 +0,0 @@
# Archive created by ttar -C collector/fixtures -c -f collector/fixtures/udev.ttar udev
Directory: udev
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: udev/data
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b11:0
Lines: 38
S:disk/by-id/usb-AMI_Virtual_CDROM0_AAAABBBBCCCC1-0:0
S:disk/by-path/pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0
S:cdrom
L:-100
I:83543243
E:ID_CDROM=1
E:SYSTEMD_MOUNT_DEVICE_BOUND=1
E:ID_VENDOR=AMI
E:ID_VENDOR_ENC=AMI\x20\x20\x20\x20\x20
E:ID_VENDOR_ID=c096
E:ID_MODEL=Virtual_CDROM0
E:ID_MODEL_ENC=Virtual\x20CDROM0\x20\x20
E:ID_MODEL_ID=ee31
E:ID_REVISION=1.00
E:ID_SERIAL=AMI_Virtual_CDROM0_AAAABBBBCCCC1-0:0
E:ID_SERIAL_SHORT=AAAABBBBCCCC1
E:ID_TYPE=cd/dvd
E:ID_INSTANCE=0:0
E:ID_BUS=usb
E:ID_USB_INTERFACES=:905639:
E:ID_USB_INTERFACE_NUM=00
E:ID_USB_DRIVER=usb-storage
E:ID_PATH=pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0
E:ID_PATH_TAG=pci-0000_00_14_0-usb-0_1_1_1_0-scsi-0_0_0_0
E:SCSI_TPGS=0
E:SCSI_TYPE=cd/dvd
E:SCSI_VENDOR=AMI
E:SCSI_VENDOR_ENC=AMI\x20\x20\x20\x20\x20
E:SCSI_MODEL=Virtual_CDROM0
E:SCSI_MODEL_ENC=Virtual\x20CDROM0\x20\x20
E:SCSI_REVISION=1.00
E:ID_SCSI=1
E:ID_SCSI_INQUIRY=1
E:ID_FS_TYPE=
E:ID_FOR_SEAT=block-pci-0000_00_14_0-usb-0_1_1_1_0-scsi-0_0_0_0
G:uaccess
G:systemd
G:seat
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b179:0
Lines: 13
S:disk/by-path/platform-df2969f3.mmc
S:disk/by-id/mmc-SC64G_0x83e36d93
W:1
I:7679747
E:ID_NAME=SC64G
E:ID_SERIAL=0x83e36d93
E:ID_PATH=platform-df2969f3.mmc
E:ID_PATH_TAG=platform-df2969f3_mmc
E:ID_PART_TABLE_UUID=1954c9df
E:ID_PART_TABLE_TYPE=dos
E:ID_DRIVE_FLASH_SD=1
E:ID_DRIVE_MEDIA_FLASH_SD=1
G:systemd
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b179:1
Lines: 30
S:disk/by-id/mmc-SC64G_0x83e36d93-part1
S:disk/by-path/platform-df2969f3.mmc-part1
S:disk/by-label/boot
S:disk/by-uuid/6284-658D
S:disk/by-partuuid/1954c9df-01
W:12
I:8463403
E:ID_NAME=SC64G
E:ID_SERIAL=0x83e36d93
E:ID_PATH=platform-df2969f3.mmc
E:ID_PATH_TAG=platform-df2969f3_mmc
E:ID_PART_TABLE_UUID=1954c9df
E:ID_PART_TABLE_TYPE=dos
E:ID_DRIVE_FLASH_SD=1
E:ID_DRIVE_MEDIA_FLASH_SD=1
E:ID_FS_LABEL=boot
E:ID_FS_LABEL_ENC=boot
E:ID_FS_UUID=6284-658D
E:ID_FS_UUID_ENC=6284-658D
E:ID_FS_VERSION=FAT32
E:ID_FS_TYPE=vfat
E:ID_FS_USAGE=filesystem
E:ID_PART_ENTRY_SCHEME=dos
E:ID_PART_ENTRY_UUID=1954c9df-01
E:ID_PART_ENTRY_TYPE=0xc
E:ID_PART_ENTRY_NUMBER=1
E:ID_PART_ENTRY_OFFSET=8192
E:ID_PART_ENTRY_SIZE=524288
E:ID_PART_ENTRY_DISK=179:0
G:systemd
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b179:2
Lines: 30
S:disk/by-id/mmc-SC64G_0x83e36d93-part2
S:disk/by-path/platform-df2969f3.mmc-part2
S:disk/by-label/rootfs
S:disk/by-uuid/83324ce8-a6f3-4e35-ad64-dbb3d6b87a32
S:disk/by-partuuid/1954c9df-02
W:2
I:7676649
E:ID_NAME=SC64G
E:ID_SERIAL=0x83e36d93
E:ID_PATH=platform-df2969f3.mmc
E:ID_PATH_TAG=platform-df2969f3_mmc
E:ID_PART_TABLE_UUID=1954c9df
E:ID_PART_TABLE_TYPE=dos
E:ID_DRIVE_FLASH_SD=1
E:ID_DRIVE_MEDIA_FLASH_SD=1
E:ID_FS_LABEL=rootfs
E:ID_FS_LABEL_ENC=rootfs
E:ID_FS_UUID=83324ce8-a6f3-4e35-ad64-dbb3d6b87a32
E:ID_FS_UUID_ENC=83324ce8-a6f3-4e35-ad64-dbb3d6b87a32
E:ID_FS_VERSION=1.0
E:ID_FS_TYPE=ext4
E:ID_FS_USAGE=filesystem
E:ID_PART_ENTRY_SCHEME=dos
E:ID_PART_ENTRY_UUID=1954c9df-02
E:ID_PART_ENTRY_TYPE=0x83
E:ID_PART_ENTRY_NUMBER=2
E:ID_PART_ENTRY_OFFSET=532480
E:ID_PART_ENTRY_SIZE=124203008
E:ID_PART_ENTRY_DISK=179:0
G:systemd
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b252:0
Lines: 20
S:disk/by-id/dm-name-nvme0n1_crypt
S:mapper/nvme0n1_crypt
S:disk/by-id/lvm-pv-uuid-c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB
S:disk/by-id/dm-uuid-CRYPT-LUKS2-jolaulot80fy9zsiobkxyxo7y2dqeho2-nvme0n1_crypt
I:72859885
E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1
E:DM_UDEV_PRIMARY_SOURCE_FLAG=1
E:DM_UDEV_RULES=1
E:DM_UDEV_RULES_VSN=2
E:DM_NAME=nvme0n1_crypt
E:DM_UUID=CRYPT-LUKS2-jolaulot80fy9zsiobkxyxo7y2dqeho2-nvme0n1_crypt
E:DM_SUSPENDED=0
E:ID_FS_UUID=c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB
E:ID_FS_UUID_ENC=c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB
E:ID_FS_VERSION=LVM2 001
E:ID_FS_TYPE=LVM2_member
E:ID_FS_USAGE=raid
G:systemd
Q:systemd
V:1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b252:1
Lines: 24
S:disk/by-id/dm-uuid-LVM-wbGqQEBL9SxrW2DLntJwgg8fAv946hw3Tvjqh0v31fWgxEtD4BoHO0lROWFUY65T
S:mapper/system-swap_1
S:disk/by-id/dm-name-system-swap_1
S:disk/by-uuid/5272bb60-04b5-49cd-b730-be57c7604450
S:system/swap_1
I:78705530
E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1
E:DM_UDEV_PRIMARY_SOURCE_FLAG=1
E:DM_UDEV_RULES=1
E:DM_UDEV_RULES_VSN=2
E:DM_NAME=system-swap_1
E:DM_UUID=LVM-wbGqQEBL9SxrW2DLntJwgg8fAv946hw3Tvjqh0v31fWgxEtD4BoHO0lROWFUY65T
E:DM_SUSPENDED=0
E:DM_VG_NAME=system
E:DM_LV_NAME=swap_1
E:DM_LV_LAYER=
E:ID_FS_UUID=5272bb60-04b5-49cd-b730-be57c7604450
E:ID_FS_UUID_ENC=5272bb60-04b5-49cd-b730-be57c7604450
E:ID_FS_VERSION=1
E:ID_FS_TYPE=swap
E:ID_FS_USAGE=other
G:systemd
Q:systemd
V:1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b252:2
Lines: 24
S:disk/by-id/dm-name-system-root
S:disk/by-id/dm-uuid-LVM-NWEDo8q5ABDyJuC3F8veKNyWfYmeIBfFMS4MF3HakzUhkk7ekDm6fJTHkl2fYHe7
S:mapper/system-root
S:disk/by-uuid/3deafd0d-faff-4695-8d15-51061ae1f51b
S:system/root
I:77655410
E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1
E:DM_UDEV_PRIMARY_SOURCE_FLAG=1
E:DM_UDEV_RULES=1
E:DM_UDEV_RULES_VSN=2
E:DM_NAME=system-root
E:DM_UUID=LVM-NWEDo8q5ABDyJuC3F8veKNyWfYmeIBfFMS4MF3HakzUhkk7ekDm6fJTHkl2fYHe7
E:DM_SUSPENDED=0
E:DM_VG_NAME=system
E:DM_LV_NAME=root
E:DM_LV_LAYER=
E:ID_FS_UUID=3deafd0d-faff-4695-8d15-51061ae1f51b
E:ID_FS_UUID_ENC=3deafd0d-faff-4695-8d15-51061ae1f51b
E:ID_FS_VERSION=1.0
E:ID_FS_TYPE=ext4
E:ID_FS_USAGE=filesystem
G:systemd
Q:systemd
V:1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b252:3
Lines: 24
S:disk/by-id/dm-name-system-var
S:disk/by-id/dm-uuid-LVM-hrxHo0rlZ6U95ku5841Lpd17bS1Z7V7lrtEE60DVgE6YEOCdS9gcDGyonWim4hGP
S:mapper/system-var
S:disk/by-uuid/5c772222-f7d4-4c8e-87e8-e97df6b7a45e
S:system/var
I:79395348
E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1
E:DM_UDEV_PRIMARY_SOURCE_FLAG=1
E:DM_UDEV_RULES=1
E:DM_UDEV_RULES_VSN=2
E:DM_NAME=system-var
E:DM_UUID=LVM-hrxHo0rlZ6U95ku5841Lpd17bS1Z7V7lrtEE60DVgE6YEOCdS9gcDGyonWim4hGP
E:DM_SUSPENDED=0
E:DM_VG_NAME=system
E:DM_LV_NAME=var
E:DM_LV_LAYER=
E:ID_FS_UUID=5c772222-f7d4-4c8e-87e8-e97df6b7a45e
E:ID_FS_UUID_ENC=5c772222-f7d4-4c8e-87e8-e97df6b7a45e
E:ID_FS_VERSION=1.0
E:ID_FS_TYPE=ext4
E:ID_FS_USAGE=filesystem
G:systemd
Q:systemd
V:1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b252:4
Lines: 24
S:system/tmp
S:disk/by-uuid/a9479d44-60e1-4015-a1e5-bb065e6dd11b
S:disk/by-id/dm-uuid-LVM-XTNGOHjPWLHcxmJmVu5cWTXEtuzqDeBkdEHAZW5q9LxWQ2d4mb5CchUQzUPJpl8H
S:mapper/system-tmp
S:disk/by-id/dm-name-system-tmp
I:75852450
E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1
E:DM_UDEV_PRIMARY_SOURCE_FLAG=1
E:DM_UDEV_RULES=1
E:DM_UDEV_RULES_VSN=2
E:DM_NAME=system-tmp
E:DM_UUID=LVM-XTNGOHjPWLHcxmJmVu5cWTXEtuzqDeBkdEHAZW5q9LxWQ2d4mb5CchUQzUPJpl8H
E:DM_SUSPENDED=0
E:DM_VG_NAME=system
E:DM_LV_NAME=tmp
E:DM_LV_LAYER=
E:ID_FS_UUID=a9479d44-60e1-4015-a1e5-bb065e6dd11b
E:ID_FS_UUID_ENC=a9479d44-60e1-4015-a1e5-bb065e6dd11b
E:ID_FS_VERSION=1.0
E:ID_FS_TYPE=ext4
E:ID_FS_USAGE=filesystem
G:systemd
Q:systemd
V:1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b252:5
Lines: 24
S:disk/by-uuid/b05b726a-c718-4c4d-8641-7c73a7696d83
S:mapper/system-home
S:system/home
S:disk/by-id/dm-name-system-home
S:disk/by-id/dm-uuid-LVM-MtoJaWTpjWRXlUnNFlpxZauTEuYlMvGFutigEzCCrfj8CNh6jCRi5LQJXZCpLjPf
I:72604009
E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1
E:DM_UDEV_PRIMARY_SOURCE_FLAG=1
E:DM_UDEV_RULES=1
E:DM_UDEV_RULES_VSN=2
E:DM_NAME=system-home
E:DM_UUID=LVM-MtoJaWTpjWRXlUnNFlpxZauTEuYlMvGFutigEzCCrfj8CNh6jCRi5LQJXZCpLjPf
E:DM_SUSPENDED=0
E:DM_VG_NAME=system
E:DM_LV_NAME=home
E:DM_LV_LAYER=
E:ID_FS_UUID=b05b726a-c718-4c4d-8641-7c73a7696d83
E:ID_FS_UUID_ENC=b05b726a-c718-4c4d-8641-7c73a7696d83
E:ID_FS_VERSION=1.0
E:ID_FS_TYPE=ext4
E:ID_FS_USAGE=filesystem
G:systemd
Q:systemd
V:1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b254:0
Lines: 10
S:disk/by-path/pci-0000:00:06.0
S:disk/by-path/virtio-pci-0000:00:06.0
W:1
I:8524171
E:ID_PATH=pci-0000:00:06.0
E:ID_PATH_TAG=pci-0000_00_06_0
E:ID_PART_TABLE_UUID=653b59fd
E:ID_PART_TABLE_TYPE=dos
E:ID_FS_TYPE=
G:systemd
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b259:0
Lines: 17
S:disk/by-path/pci-0000:02:00.0-nvme-1
S:disk/by-id/nvme-eui.p3vbbiejx5aae2r3
S:disk/by-id/nvme-SAMSUNG_EHFTF55LURSY-000Y9_S252B6CU1HG3M1
I:79621327
E:ID_SERIAL_SHORT=S252B6CU1HG3M1
E:ID_WWN=eui.p3vbbiejx5aae2r3
E:ID_MODEL=SAMSUNG EHFTF55LURSY-000Y9
E:ID_REVISION=4NBTUY95
E:ID_SERIAL=SAMSUNG_EHFTF55LURSY-000Y9_S252B6CU1HG3M1
E:ID_PATH=pci-0000:02:00.0-nvme-1
E:ID_PATH_TAG=pci-0000_02_00_0-nvme-1
E:ID_PART_TABLE_UUID=f301fdbd-fd1f-46d4-9fb8-c9aeb757f050
E:ID_PART_TABLE_TYPE=gpt
E:ID_FS_TYPE=
G:systemd
Q:systemd
V:1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b8:0
Lines: 60
S:disk/by-id/lvm-pv-uuid-cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw
S:disk/by-id/scsi-SATA_TOSHIBA_KSDB4U86_2160A0D5FVGG
S:disk/by-id/ata-TOSHIBA_KSDB4U866TE_2160A0D5FVGG
S:disk/by-path/pci-0000:3b:00.0-sas-phy7-lun-0
S:disk/by-id/scsi-37c72382b8de36a64
S:disk/by-id/wwn-0x7c72382b8de36a64
W:702
I:73815117
E:ID_ATA=1
E:ID_TYPE=disk
E:ID_BUS=ata
E:ID_MODEL=TOSHIBA_KSDB4U86
E:ID_MODEL_ENC=TOSHIBA\x20KSDB4U86
E:ID_REVISION=0102
E:ID_SERIAL=TOSHIBA_KSDB4U866TE_DTB0QRJR2EIG
E:ID_SERIAL_SHORT=2160A0D5FVGG
E:ID_ATA_WRITE_CACHE=1
E:ID_ATA_WRITE_CACHE_ENABLED=0
E:ID_ATA_FEATURE_SET_PM=1
E:ID_ATA_FEATURE_SET_PM_ENABLED=1
E:ID_ATA_FEATURE_SET_SECURITY=1
E:ID_ATA_FEATURE_SET_SECURITY_ENABLED=0
E:ID_ATA_FEATURE_SET_SECURITY_ERASE_UNIT_MIN=66892
E:ID_ATA_FEATURE_SET_SECURITY_ENHANCED_ERASE_UNIT_MIN=66892
E:ID_ATA_FEATURE_SET_SMART=1
E:ID_ATA_FEATURE_SET_SMART_ENABLED=1
E:ID_ATA_FEATURE_SET_APM=1
E:ID_ATA_FEATURE_SET_APM_ENABLED=1
E:ID_ATA_FEATURE_SET_APM_CURRENT_VALUE=128
E:ID_ATA_DOWNLOAD_MICROCODE=1
E:ID_ATA_SATA=1
E:ID_ATA_SATA_SIGNAL_RATE_GEN2=1
E:ID_ATA_SATA_SIGNAL_RATE_GEN1=1
E:ID_ATA_ROTATION_RATE_RPM=7200
E:ID_WWN=0x7c72382b8de36a64
E:ID_WWN_WITH_EXTENSION=0x7c72382b8de36a64
E:ID_PATH=pci-0000:3b:00.0-sas-phy7-lun-0
E:ID_PATH_TAG=pci-0000_3b_00_0-sas-phy7-lun-0
E:ID_FS_UUID=cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw
E:ID_FS_UUID_ENC=cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw
E:ID_FS_VERSION=LVM2 001
E:ID_FS_TYPE=LVM2_member
E:ID_FS_USAGE=raid
E:SCSI_TPGS=0
E:SCSI_TYPE=disk
E:SCSI_VENDOR=ATA
E:SCSI_VENDOR_ENC=ATA\x20\x20\x20\x20\x20
E:SCSI_MODEL=TOSHIBA_KSDB4U86
E:SCSI_MODEL_ENC=TOSHIBA\x20KSDB4U86
E:SCSI_REVISION=0102
E:ID_SCSI=1
E:ID_SCSI_INQUIRY=1
E:ID_VENDOR=ATA
E:ID_VENDOR_ENC=ATA\x20\x20\x20\x20\x20
E:SCSI_IDENT_SERIAL=2160A0D5FVGG
E:SCSI_IDENT_LUN_NAA_REG=7c72382b8de36a64
E:SYSTEMD_READY=1
E:SYSTEMD_ALIAS=/dev/block/8:0
E:SYSTEMD_WANTS=lvm2-pvscan@8:0.service
G:systemd
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b8:16
Lines: 62
S:disk/by-id/scsi-3e1b87abbb16bd84e
S:disk/by-id/wwn-0xe1b87abbb16bd84e
S:disk/by-path/pci-0000:00:1f.2-ata-1
S:disk/by-id/scsi-0ATA_SuperMicro_SSD_SMC0E1B87ABBB16BD84E
S:disk/by-id/scsi-SATA_SuperMicro_SSD_SMC0E1B87ABBB16BD84E
S:disk/by-id/scsi-1ATA_SuperMicro_SSD_SMC0E1B87ABBB16BD84E
S:disk/by-id/ata-SuperMicro_SSD_SMC0E1B87ABBB16BD84E
W:58
I:147686920
E:ID_ATA=1
E:ID_TYPE=disk
E:ID_BUS=ata
E:ID_MODEL=SuperMicro_SSD
E:ID_MODEL_ENC=SuperMicro\x20SSD\x20\x20
E:ID_REVISION=0R
E:ID_SERIAL=SuperMicro_SSD_SMC0E1B87ABBB16BD84E
E:ID_SERIAL_SHORT=SMC0E1B87ABBB16BD84E
E:ID_ATA_WRITE_CACHE=1
E:ID_ATA_WRITE_CACHE_ENABLED=1
E:ID_ATA_FEATURE_SET_HPA=1
E:ID_ATA_FEATURE_SET_HPA_ENABLED=1
E:ID_ATA_FEATURE_SET_PM=1
E:ID_ATA_FEATURE_SET_PM_ENABLED=1
E:ID_ATA_FEATURE_SET_SECURITY=1
E:ID_ATA_FEATURE_SET_SECURITY_ENABLED=0
E:ID_ATA_FEATURE_SET_SECURITY_ERASE_UNIT_MIN=4
E:ID_ATA_FEATURE_SET_SECURITY_ENHANCED_ERASE_UNIT_MIN=4
E:ID_ATA_FEATURE_SET_SMART=1
E:ID_ATA_FEATURE_SET_SMART_ENABLED=1
E:ID_ATA_FEATURE_SET_AAM=1
E:ID_ATA_FEATURE_SET_AAM_ENABLED=0
E:ID_ATA_FEATURE_SET_AAM_VENDOR_RECOMMENDED_VALUE=0
E:ID_ATA_FEATURE_SET_AAM_CURRENT_VALUE=0
E:ID_ATA_DOWNLOAD_MICROCODE=1
E:ID_ATA_SATA=1
E:ID_ATA_SATA_SIGNAL_RATE_GEN2=1
E:ID_ATA_SATA_SIGNAL_RATE_GEN1=1
E:ID_ATA_ROTATION_RATE_RPM=0
E:ID_WWN=0xe1b87abbb16bd84e
E:ID_WWN_WITH_EXTENSION=0xe1b87abbb16bd84e
E:ID_PATH=pci-0000:00:1f.2-ata-1
E:ID_PATH_TAG=pci-0000_00_1f_2-ata-1
E:ID_PART_TABLE_UUID=45980145-24e2-4302-a7f0-364c68cfaf59
E:ID_PART_TABLE_TYPE=gpt
E:SCSI_TPGS=0
E:SCSI_TYPE=disk
E:SCSI_VENDOR=ATA
E:SCSI_VENDOR_ENC=ATA\x20\x20\x20\x20\x20
E:SCSI_MODEL=SuperMicro_SSD
E:SCSI_MODEL_ENC=SuperMicro\x20SSD\x20\x20
E:SCSI_REVISION=0R
E:ID_SCSI=1
E:ID_SCSI_INQUIRY=1
E:ID_VENDOR=ATA
E:ID_VENDOR_ENC=ATA\x20\x20\x20\x20\x20
E:SCSI_IDENT_SERIAL=SMC0E1B87ABBB16BD84E
E:SCSI_IDENT_LUN_VENDOR=SMC0E1B87ABBB16BD84E
E:SCSI_IDENT_LUN_T10=ATA_SuperMicro_SSD_SMC0E1B87ABBB16BD84E
E:SCSI_IDENT_LUN_ATA=SuperMicro_SSD_SMC0E1B87ABBB16BD84E
E:SCSI_IDENT_LUN_NAA_REG=e1b87abbb16bd84e
E:ID_FS_TYPE=
G:systemd
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: udev/data/b8:32
Lines: 62
S:disk/by-path/pci-0000:00:1f.2-ata-4
S:disk/by-id/scsi-SATA_INTEL_SSDS9X9SI0_3EWB5Y25CWQWA7EH1U
S:disk/by-id/scsi-0ATA_INTEL_SSDS9X9SI0_3EWB5Y25CWQWA7EH1U
S:disk/by-id/scsi-1ATA_INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U
S:disk/by-id/lvm-pv-uuid-QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb
S:disk/by-id/ata-INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U
S:disk/by-id/scsi-358907ddc573a5de
S:disk/by-id/wwn-0x58907ddc573a5de
W:10
I:145572852
E:ID_ATA=1
E:ID_TYPE=disk
E:ID_BUS=ata
E:ID_MODEL=INTEL_SSDS9X9SI0
E:ID_MODEL_ENC=INTEL\x20SSDS9X9SI0
E:ID_REVISION=0100
E:ID_SERIAL=INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U
E:ID_SERIAL_SHORT=3EWB5Y25CWQWA7EH1U
E:ID_ATA_WRITE_CACHE=1
E:ID_ATA_WRITE_CACHE_ENABLED=0
E:ID_ATA_FEATURE_SET_PM=1
E:ID_ATA_FEATURE_SET_PM_ENABLED=1
E:ID_ATA_FEATURE_SET_SECURITY=1
E:ID_ATA_FEATURE_SET_SECURITY_ENABLED=0
E:ID_ATA_FEATURE_SET_SECURITY_ERASE_UNIT_MIN=4
E:ID_ATA_FEATURE_SET_SECURITY_ENHANCED_ERASE_UNIT_MIN=4
E:ID_ATA_FEATURE_SET_SMART=1
E:ID_ATA_FEATURE_SET_SMART_ENABLED=1
E:ID_ATA_DOWNLOAD_MICROCODE=1
E:ID_ATA_SATA=1
E:ID_ATA_SATA_SIGNAL_RATE_GEN2=1
E:ID_ATA_SATA_SIGNAL_RATE_GEN1=1
E:ID_ATA_ROTATION_RATE_RPM=0
E:ID_WWN=0x58907ddc573a5de
E:ID_WWN_WITH_EXTENSION=0x58907ddc573a5de
E:ID_PATH=pci-0000:00:1f.2-ata-4
E:ID_PATH_TAG=pci-0000_00_1f_2-ata-4
E:ID_FS_UUID=QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb
E:ID_FS_UUID_ENC=QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb
E:ID_FS_VERSION=LVM2 001
E:ID_FS_TYPE=LVM2_member
E:ID_FS_USAGE=raid
E:SCSI_TPGS=0
E:SCSI_TYPE=disk
E:SCSI_VENDOR=ATA
E:SCSI_VENDOR_ENC=ATA\x20\x20\x20\x20\x20
E:SCSI_MODEL=INTEL_SSDS9X9SI0
E:SCSI_MODEL_ENC=INTEL\x20SSDS9X9SI0
E:SCSI_REVISION=0100
E:ID_SCSI=1
E:ID_SCSI_INQUIRY=1
E:ID_VENDOR=ATA
E:ID_VENDOR_ENC=ATA\x20\x20\x20\x20\x20
E:SCSI_IDENT_SERIAL=3EWB5Y25CWQWA7EH1U
E:SCSI_IDENT_LUN_VENDOR=3EWB5Y25CWQWA7EH1U
E:SCSI_IDENT_LUN_T10=ATA_INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U
E:SCSI_IDENT_LUN_ATA=INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U
E:SCSI_IDENT_LUN_NAA_REG=58907ddc573a5de
E:SYSTEMD_READY=1
E:SYSTEMD_ALIAS=/dev/block/8:32
E:SYSTEMD_WANTS=lvm2-pvscan@8:32.service
G:systemd
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
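The udev.ttar archive above packs udev database entries (normally found under /run/udev/data) for the fixtures; the "E:" lines carry the ID_* properties used to describe each block device. A minimal Go sketch of extracting them from one entry (hypothetical path, illustrative only):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("/run/udev/data/b259:0")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	props := map[string]string{}
	s := bufio.NewScanner(f)
	for s.Scan() {
		line := s.Text()
		// S: symlinks, G:/Q: tags, W: watch handle, I: usec; only the E:
		// lines hold key=value device properties.
		if rest, ok := strings.CutPrefix(line, "E:"); ok {
			if k, v, ok := strings.Cut(rest, "="); ok {
				props[k] = v
			}
		}
	}
	fmt.Println(props["ID_MODEL"], props["ID_SERIAL_SHORT"])
}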

Some files were not shown because too many files have changed in this diff