Merge branch 'master' into update-mountstats
Commit 98289e7102
@@ -46,7 +46,7 @@ jobs:
     parallelism: 3
     steps:
       - prometheus/setup_environment
-      - run: docker run --privileged linuxkit/binfmt:v0.8
+      - run: docker run --privileged linuxkit/binfmt:af88a591f9cc896a52ce596b9cf7ca26a061ef97
       - run: promu crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX
       - run: promu --config .promu-cgo.yml crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX
       - persist_to_workspace:
.github/workflows/golangci-lint.yml
@@ -22,7 +22,7 @@ jobs:
       - name: install Go
         uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
         with:
-          go-version: 1.20.x
+          go-version: 1.21.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
CHANGELOG.md
@@ -5,6 +5,23 @@
 * [ENHANCEMENT]
 * [BUGFIX]
 
+## 1.7.0 / 2023-11-11
+
+* [FEATURE] Add ZFS freebsd per dataset stats #2753
+* [FEATURE] Add cpu vulnerabilities reporting from sysfs #2721
+* [ENHANCEMENT] Parallelize stat calls in Linux filesystem collector #1772
+* [ENHANCEMENT] Add missing linkspeeds to ethtool collector #2711
+* [ENHANCEMENT] Add CPU MHz as the value for `node_cpu_info` metric #2778
+* [ENHANCEMENT] Improve qdisc collector performance #2779
+* [ENHANCEMENT] Add include and exclude filter for hwmon collector #2699
+* [ENHANCEMENT] Optionally fetch ARP stats via rtnetlink instead of procfs #2777
+* [BUGFIX] Fix ZFS arcstats on FreeBSD 14.0+ #2754
+* [BUGFIX] Fallback to 32-bit stats in netdev #2757
+* [BUGFIX] Close btrfs.FS handle after use #2780
+* [BUGFIX] Move RO status before error return #2807
+* [BUGFIX] Fix `promhttp_metric_handler_errors_total` being always active #2808
+* [BUGFIX] Fix nfsd v4 index miss #2824
+
 ## 1.6.1 / 2023-06-17
 
 Rebuild with latest Go compiler bugfix release.
@@ -2815,6 +2815,7 @@ node_nfsd_requests_total{method="SetClientIDConfirm",proto="4"} 3
 node_nfsd_requests_total{method="SymLink",proto="2"} 0
 node_nfsd_requests_total{method="SymLink",proto="3"} 0
 node_nfsd_requests_total{method="Verify",proto="4"} 0
+node_nfsd_requests_total{method="WdelegGetattr",proto="4"} 15
 node_nfsd_requests_total{method="WrCache",proto="2"} 0
 node_nfsd_requests_total{method="Write",proto="2"} 0
 node_nfsd_requests_total{method="Write",proto="3"} 0
@@ -2837,6 +2837,7 @@ node_nfsd_requests_total{method="SetClientIDConfirm",proto="4"} 3
 node_nfsd_requests_total{method="SymLink",proto="2"} 0
 node_nfsd_requests_total{method="SymLink",proto="3"} 0
 node_nfsd_requests_total{method="Verify",proto="4"} 0
+node_nfsd_requests_total{method="WdelegGetattr",proto="4"} 15
 node_nfsd_requests_total{method="WrCache",proto="2"} 0
 node_nfsd_requests_total{method="Write",proto="2"} 0
 node_nfsd_requests_total{method="Write",proto="3"} 0
@@ -9,3 +9,4 @@ proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
 proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
 proc4 2 2 10853
 proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+wdeleg_getattr 15
@@ -82,6 +82,8 @@ func (c *nfsdCollector) Update(ch chan<- prometheus.Metric) error {
 	c.updateNFSdRequestsv2Stats(ch, &stats.V2Stats)
 	c.updateNFSdRequestsv3Stats(ch, &stats.V3Stats)
 	c.updateNFSdRequestsv4Stats(ch, &stats.V4Ops)
+	ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue,
+		float64(stats.WdelegGetattr), "4", "WdelegGetattr")
 
 	return nil
 }
@@ -10,7 +10,7 @@
         (
           node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceFillingUpWarningThreshold)d
         and
-          predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 24*60*60) < 0
+          predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[%(fsSpaceFillingUpPredictionWindow)s], 24*60*60) < 0
         and
           node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
         )
@@ -407,6 +407,20 @@
            description: 'Systemd service {{ $labels.name }} has entered failed state at {{ $labels.instance }}',
          },
        },
+       {
+         alert: 'NodeBondingDegraded',
+         expr: |||
+           (node_bonding_slaves - node_bonding_active) != 0
+         ||| % $._config,
+         'for': '5m',
+         labels: {
+           severity: 'warning',
+         },
+         annotations: {
+           summary: 'Bonding interface is degraded',
+           description: 'Bonding interface {{ $labels.master }} on {{ $labels.instance }} is in degraded state due to one or more slave failures.',
+         },
+       },
      ],
    },
  ],
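Both alert hunks above use jsonnet's string-formatting operator: each expression is a ||| text block whose %(...)s placeholders are filled in from the mixin's $._config object. A minimal, standalone sketch of that mechanism (the selector value here is illustrative, not taken from this diff):

local config = {
  nodeExporterSelector: 'job="node"',
  fsSpaceFillingUpPredictionWindow: '6h',
};

{
  // expr renders to: predict_linear(node_filesystem_avail_bytes{job="node"}[6h], 24*60*60) < 0
  expr: |||
    predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s}[%(fsSpaceFillingUpPredictionWindow)s], 24*60*60) < 0
  ||| % config,
}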
@@ -54,13 +54,19 @@
     // 'NodeFilesystemSpaceFillingUp' alerts. These alerts fire if the disk
     // usage grows in a way that it is predicted to run out in 4h or 1d
     // and if the provided thresholds have been reached right now.
-    // In some cases you'll want to adjust these, e.g. by default Kubernetes
+    // In some cases you'll want to adjust these, e.g., by default, Kubernetes
     // runs the image garbage collection when the disk usage reaches 85%
     // of its available space. In that case, you'll want to reduce the
     // critical threshold below to something like 14 or 15, otherwise
     // the alert could fire under normal node usage.
+    // Additionally, the prediction window for the alert can be configured
+    // to account for environments where disk usage can fluctuate within
+    // a short time frame. By extending the prediction window, you can
+    // reduce false positives caused by temporary spikes, providing a
+    // more accurate prediction of disk space issues.
     fsSpaceFillingUpWarningThreshold: 40,
     fsSpaceFillingUpCriticalThreshold: 20,
+    fsSpaceFillingUpPredictionWindow: '6h',
 
     // Available disk space (%) thresholds on which to trigger the
     // 'NodeFilesystemAlmostOutOfSpace' alerts.
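The comment block above explains why a longer prediction window can help in environments with short-lived usage spikes. A hypothetical downstream override (the import path is an assumption, not part of this diff) could set it when consuming the mixin:

local nodeMixin = import 'node-mixin/mixin.libsonnet';

nodeMixin {
  _config+:: {
    // Assumed override: predict a day ahead from 12h of history instead of
    // the default 6h, smoothing over temporary spikes.
    fsSpaceFillingUpPredictionWindow: '12h',
  },
}

Rendering the mixin with this override would produce NodeFilesystemSpaceFillingUp rules whose predict_linear uses a 12h range selector.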