node_exporter/node_exporter.go
ideaship 8d90276283 Add bcache collector (#597)
* Add bcache collector for Linux

This collector gathers metrics related to the Linux block cache
(bcache) from sysfs.
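
As a rough illustration only (not the collector's actual implementation), reading one
such statistic from sysfs in Go could look like the sketch below; the path layout
/sys/fs/bcache/<uuid>/stats_total/cache_hits and the function name are assumptions
made for this example (imports: io/ioutil, path/filepath, strconv, strings).

    // bcacheCacheHits returns total cache hits per cache set, read from sysfs.
    // Hypothetical sketch; paths and naming are assumed, not taken from the collector.
    func bcacheCacheHits() (map[string]uint64, error) {
        hits := map[string]uint64{}
        paths, err := filepath.Glob("/sys/fs/bcache/*/stats_total/cache_hits")
        if err != nil {
            return nil, err
        }
        for _, p := range paths {
            raw, err := ioutil.ReadFile(p)
            if err != nil {
                return nil, err
            }
            v, err := strconv.ParseUint(strings.TrimSpace(string(raw)), 10, 64)
            if err != nil {
                return nil, err
            }
            // The cache-set UUID is the directory two levels above the stat file.
            uuid := filepath.Base(filepath.Dir(filepath.Dir(p)))
            hits[uuid] = v
        }
        return hits, nil
    }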

* Removed commented out code

* Use project comment style

* Add _sectors to metric name to indicate unit

* Really use project comment style

* Rename bcache.go to bcache_linux.go

* Keep collector namespace clean

Rename:
- metric -> bcacheMetric
- periodStatsToMetrics -> bcachePeriodStatsToMetric

* Shorten slice initialization

* Change label names to backing_device, cache_device

* Remove five minute metrics (keep only total)

* Include units in additional metric names

* Enable bcache collector by default

* Provide metrics in seconds, not nanoseconds

* remove metrics with label "all"

* Add fixtures, update end-to-end for bcache collector

* Move fixtures/sys into tar.gz

This changeset moves the collector/fixtures/sys directory into
collector/fixtures/sys.tar.gz and tweaks the Makefile to unpack the
tarball before tests are run.

The reason for this change is that Windows does not allow colons in a
path (colons are present in some of the bcache fixture files), nor can
it (out of the box) deal with pathnames longer than 260 characters
(which we would be increasingly likely to hit if we tried to replace
colons with longer codes that are guaranteed not to turn up in regular
file names).

* Add ttar: plain text archive, replacement for tar

This changeset adds ttar, a plain text replacement for tar, and uses it
for the sysfs fixture archive. The syntax is loosely based on tar(1).

Using a plain text archive makes it possible to review changes without
downloading and extracting the archive. Also, when working on the repo,
git diff and git log become useful again, allowing a committer to verify
and track changes over time.

The code is written in bash, because bash is available out of the box on
all major flavors of Linux and on macOS. The feature set used is
restricted to bash version 3.2 because that is what Apple is still
shipping.

The program also works on Windows if bash is installed. Obviously, it
does not solve the Windows limitations (path length limited to 260
characters, no symbolic links) that prompted the move to an archive
format in the first place.
2017-07-07 07:20:18 +02:00

// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"flag"
	"fmt"
	"net/http"
	_ "net/http/pprof"
	"os"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/prometheus/common/log"
	"github.com/prometheus/common/version"
	"github.com/prometheus/node_exporter/collector"
)

const (
	defaultCollectors = "arp,bcache,conntrack,cpu,diskstats,entropy,edac,exec,filefd,filesystem,hwmon,infiniband,loadavg,mdadm,meminfo,netdev,netstat,sockstat,stat,textfile,time,uname,vmstat,wifi,xfs,zfs"
)

var (
	scrapeDurationDesc = prometheus.NewDesc(
		prometheus.BuildFQName(collector.Namespace, "scrape", "collector_duration_seconds"),
		"node_exporter: Duration of a collector scrape.",
		[]string{"collector"},
		nil,
	)
	scrapeSuccessDesc = prometheus.NewDesc(
		prometheus.BuildFQName(collector.Namespace, "scrape", "collector_success"),
		"node_exporter: Whether a collector succeeded.",
		[]string{"collector"},
		nil,
	)
)

// NodeCollector implements the prometheus.Collector interface.
type NodeCollector struct {
	collectors map[string]collector.Collector
}

// Describe implements the prometheus.Collector interface.
func (n NodeCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- scrapeDurationDesc
	ch <- scrapeSuccessDesc
}

// Collect implements the prometheus.Collector interface.
func (n NodeCollector) Collect(ch chan<- prometheus.Metric) {
	wg := sync.WaitGroup{}
	wg.Add(len(n.collectors))
	for name, c := range n.collectors {
		go func(name string, c collector.Collector) {
			execute(name, c, ch)
			wg.Done()
		}(name, c)
	}
	wg.Wait()
}
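
// Note (illustrative only, not a declaration in this file): the fan-out above
// implies that each value produced by collector.Factories implements an
// interface of roughly this shape, defined in the collector package:
//
//	type Collector interface {
//		// Update reads metrics and exposes them via the provided channel.
//		Update(ch chan<- prometheus.Metric) error
//	}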

// filterAvailableCollectors drops collectors that have no entry in
// collector.Factories from a comma-separated list of collector names.
func filterAvailableCollectors(collectors string) string {
	var availableCollectors []string
	for _, c := range strings.Split(collectors, ",") {
		_, ok := collector.Factories[c]
		if ok {
			availableCollectors = append(availableCollectors, c)
		}
	}
	return strings.Join(availableCollectors, ",")
}

// execute runs a single collector, records its duration and success, and
// reports both as metrics on the provided channel.
func execute(name string, c collector.Collector, ch chan<- prometheus.Metric) {
	begin := time.Now()
	err := c.Update(ch)
	duration := time.Since(begin)
	var success float64

	if err != nil {
		log.Errorf("ERROR: %s collector failed after %fs: %s", name, duration.Seconds(), err)
		success = 0
	} else {
		log.Debugf("OK: %s collector succeeded after %fs.", name, duration.Seconds())
		success = 1
	}
	ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name)
	ch <- prometheus.MustNewConstMetric(scrapeSuccessDesc, prometheus.GaugeValue, success, name)
}

// loadCollectors instantiates every collector named in the comma-separated
// list by calling its factory function.
func loadCollectors(list string) (map[string]collector.Collector, error) {
	collectors := map[string]collector.Collector{}
	for _, name := range strings.Split(list, ",") {
		fn, ok := collector.Factories[name]
		if !ok {
			return nil, fmt.Errorf("collector '%s' not available", name)
		}
		c, err := fn()
		if err != nil {
			return nil, err
		}
		collectors[name] = c
	}
	return collectors, nil
}

func init() {
	prometheus.MustRegister(version.NewCollector("node_exporter"))
}

func main() {
	var (
		showVersion       = flag.Bool("version", false, "Print version information.")
		listenAddress     = flag.String("web.listen-address", ":9100", "Address on which to expose metrics and web interface.")
		metricsPath       = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
		enabledCollectors = flag.String("collectors.enabled", filterAvailableCollectors(defaultCollectors), "Comma-separated list of collectors to use.")
		printCollectors   = flag.Bool("collectors.print", false, "If true, print available collectors and exit.")
	)
	flag.Parse()

	if *showVersion {
		fmt.Fprintln(os.Stdout, version.Print("node_exporter"))
		os.Exit(0)
	}

	log.Infoln("Starting node_exporter", version.Info())
	log.Infoln("Build context", version.BuildContext())

	if *printCollectors {
		collectorNames := make(sort.StringSlice, 0, len(collector.Factories))
		for n := range collector.Factories {
			collectorNames = append(collectorNames, n)
		}
		collectorNames.Sort()
		fmt.Printf("Available collectors:\n")
		for _, n := range collectorNames {
			fmt.Printf(" - %s\n", n)
		}
		return
	}

	collectors, err := loadCollectors(*enabledCollectors)
	if err != nil {
		log.Fatalf("Couldn't load collectors: %s", err)
	}

	log.Infof("Enabled collectors:")
	for n := range collectors {
		log.Infof(" - %s", n)
	}

	if err := prometheus.Register(NodeCollector{collectors: collectors}); err != nil {
		log.Fatalf("Couldn't register collector: %s", err)
	}

	handler := promhttp.HandlerFor(prometheus.DefaultGatherer,
		promhttp.HandlerOpts{
			ErrorLog:      log.NewErrorLogger(),
			ErrorHandling: promhttp.ContinueOnError,
		})

	// TODO(ts): Remove deprecated and problematic InstrumentHandler usage.
	http.Handle(*metricsPath, prometheus.InstrumentHandler("prometheus", handler))
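
	// A non-deprecated alternative (a sketch only, not what this version ships):
	// count metrics-handler requests with a CounterVec and wrap the handler via
	// promhttp; the metric name below is illustrative.
	//
	//	scrapes := prometheus.NewCounterVec(
	//		prometheus.CounterOpts{Name: "node_exporter_scrapes_total", Help: "Total metrics handler requests."},
	//		[]string{"code"},
	//	)
	//	prometheus.MustRegister(scrapes)
	//	http.Handle(*metricsPath, promhttp.InstrumentHandlerCounter(scrapes, handler))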

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
			<head><title>Node Exporter</title></head>
			<body>
			<h1>Node Exporter</h1>
			<p><a href="` + *metricsPath + `">Metrics</a></p>
			</body>
			</html>`))
	})

	log.Infoln("Listening on", *listenAddress)
	err = http.ListenAndServe(*listenAddress, nil)
	if err != nil {
		log.Fatal(err)
	}
}