vendor: update prometheus org dependencies

Fabian Reinartz 2016-07-04 11:09:06 +02:00
parent 7700cff1ff
commit 7d441abd7b
25 changed files with 410 additions and 165 deletions


@ -15,7 +15,6 @@ package prometheus
import (
"errors"
"hash/fnv"
)
// Counter is a Metric that represents a single numerical value that only ever
@ -97,7 +96,6 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
result := &counter{value: value{
desc: desc,


@ -1,10 +1,8 @@
package prometheus
import (
"bytes"
"errors"
"fmt"
"hash/fnv"
"regexp"
"sort"
"strings"
@ -131,31 +129,24 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
d.err = errors.New("duplicate label names")
return d
}
h := fnv.New64a()
var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
vh := hashNew()
for _, val := range labelValues {
b.Reset()
b.WriteString(val)
b.WriteByte(separatorByte)
h.Write(b.Bytes())
vh = hashAdd(vh, val)
vh = hashAddByte(vh, separatorByte)
}
d.id = h.Sum64()
d.id = vh
// Sort labelNames so that order doesn't matter for the hash.
sort.Strings(labelNames)
// Now hash together (in this order) the help string and the sorted
// label names.
h.Reset()
b.Reset()
b.WriteString(help)
b.WriteByte(separatorByte)
h.Write(b.Bytes())
lh := hashNew()
lh = hashAdd(lh, help)
lh = hashAddByte(lh, separatorByte)
for _, labelName := range labelNames {
b.Reset()
b.WriteString(labelName)
b.WriteByte(separatorByte)
h.Write(b.Bytes())
lh = hashAdd(lh, labelName)
lh = hashAddByte(lh, separatorByte)
}
d.dimHash = h.Sum64()
d.dimHash = lh
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
for n, v := range constLabels {


@ -61,7 +61,9 @@
// It also exports some stats about the HTTP usage of the /metrics
// endpoint. (See the Handler function for more detail.)
//
// Two more advanced metric types are the Summary and Histogram.
// Two more advanced metric types are the Summary and Histogram. A more
// thorough description of metric types can be found in the prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// In addition to the fundamental metric types Gauge, Counter, Summary, and
// Histogram, a very important part of the Prometheus data model is the


@ -0,0 +1,29 @@
package prometheus
// Inline and byte-free variant of hash/fnv's fnv64a.
const (
offset64 = 14695981039346656037
prime64 = 1099511628211
)
// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func hashAdd(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return h
}
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
h ^= uint64(b)
h *= prime64
return h
}
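The helpers above hand-inline FNV-1a, so they should produce the same 64-bit values as hash/fnv for identical input. A minimal equivalence check, not part of this diff, written as a hypothetical test that would have to live inside package prometheus because the helpers and separatorByte are unexported:

package prometheus

import (
	"hash/fnv"
	"testing"
)

func TestInlineFnvMatchesStdlib(t *testing.T) {
	// Hash a label value followed by the separator, the way NewDesc does.
	h := hashNew()
	h = hashAdd(h, "http_requests_total")
	h = hashAddByte(h, separatorByte)

	f := fnv.New64a()
	f.Write([]byte("http_requests_total"))
	f.Write([]byte{separatorByte})

	if h != f.Sum64() {
		t.Fatalf("inline fnv64a hash %d does not match hash/fnv result %d", h, f.Sum64())
	}
}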


@ -13,8 +13,6 @@
package prometheus
import "hash/fnv"
// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
@ -77,7 +75,6 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newValue(desc, GaugeValue, 0, lvs...)
},


@ -211,7 +211,7 @@ func NewGoCollector() *goCollector {
"Number of seconds since 1970 of last garbage collection.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC*10 ^ 9) },
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue,
},
},
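The removed expression was an operator-precedence bug: ms.LastGC*10 ^ 9 parses as (ms.LastGC*10) XOR 9, not 10 to the 9th power. A standalone sketch, not part of this diff, showing the difference and the intended nanoseconds-to-seconds conversion:

package main

import "fmt"

func main() {
	// A LastGC value in nanoseconds since 1970 (roughly the date of this commit).
	var lastGC uint64 = 1467621000000000000

	buggy := float64(lastGC*10 ^ 9) // (lastGC*10) XOR 9: a huge, meaningless value
	fixed := float64(lastGC) / 1e9  // seconds since 1970, matching the metric's help text

	fmt.Printf("buggy: %g\nfixed: %g\n", buggy, fixed)
}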


@ -15,7 +15,6 @@ package prometheus
import (
"fmt"
"hash/fnv"
"math"
"sort"
"sync/atomic"
@ -305,7 +304,6 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
},


@ -57,12 +57,31 @@ func nowSeries(t ...time.Time) nower {
// has a constant label named "handler" with the provided handlerName as
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
//
// Note that InstrumentHandler has several issues:
//
// - It uses Summaries rather than Histograms. Summaries are not useful if
// aggregation across multiple instances is required.
//
// - It uses microseconds as unit, which is deprecated and should be replaced by
// seconds.
//
// - The size of the request is calculated in a separate goroutine. Since this
// calculator requires access to the request header, it creates a race with
// any writes to the header performed during request handling.
// httputil.ReverseProxy is a prominent example for a handler
// performing such writes.
//
// Upcoming versions of this package will provide ways of instrumenting HTTP
// handlers that are more flexible and have fewer issues. Consider this function
// DEPRECATED and prefer direct instrumentation in the meantime.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
// InstrumentHandlerFunc wraps the given function for instrumentation. It
// otherwise works in the same way as InstrumentHandler.
// otherwise works in the same way as InstrumentHandler (and shares the same
// issues).
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(
SummaryOpts{
@ -73,13 +92,13 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
)
}
// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
// flexibility (at the cost of a more complex call syntax). As
// InstrumentHandler, this function registers four metric collectors, but it
// uses the provided SummaryOpts to create them. However, the fields "Name" and
// "Help" in the SummaryOpts are ignored. "Name" is replaced by
// "requests_total", "request_duration_microseconds", "request_size_bytes", and
// "response_size_bytes", respectively. "Help" is replaced by an appropriate
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
// issues) but provides more flexibility (at the cost of a more complex call
// syntax). As InstrumentHandler, this function registers four metric
// collectors, but it uses the provided SummaryOpts to create them. However, the
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
// help string. The names of the variable labels of the http_requests_total
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
//
@ -102,9 +121,10 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
// more flexibility (at the cost of a more complex call syntax). See
// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used.
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
// the same issues) but provides more flexibility (at the cost of a more complex
// call syntax). See InstrumentHandlerWithOpts for details how the provided
// SummaryOpts are used.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
reqCnt := NewCounterVec(
CounterOpts{
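Given the deprecation notes on InstrumentHandler above (Summaries, microseconds, the header race), a minimal sketch of the recommended direct instrumentation using a Histogram in seconds; the metric name, label, and buckets are illustrative and not part of this diff:

package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var requestDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request duration in seconds, by handler.",
		Buckets: prometheus.DefBuckets,
	},
	[]string{"handler"},
)

// instrument observes the duration of each request handled by next.
func instrument(name string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		requestDuration.WithLabelValues(name).Observe(time.Since(start).Seconds())
	})
}

func main() {
	prometheus.MustRegister(requestDuration)
	http.Handle("/", instrument("root", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})))
	// UninstrumentedHandler avoids the InstrumentHandler issues described above.
	http.Handle("/metrics", prometheus.UninstrumentedHandler())
	http.ListenAndServe(":8080", nil)
}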


@ -20,7 +20,7 @@
package prometheus
// Push triggers a metric collection by the default registry and pushes all
// collected metrics to the Pushgateway specified by addr. See the Pushgateway
// collected metrics to the Pushgateway specified by url. See the Pushgateway
// documentation for detailed implications of the job and instance
// parameter. instance can be left empty. You can use just host:port or ip:port
// as url, in which case 'http://' is added automatically. You can also include


@ -24,7 +24,6 @@ import (
"compress/gzip"
"errors"
"fmt"
"hash/fnv"
"io"
"net/http"
"net/url"
@ -85,6 +84,9 @@ const (
// Handler returns the HTTP handler for the global Prometheus registry. It is
// already instrumented with InstrumentHandler (using "prometheus" as handler
// name). Usually the handler is used to handle the "/metrics" endpoint.
//
// Please note the issues described in the doc comment of InstrumentHandler. You
// might want to consider using UninstrumentedHandler instead.
func Handler() http.Handler {
return InstrumentHandler("prometheus", defRegistry)
}
@ -337,6 +339,9 @@ func (r *registry) Push(job, instance, pushURL, method string) error {
if !strings.Contains(pushURL, "://") {
pushURL = "http://" + pushURL
}
if strings.HasSuffix(pushURL, "/") {
pushURL = pushURL[:len(pushURL)-1]
}
pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
if instance != "" {
pushURL += "/instances/" + url.QueryEscape(instance)
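The push URL construction above (protocol defaulting, the newly added trailing-slash trim, and query-escaped job/instance path segments) can be exercised in isolation; pushURLFor is a hypothetical helper for illustration, not part of the library:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// pushURLFor mirrors the URL handling shown in registry.Push.
func pushURLFor(base, job, instance string) string {
	if !strings.Contains(base, "://") {
		base = "http://" + base
	}
	base = strings.TrimSuffix(base, "/") // the trailing-slash handling added here
	u := fmt.Sprintf("%s/metrics/jobs/%s", base, url.QueryEscape(job))
	if instance != "" {
		u += "/instances/" + url.QueryEscape(instance)
	}
	return u
}

func main() {
	fmt.Println(pushURLFor("pushgateway:9091/", "some_job", "host-01"))
	// http://pushgateway:9091/metrics/jobs/some_job/instances/host-01
}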
@ -528,30 +533,25 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
}
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
h := fnv.New64a()
var buf bytes.Buffer
buf.WriteString(metricFamily.GetName())
buf.WriteByte(separatorByte)
h.Write(buf.Bytes())
h := hashNew()
h = hashAdd(h, metricFamily.GetName())
h = hashAddByte(h, separatorByte)
// Make sure label pairs are sorted. We depend on it for the consistency
// check. Label pairs must be sorted by contract. But the point of this
// method is to check for contract violations. So we better do the sort
// now.
sort.Sort(LabelPairSorter(dtoMetric.Label))
for _, lp := range dtoMetric.Label {
buf.Reset()
buf.WriteString(lp.GetValue())
buf.WriteByte(separatorByte)
h.Write(buf.Bytes())
h = hashAdd(h, lp.GetValue())
h = hashAddByte(h, separatorByte)
}
metricHash := h.Sum64()
if _, exists := metricHashes[metricHash]; exists {
if _, exists := metricHashes[h]; exists {
return fmt.Errorf(
"collected metric %s %s was collected before with the same name and label values",
metricFamily.GetName(), dtoMetric,
)
}
metricHashes[metricHash] = struct{}{}
metricHashes[h] = struct{}{}
if desc == nil {
return nil // Nothing left to check if we have no desc.
@ -722,5 +722,18 @@ func (s metricSorter) Less(i, j int) bool {
return vi < vj
}
}
// We should never arrive here. Multiple metrics with the same
// label set in the same scrape will lead to undefined ingestion
// behavior. However, as above, we have to provide stable sorting
// here, even for inconsistent metrics. So sort equal metrics
// by their timestamp, with missing timestamps (implying "now")
// coming last.
if s[i].TimestampMs == nil {
return false
}
if s[j].TimestampMs == nil {
return true
}
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}


@ -15,7 +15,6 @@ package prometheus
import (
"fmt"
"hash/fnv"
"math"
"sort"
"sync"
@ -408,7 +407,6 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
},


@ -13,8 +13,6 @@
package prometheus
import "hash/fnv"
// Untyped is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
@ -75,7 +73,6 @@ func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newValue(desc, UntypedValue, 0, lvs...)
},


@ -14,9 +14,7 @@
package prometheus
import (
"bytes"
"fmt"
"hash"
"sync"
)
@ -26,16 +24,10 @@ import (
// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
// provided in this package.
type MetricVec struct {
mtx sync.RWMutex // Protects not only children, but also hash and buf.
mtx sync.RWMutex // Protects the children.
children map[uint64]Metric
desc *Desc
// hash is our own hash instance to avoid repeated allocations.
hash hash.Hash64
// buf is used to copy string contents into it for hashing,
// again to avoid allocations.
buf bytes.Buffer
newMetric func(labelValues ...string) Metric
}
@ -80,13 +72,20 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
}
m.mtx.RLock()
metric, ok := m.children[h]
m.mtx.RUnlock()
if ok {
return metric, nil
}
m.mtx.Lock()
defer m.mtx.Unlock()
return m.getOrCreateMetric(h, lvs...), nil
}
@ -103,17 +102,24 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
}
m.mtx.RLock()
metric, ok := m.children[h]
m.mtx.RUnlock()
if ok {
return metric, nil
}
lvs := make([]string, len(labels))
for i, label := range m.desc.variableLabels {
lvs[i] = labels[label]
}
m.mtx.Lock()
defer m.mtx.Unlock()
return m.getOrCreateMetric(h, lvs...), nil
}
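GetMetricWithLabelValues and GetMetricWith now look up existing children under a read lock and only take the write lock when a new child must be created. A sketch, not part of this diff, of the concurrent access pattern that benefits; the metric name is illustrative, and WithLabelValues on the concrete vector types goes through GetMetricWithLabelValues:

package main

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "demo_requests_total",
			Help: "Requests by status code.",
		},
		[]string{"code"},
	)
	prometheus.MustRegister(requests)

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				// After the first call creates the child metric, later lookups
				// for the same label value only need the read lock.
				requests.WithLabelValues("200").Inc()
			}
		}()
	}
	wg.Wait()
}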
@ -162,7 +168,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
if err != nil {
return false
}
if _, has := m.children[h]; !has {
if _, ok := m.children[h]; !ok {
return false
}
delete(m.children, h)
@ -187,7 +193,7 @@ func (m *MetricVec) Delete(labels Labels) bool {
if err != nil {
return false
}
if _, has := m.children[h]; !has {
if _, ok := m.children[h]; !ok {
return false
}
delete(m.children, h)
@ -208,30 +214,26 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
if len(vals) != len(m.desc.variableLabels) {
return 0, errInconsistentCardinality
}
m.hash.Reset()
h := hashNew()
for _, val := range vals {
m.buf.Reset()
m.buf.WriteString(val)
m.hash.Write(m.buf.Bytes())
h = hashAdd(h, val)
}
return m.hash.Sum64(), nil
return h, nil
}
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if len(labels) != len(m.desc.variableLabels) {
return 0, errInconsistentCardinality
}
m.hash.Reset()
h := hashNew()
for _, label := range m.desc.variableLabels {
val, ok := labels[label]
if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label)
}
m.buf.Reset()
m.buf.WriteString(val)
m.hash.Write(m.buf.Bytes())
h = hashAdd(h, val)
}
return m.hash.Sum64(), nil
return h, nil
}
func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {


@ -12,5 +12,5 @@
// limitations under the License.
// Package model contains common data structures that are shared across
// Prometheus componenets and libraries.
// Prometheus components and libraries.
package model


@ -8,5 +8,13 @@ Maintainers of this repository:
The following individuals have contributed code to this repository
(listed in alphabetical order):
* Armen Baghumian <abaghumian@noggin.com.au>
* Bjoern Rabenstein <beorn@soundcloud.com>
* David Cournapeau <cournape@gmail.com>
* Ji-Hoon, Seol <jihoon.seol@gmail.com>
* Jonas Große Sundrup <cherti@letopolis.de>
* Julius Volz <julius.volz@gmail.com>
* Matthias Rampke <mr@soundcloud.com>
* Nicky Gerritsen <nicky@streamone.nl>
* Rémi Audebert <contact@halfr.net>
* Tobias Schmidt <tobidt@gmail.com>

vendor/github.com/prometheus/procfs/Makefile (generated, vendored; new file, 6 changed lines)

@ -0,0 +1,6 @@
ci:
! gofmt -l *.go | read nothing
go vet
go test -v ./...
go get github.com/golang/lint/golint
golint *.go


@ -27,14 +27,7 @@ func NewFS(mountPoint string) (FS, error) {
return FS(mountPoint), nil
}
func (fs FS) stat(p string) (os.FileInfo, error) {
return os.Stat(path.Join(string(fs), p))
}
func (fs FS) open(p string) (*os.File, error) {
return os.Open(path.Join(string(fs), p))
}
func (fs FS) readlink(p string) (string, error) {
return os.Readlink(path.Join(string(fs), p))
// Path returns the path of the given subsystem relative to the procfs root.
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}
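The unexported stat/open/readlink helpers are replaced by a single exported Path method that callers combine with the os package, as the following hunks show. A minimal usage sketch, assuming a mounted /proc:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint) // "/proc"
	if err != nil {
		log.Fatal(err)
	}
	// Path joins the given elements onto the filesystem's mount point.
	fmt.Println(fs.Path("net", "ip_vs_stats")) // /proc/net/ip_vs_stats
}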


@ -8,6 +8,7 @@ import (
"io"
"io/ioutil"
"net"
"os"
"strconv"
"strings"
)
@ -58,7 +59,7 @@ func NewIPVSStats() (IPVSStats, error) {
// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
func (fs FS) NewIPVSStats() (IPVSStats, error) {
file, err := fs.open("net/ip_vs_stats")
file, err := os.Open(fs.Path("net/ip_vs_stats"))
if err != nil {
return IPVSStats{}, err
}
@ -127,7 +128,7 @@ func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
file, err := fs.open("net/ip_vs")
file, err := os.Open(fs.Path("net/ip_vs"))
if err != nil {
return nil, err
}

vendor/github.com/prometheus/procfs/mdstat.go (generated, vendored; new file, 138 changed lines)

@ -0,0 +1,138 @@
package procfs
import (
"fmt"
"io/ioutil"
"regexp"
"strconv"
"strings"
)
var (
statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
)
// MDStat holds info parsed from /proc/mdstat.
type MDStat struct {
// Name of the device.
Name string
// activity-state of the device.
ActivityState string
// Number of active disks.
DisksActive int64
// Total number of disks the device consists of.
DisksTotal int64
// Number of blocks the device holds.
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
}
// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
mdStatusFilePath := fs.Path("mdstat")
content, err := ioutil.ReadFile(mdStatusFilePath)
if err != nil {
return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
}
mdStates := []MDStat{}
lines := strings.Split(string(content), "\n")
for i, l := range lines {
if l == "" {
continue
}
if l[0] == ' ' {
continue
}
if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
continue
}
mainLine := strings.Split(l, " ")
if len(mainLine) < 3 {
return mdStates, fmt.Errorf("error parsing mdline: %s", l)
}
mdName := mainLine[0]
activityState := mainLine[2]
if len(lines) <= i+3 {
return mdStates, fmt.Errorf(
"error parsing %s: too few lines for md device %s",
mdStatusFilePath,
mdName,
)
}
active, total, size, err := evalStatusline(lines[i+1])
if err != nil {
return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
}
// j is the line number of the syncing-line.
j := i + 2
if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
j = i + 3
}
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
syncedBlocks, err = evalBuildline(lines[j])
if err != nil {
return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
}
}
mdStates = append(mdStates, MDStat{
Name: mdName,
ActivityState: activityState,
DisksActive: active,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
})
}
return mdStates, nil
}
func evalStatusline(statusline string) (active, total, size int64, err error) {
matches := statuslineRE.FindStringSubmatch(statusline)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
}
size, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
}
return active, total, size, nil
}
func evalBuildline(buildline string) (syncedBlocks int64, err error) {
matches := buildlineRE.FindStringSubmatch(buildline)
if len(matches) != 2 {
return 0, fmt.Errorf("unexpected buildline: %s", buildline)
}
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
}
return syncedBlocks, nil
}
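A minimal usage sketch for the new ParseMDStat method; the output format is illustrative and error handling is condensed:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	mdStats, err := fs.ParseMDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range mdStats {
		fmt.Printf("%s: %s, %d/%d disks active, %d/%d blocks synced\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
			md.BlocksSynced, md.BlocksTotal)
	}
}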


@ -4,7 +4,6 @@ import (
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
)
@ -24,9 +23,13 @@ func (p Procs) Len() int { return len(p) }
func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
// Self returns a process for the current process.
// Self returns a process for the current process read via /proc/self.
func Self() (Proc, error) {
return NewProc(os.Getpid())
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return Proc{}, err
}
return fs.Self()
}
// NewProc returns a process for the given pid under /proc.
@ -35,32 +38,42 @@ func NewProc(pid int) (Proc, error) {
if err != nil {
return Proc{}, err
}
return fs.NewProc(pid)
}
// AllProcs returns a list of all currently avaible processes under /proc.
// AllProcs returns a list of all currently available processes under /proc.
func AllProcs() (Procs, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return Procs{}, err
}
return fs.AllProcs()
}
// Self returns a process for the current process.
func (fs FS) Self() (Proc, error) {
p, err := os.Readlink(fs.Path("self"))
if err != nil {
return Proc{}, err
}
pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
if err != nil {
return Proc{}, err
}
return fs.NewProc(pid)
}
// NewProc returns a process for the given pid.
func (fs FS) NewProc(pid int) (Proc, error) {
if _, err := fs.stat(strconv.Itoa(pid)); err != nil {
if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
return Proc{PID: pid, fs: fs}, nil
}
// AllProcs returns a list of all currently avaible processes.
// AllProcs returns a list of all currently available processes.
func (fs FS) AllProcs() (Procs, error) {
d, err := fs.open("")
d, err := os.Open(fs.Path())
if err != nil {
return Procs{}, err
}
@ -85,7 +98,7 @@ func (fs FS) AllProcs() (Procs, error) {
// CmdLine returns the command line of a process.
func (p Proc) CmdLine() ([]string, error) {
f, err := p.open("cmdline")
f, err := os.Open(p.path("cmdline"))
if err != nil {
return nil, err
}
@ -103,10 +116,25 @@ func (p Proc) CmdLine() ([]string, error) {
return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
}
// Comm returns the command name of a process.
func (p Proc) Comm() (string, error) {
f, err := os.Open(p.path("comm"))
if err != nil {
return "", err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
return strings.TrimSpace(string(data)), nil
}
// Executable returns the absolute path of the executable command of a process.
func (p Proc) Executable() (string, error) {
exe, err := p.readlink("exe")
exe, err := os.Readlink(p.path("exe"))
if os.IsNotExist(err) {
return "", nil
}
@ -144,7 +172,7 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
targets := make([]string, len(names))
for i, name := range names {
target, err := p.readlink("fd/" + name)
target, err := os.Readlink(p.path("fd", name))
if err == nil {
targets[i] = target
}
@ -165,7 +193,7 @@ func (p Proc) FileDescriptorsLen() (int, error) {
}
func (p Proc) fileDescriptors() ([]string, error) {
d, err := p.open("fd")
d, err := os.Open(p.path("fd"))
if err != nil {
return nil, err
}
@ -179,10 +207,6 @@ func (p Proc) fileDescriptors() ([]string, error) {
return names, nil
}
func (p Proc) open(pa string) (*os.File, error) {
return p.fs.open(path.Join(strconv.Itoa(p.PID), pa))
}
func (p Proc) readlink(pa string) (string, error) {
return p.fs.readlink(path.Join(strconv.Itoa(p.PID), pa))
func (p Proc) path(pa ...string) string {
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}


@ -3,6 +3,7 @@ package procfs
import (
"fmt"
"io/ioutil"
"os"
)
// ProcIO models the content of /proc/<pid>/io.
@ -29,7 +30,7 @@ type ProcIO struct {
func (p Proc) NewIO() (ProcIO, error) {
pio := ProcIO{}
f, err := p.open("io")
f, err := os.Open(p.path("io"))
if err != nil {
return pio, err
}


@ -3,28 +3,55 @@ package procfs
import (
"bufio"
"fmt"
"os"
"regexp"
"strconv"
)
// ProcLimits represents the soft limits for each of the process's resource
// limits.
// limits. For more information see getrlimit(2):
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
type ProcLimits struct {
// CPU time limit in seconds.
CPUTime int
// Maximum size of files that the process may create.
FileSize int
// Maximum size of the process's data segment (initialized data,
// uninitialized data, and heap).
DataSize int
// Maximum size of the process stack in bytes.
StackSize int
// Maximum size of a core file.
CoreFileSize int
// Limit of the process's resident set in pages.
ResidentSet int
// Maximum number of processes that can be created for the real user ID of
// the calling process.
Processes int
// Value one greater than the maximum file descriptor number that can be
// opened by this process.
OpenFiles int
// Maximum number of bytes of memory that may be locked into RAM.
LockedMemory int
// Maximum size of the process's virtual memory address space in bytes.
AddressSpace int
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
// this process may establish.
FileLocks int
// Limit of signals that may be queued for the real user ID of the calling
// process.
PendingSignals int
// Limit on the number of bytes that can be allocated for POSIX message
// queues for the real user ID of the calling process.
MsqqueueSize int
// Limit of the nice priority set using setpriority(2) or nice(2).
NicePriority int
// Limit of the real-time priority set using sched_setscheduler(2) or
// sched_setparam(2).
RealtimePriority int
// Limit (in microseconds) on the amount of CPU time that a process
// scheduled under a real-time scheduling policy may consume without making
// a blocking system call.
RealtimeTimeout int
}
@ -39,7 +66,7 @@ var (
// NewLimits returns the current soft limits of the process.
func (p Proc) NewLimits() (ProcLimits, error) {
f, err := p.open("limits")
f, err := os.Open(p.path("limits"))
if err != nil {
return ProcLimits{}, err
}
@ -60,7 +87,7 @@ func (p Proc) NewLimits() (ProcLimits, error) {
case "Max cpu time":
l.CPUTime, err = parseInt(fields[1])
case "Max file size":
l.FileLocks, err = parseInt(fields[1])
l.FileSize, err = parseInt(fields[1])
case "Max data size":
l.DataSize, err = parseInt(fields[1])
case "Max stack size":
@ -90,7 +117,6 @@ func (p Proc) NewLimits() (ProcLimits, error) {
case "Max realtime timeout":
l.RealtimeTimeout, err = parseInt(fields[1])
}
if err != nil {
return ProcLimits{}, err
}
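A short sketch reading a few of the documented limits for the current process through the path-based API; the fields printed are chosen for illustration only:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	limits, err := p.NewLimits()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("max open files:", limits.OpenFiles)
	fmt.Println("max file size: ", limits.FileSize)
}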


@ -7,15 +7,15 @@ import (
"os"
)
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
// required cgo. However, that caused a lot of problems regarding
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
// which required cgo. However, that caused a lot of problems regarding
// cross-compilation. Alternatives such as running a binary to determine the
// value, or trying to derive it in some other way were all problematic.
// After much research it was determined that USER_HZ is actually hardcoded to
// 100 on all Go-supported platforms as of the time of this writing. This is
// why we decided to hardcode it here as well. It is not impossible that there
// could be systems with exceptions, but they should be very exotic edge cases,
// and in that case, the worst outcome will be two misreported metrics.
// value, or trying to derive it in some other way were all problematic. After
// much research it was determined that USER_HZ is actually hardcoded to 100 on
// all Go-supported platforms as of the time of this writing. This is why we
// decided to hardcode it here as well. It is not impossible that there could
// be systems with exceptions, but they should be very exotic edge cases, and
// in that case, the worst outcome will be two misreported metrics.
//
// See also the following discussions:
//
@ -91,7 +91,7 @@ type ProcStat struct {
// NewStat returns the current status information of the process.
func (p Proc) NewStat() (ProcStat, error) {
f, err := p.open("stat")
f, err := os.Open(p.path("stat"))
if err != nil {
return ProcStat{}, err
}


@ -3,6 +3,7 @@ package procfs
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
@ -25,7 +26,7 @@ func NewStat() (Stat, error) {
// NewStat returns an information about current kernel/system statistics.
func (fs FS) NewStat() (Stat, error) {
f, err := fs.open("stat")
f, err := os.Open(fs.Path("stat"))
if err != nil {
return Stat{}, err
}

vendor/vendor.json (vendored; 32 changed lines)

@ -152,9 +152,10 @@
"revisionTime": "2015-09-05T08:12:15+01:00"
},
{
"checksumSHA1": "OpY4giv8kPIYbaunD7BSgCynj78=",
"path": "github.com/prometheus/client_golang/prometheus",
"revision": "449ccefff16c8e2b7229f6be1921ba22f62461fe",
"revisionTime": "2015-10-26T02:27:06+01:00"
"revision": "9f1ed1ed4a5f754c9b626e5cf8ec1ea7d622e017",
"revisionTime": "2016-06-27T14:36:20Z"
},
{
"path": "github.com/prometheus/client_model/go",
@ -170,37 +171,38 @@
{
"checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=",
"path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
"revision": "c16e34897a744c32f6733ee720e60c4de13887fb",
"revisionTime": "2016-05-19T16:20:33Z"
"revision": "4402f4e5ea79ec15f3c574773b6a5198fbea215f",
"revisionTime": "2016-06-23T15:14:27Z"
},
{
"checksumSHA1": "qHoBp/PVBcLedTNZrF3toV9QGa0=",
"path": "github.com/prometheus/common/log",
"revision": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d",
"revisionTime": "2016-05-26T15:55:09Z"
"revision": "4402f4e5ea79ec15f3c574773b6a5198fbea215f",
"revisionTime": "2016-06-23T15:14:27Z"
},
{
"checksumSHA1": "Zgmg/aOfoCNTAMtrXqBJmt852t0=",
"checksumSHA1": "Jx0GXl5hGnO25s3ryyvtdWHdCpw=",
"path": "github.com/prometheus/common/model",
"revision": "c16e34897a744c32f6733ee720e60c4de13887fb",
"revisionTime": "2016-05-19T16:20:33Z"
"revision": "4402f4e5ea79ec15f3c574773b6a5198fbea215f",
"revisionTime": "2016-06-23T15:14:27Z"
},
{
"checksumSHA1": "CKVJRc1NREmfoAWQLHxqWQlvxo0=",
"path": "github.com/prometheus/common/route",
"revision": "c16e34897a744c32f6733ee720e60c4de13887fb",
"revisionTime": "2016-05-19T16:20:33Z"
"revision": "4402f4e5ea79ec15f3c574773b6a5198fbea215f",
"revisionTime": "2016-06-23T15:14:27Z"
},
{
"checksumSHA1": "91KYK0SpvkaMJJA2+BcxbVnyRO0=",
"path": "github.com/prometheus/common/version",
"revision": "c16e34897a744c32f6733ee720e60c4de13887fb",
"revisionTime": "2016-05-19T16:20:33Z"
"revision": "4402f4e5ea79ec15f3c574773b6a5198fbea215f",
"revisionTime": "2016-06-23T15:14:27Z"
},
{
"checksumSHA1": "W218eJZPXJG783fUr/z6IaAZyes=",
"path": "github.com/prometheus/procfs",
"revision": "c91d8eefde16bd047416409eb56353ea84a186e4",
"revisionTime": "2015-06-16T16:46:31+02:00"
"revision": "abf152e5f3e97f2fafac028d2cc06c1feb87ffa5",
"revisionTime": "2016-04-11T19:08:41Z"
},
{
"path": "github.com/samuel/go-zookeeper/zk",