Update to latest procfs library (#1611)

Bump to v0.0.10 procfs library.

Signed-off-by: Ben Kochie <superq@gmail.com>
This commit is contained in:
Ben Kochie 2020-02-18 11:33:46 +01:00 committed by GitHub
parent dcfd610433
commit 14df2a1a1a
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
17 changed files with 853 additions and 254 deletions

View file

@ -70,7 +70,7 @@ func NewSoftnetCollector(logger log.Logger) (Collector, error) {
// Update gets parsed softnet statistics using procfs. // Update gets parsed softnet statistics using procfs.
func (c *softnetCollector) Update(ch chan<- prometheus.Metric) error { func (c *softnetCollector) Update(ch chan<- prometheus.Metric) error {
stats, err := c.fs.GatherSoftnetStats() stats, err := c.fs.NetSoftnetStat()
if err != nil { if err != nil {
return fmt.Errorf("could not get softnet statistics: %s", err) return fmt.Errorf("could not get softnet statistics: %s", err)
} }

2
go.mod
View file

@ -15,7 +15,7 @@ require (
github.com/prometheus/client_golang v1.0.0 github.com/prometheus/client_golang v1.0.0
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
github.com/prometheus/common v0.7.0 github.com/prometheus/common v0.7.0
github.com/prometheus/procfs v0.0.8 github.com/prometheus/procfs v0.0.10
github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745 github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745
github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a
go.uber.org/atomic v1.3.2 // indirect go.uber.org/atomic v1.3.2 // indirect

4
go.sum
View file

@ -87,8 +87,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.10 h1:QJQN3jYQhkamO4mhfUWqdDH2asK7ONOI9MTWjyAxNKM=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.10/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745 h1:IuH7WumZNax0D+rEqmy2TyhKCzrtMGqbZO0b8rO00JA= github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745 h1:IuH7WumZNax0D+rEqmy2TyhKCzrtMGqbZO0b8rO00JA=
github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=

View file

@ -69,12 +69,12 @@ else
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif endif
PROMU_VERSION ?= 0.4.0 PROMU_VERSION ?= 0.5.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
GOLANGCI_LINT := GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?= GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.16.0 GOLANGCI_LINT_VERSION ?= v1.18.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different. # windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@ -86,7 +86,8 @@ endif
PREFIX ?= $(shell pwd) PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd) BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKERFILE_PATH ?= ./ DOCKERFILE_PATH ?= ./Dockerfile
DOCKERBUILD_CONTEXT ?= ./
DOCKER_REPO ?= prom DOCKER_REPO ?= prom
DOCKER_ARCHS ?= amd64 DOCKER_ARCHS ?= amd64
@ -200,7 +201,7 @@ endif
.PHONY: common-build .PHONY: common-build
common-build: promu common-build: promu
@echo ">> building binaries" @echo ">> building binaries"
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
.PHONY: common-tarball .PHONY: common-tarball
common-tarball: promu common-tarball: promu
@ -211,9 +212,10 @@ common-tarball: promu
common-docker: $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%: $(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \ --build-arg ARCH="$*" \
--build-arg OS="linux" \ --build-arg OS="linux" \
$(DOCKERFILE_PATH) $(DOCKERBUILD_CONTEXT)
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS)

View file

@ -14,10 +14,10 @@
package procfs package procfs
import ( import (
"bufio"
"bytes" "bytes"
"fmt" "fmt"
"io/ioutil" "io"
"strconv"
"strings" "strings"
"github.com/prometheus/procfs/internal/util" "github.com/prometheus/procfs/internal/util"
@ -52,80 +52,102 @@ type Crypto struct {
// structs containing the relevant info. More information available here: // structs containing the relevant info. More information available here:
// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html // https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
func (fs FS) Crypto() ([]Crypto, error) { func (fs FS) Crypto() ([]Crypto, error) {
data, err := ioutil.ReadFile(fs.proc.Path("crypto")) path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err) return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
} }
crypto, err := parseCrypto(data)
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err) return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
} }
return crypto, nil return crypto, nil
} }
func parseCrypto(cryptoData []byte) ([]Crypto, error) { // parseCrypto parses a /proc/crypto stream into Crypto elements.
crypto := []Crypto{} func parseCrypto(r io.Reader) ([]Crypto, error) {
var out []Crypto
cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n")) s := bufio.NewScanner(r)
for s.Scan() {
for _, block := range cryptoBlocks { text := s.Text()
var newCryptoElem Crypto switch {
case strings.HasPrefix(text, "name"):
lines := strings.Split(string(block), "\n") // Each crypto element begins with its name.
for _, line := range lines { out = append(out, Crypto{})
if strings.TrimSpace(line) == "" || line[0] == ' ' { case text == "":
continue continue
} }
fields := strings.Split(line, ":")
key := strings.TrimSpace(fields[0]) kv := strings.Split(text, ":")
value := strings.TrimSpace(fields[1]) if len(kv) != 2 {
vp := util.NewValueParser(value) return nil, fmt.Errorf("malformed crypto line: %q", text)
}
switch strings.TrimSpace(key) {
case "async": k := strings.TrimSpace(kv[0])
b, err := strconv.ParseBool(value) v := strings.TrimSpace(kv[1])
if err == nil {
newCryptoElem.Async = b // Parse the key/value pair into the currently focused element.
} c := &out[len(out)-1]
case "blocksize": if err := c.parseKV(k, v); err != nil {
newCryptoElem.Blocksize = vp.PUInt64() return nil, err
case "chunksize":
newCryptoElem.Chunksize = vp.PUInt64()
case "digestsize":
newCryptoElem.Digestsize = vp.PUInt64()
case "driver":
newCryptoElem.Driver = value
case "geniv":
newCryptoElem.Geniv = value
case "internal":
newCryptoElem.Internal = value
case "ivsize":
newCryptoElem.Ivsize = vp.PUInt64()
case "maxauthsize":
newCryptoElem.Maxauthsize = vp.PUInt64()
case "max keysize":
newCryptoElem.MaxKeysize = vp.PUInt64()
case "min keysize":
newCryptoElem.MinKeysize = vp.PUInt64()
case "module":
newCryptoElem.Module = value
case "name":
newCryptoElem.Name = value
case "priority":
newCryptoElem.Priority = vp.PInt64()
case "refcnt":
newCryptoElem.Refcnt = vp.PInt64()
case "seedsize":
newCryptoElem.Seedsize = vp.PUInt64()
case "selftest":
newCryptoElem.Selftest = value
case "type":
newCryptoElem.Type = value
case "walksize":
newCryptoElem.Walksize = vp.PUInt64()
}
} }
crypto = append(crypto, newCryptoElem)
} }
return crypto, nil
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
// parseKV parses a key/value pair into the appropriate field of c.
func (c *Crypto) parseKV(k, v string) error {
vp := util.NewValueParser(v)
switch k {
case "async":
// Interpret literal yes as true.
c.Async = v == "yes"
case "blocksize":
c.Blocksize = vp.PUInt64()
case "chunksize":
c.Chunksize = vp.PUInt64()
case "digestsize":
c.Digestsize = vp.PUInt64()
case "driver":
c.Driver = v
case "geniv":
c.Geniv = v
case "internal":
c.Internal = v
case "ivsize":
c.Ivsize = vp.PUInt64()
case "maxauthsize":
c.Maxauthsize = vp.PUInt64()
case "max keysize":
c.MaxKeysize = vp.PUInt64()
case "min keysize":
c.MinKeysize = vp.PUInt64()
case "module":
c.Module = v
case "name":
c.Name = v
case "priority":
c.Priority = vp.PInt64()
case "refcnt":
c.Refcnt = vp.PInt64()
case "seedsize":
c.Seedsize = vp.PUInt64()
case "selftest":
c.Selftest = v
case "type":
c.Type = v
case "walksize":
c.Walksize = vp.PUInt64()
}
return vp.Err()
} }

View file

@ -189,7 +189,7 @@ Ngid: 0
Pid: 26231 Pid: 26231
PPid: 1 PPid: 1
TracerPid: 0 TracerPid: 0
Uid: 0 0 0 0 Uid: 1000 1000 1000 0
Gid: 0 0 0 0 Gid: 0 0 0 0
FDSize: 128 FDSize: 128
Groups: Groups:
@ -554,7 +554,7 @@ power management:
Mode: 444 Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/crypto Path: fixtures/proc/crypto
Lines: 971 Lines: 972
name : ccm(aes) name : ccm(aes)
driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni))
module : ccm module : ccm
@ -588,6 +588,7 @@ refcnt : 1
selftest : passed selftest : passed
internal : no internal : no
type : kpp type : kpp
async : yes
name : ecb(arc4) name : ecb(arc4)
driver : ecb(arc4)-generic driver : ecb(arc4)-generic
@ -1526,6 +1527,11 @@ blocksize : 16
min keysize : 16 min keysize : 16
max keysize : 32 max keysize : 32
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/loadavg
Lines: 1
0.02 0.04 0.05 1/497 11947
Mode: 444 Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/diskstats Path: fixtures/proc/diskstats
@ -1825,6 +1831,27 @@ Lines: 1
00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
Mode: 644 Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/udp
Lines: 4
sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
0: 0A000005:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/udp6
Lines: 3
sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0
6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/udp_broken
Lines: 2
sl local_address rem_address st
1: 00000000:0016 00000000:0000 0A
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/unix Path: fixtures/proc/net/unix
Lines: 6 Lines: 6
Num RefCount Protocol Flags Type St Inode Path Num RefCount Protocol Flags Type St Inode Path
@ -1930,6 +1957,12 @@ procs_blocked 1
softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
Mode: 644 Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/swaps
Lines: 2
Filename Type Size Used Priority
/dev/dm-2 partition 131068 176 -2
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/proc/symlinktargets Directory: fixtures/proc/symlinktargets
Mode: 755 Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

62
vendor/github.com/prometheus/procfs/loadavg.go generated vendored Normal file
View file

@ -0,0 +1,62 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// LoadAvg represents an entry in /proc/loadavg.
type LoadAvg struct {
	Load1  float64 // 1-minute load average.
	Load5  float64 // 5-minute load average.
	Load15 float64 // 15-minute load average.
}
// LoadAvg returns the load averages read from /proc/loadavg.
func (fs FS) LoadAvg() (*LoadAvg, error) {
	data, err := util.ReadFileNoStat(fs.proc.Path("loadavg"))
	if err != nil {
		return nil, err
	}
	return parseLoad(data)
}
// parseLoad parses the first three whitespace-separated fields of a
// /proc/loadavg line into the 1m, 5m and 15m load averages.
func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
	fields := strings.Fields(string(loadavgBytes))
	if len(fields) < 3 {
		return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes))
	}

	var loads [3]float64
	for i := 0; i < 3; i++ {
		f, err := strconv.ParseFloat(fields[i], 64)
		if err != nil {
			return nil, fmt.Errorf("could not parse load '%s': %s", fields[i], err)
		}
		loads[i] = f
	}
	return &LoadAvg{
		Load1:  loads[0],
		Load5:  loads[1],
		Load15: loads[2],
	}, nil
}

View file

@ -0,0 +1,153 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
// and contains netfilter conntrack statistics at one CPU core.
// All values are parsed from hexadecimal columns of that file
// (see parseConntrackStatEntry); field names mirror the kernel's
// nf_conntrack column names.
type ConntrackStatEntry struct {
	Entries       uint64
	Found         uint64
	Invalid       uint64
	Ignore        uint64
	Insert        uint64
	InsertFailed  uint64
	Drop          uint64
	EarlyDrop     uint64
	SearchRestart uint64
}
// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU
// cores (one entry per line of /proc/net/stat/nf_conntrack).
func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
	return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
}
// readConntrackStat reads the file at path and parses it into a slice of
// ConntrackStatEntry values.
func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
	// The file is small enough to read with a single syscall.
	b, err := util.ReadFileNoStat(path)
	if err != nil {
		// Return the read error unwrapped so callers can still detect
		// os.IsNotExist and similar conditions.
		return nil, err
	}

	entries, err := parseConntrackStat(bytes.NewReader(b))
	if err != nil {
		return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err)
	}
	return entries, nil
}
// parseConntrackStat reads the contents of a conntrack statistics stream and
// parses a slice of ConntrackStatEntry values, one per non-header line.
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
	var entries []ConntrackStatEntry

	scanner := bufio.NewScanner(r)
	// The first line is a column header; skip it.
	scanner.Scan()
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		conntrackEntry, err := parseConntrackStatEntry(fields)
		if err != nil {
			return nil, err
		}
		entries = append(entries, *conntrackEntry)
	}
	// Fix: the original dropped scanner errors, silently returning a
	// truncated result on a failed read. Check Err() like the other
	// scanner-based parsers in this package (e.g. newNetUDP) do.
	if err := scanner.Err(); err != nil {
		return nil, err
	}

	return entries, nil
}
// parseConntrackStatEntry parses a ConntrackStatEntry from the given slice of
// whitespace-separated fields (one nf_conntrack line).
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
	if len(fields) != 17 {
		return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
	}
	entry := &ConntrackStatEntry{}

	// Map each consumed column index to its destination field; the
	// remaining columns are not used at the moment. Columns are parsed in
	// ascending order, so the first malformed one aborts the entry.
	for _, col := range []struct {
		idx int
		dst *uint64
	}{
		{0, &entry.Entries},
		{2, &entry.Found},
		{4, &entry.Invalid},
		{5, &entry.Ignore},
		{8, &entry.Insert},
		{9, &entry.InsertFailed},
		{10, &entry.Drop},
		{11, &entry.EarlyDrop},
		{16, &entry.SearchRestart},
	} {
		v, err := parseConntrackStatField(fields[col.idx])
		if err != nil {
			return nil, err
		}
		*col.dst = v
	}

	return entry, nil
}
// parseConntrackStatField parses a single hexadecimal conntrack column into
// a uint64.
func parseConntrackStatField(field string) (uint64, error) {
	value, parseErr := strconv.ParseUint(field, 16, 64)
	if parseErr != nil {
		return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, parseErr)
	}
	return value, nil
}

View file

@ -14,78 +14,85 @@
package procfs package procfs
import ( import (
"bufio"
"bytes"
"fmt" "fmt"
"io/ioutil" "io"
"strconv" "strconv"
"strings" "strings"
"github.com/prometheus/procfs/internal/util"
) )
// For the proc file format details, // For the proc file format details,
// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 // see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. // and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
// SoftnetEntry contains a single row of data from /proc/net/softnet_stat // SoftnetStat contains a single row of data from /proc/net/softnet_stat
type SoftnetEntry struct { type SoftnetStat struct {
// Number of processed packets // Number of processed packets
Processed uint Processed uint32
// Number of dropped packets // Number of dropped packets
Dropped uint Dropped uint32
// Number of times processing packets ran out of quota // Number of times processing packets ran out of quota
TimeSqueezed uint TimeSqueezed uint32
} }
// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns, // NetSoftnetStat reads data from /proc/net/softnet_stat.
// and then return a slice of SoftnetEntry's. func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) { b, err := util.ReadFileNoStat(fs.proc.Path("net/softnet_stat"))
data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
if err != nil { if err != nil {
return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err) return nil, err
} }
return parseSoftnetEntries(data) entries, err := parseSoftnet(bytes.NewReader(b))
} if err != nil {
return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err)
func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
lines := strings.Split(string(data), "\n")
entries := make([]SoftnetEntry, 0)
var err error
const (
expectedColumns = 11
)
for _, line := range lines {
columns := strings.Fields(line)
width := len(columns)
if width == 0 {
continue
}
if width != expectedColumns {
return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
}
var entry SoftnetEntry
if entry, err = parseSoftnetEntry(columns); err != nil {
return []SoftnetEntry{}, err
}
entries = append(entries, entry)
} }
return entries, nil return entries, nil
} }
func parseSoftnetEntry(columns []string) (SoftnetEntry, error) { func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
var err error const expectedColumns = 11
var processed, dropped, timeSqueezed uint64
if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil { s := bufio.NewScanner(r)
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
var stats []SoftnetStat
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
if width != 11 {
return nil, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
}
// We only parse the first three columns at the moment.
us, err := parseHexUint32s(columns[0:3])
if err != nil {
return nil, err
}
stats = append(stats, SoftnetStat{
Processed: us[0],
Dropped: us[1],
TimeSqueezed: us[2],
})
} }
if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err) return stats, nil
} }
if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err) func parseHexUint32s(ss []string) ([]uint32, error) {
} us := make([]uint32, 0, len(ss))
return SoftnetEntry{ for _, s := range ss {
Processed: uint(processed), u, err := strconv.ParseUint(s, 16, 32)
Dropped: uint(dropped), if err != nil {
TimeSqueezed: uint(timeSqueezed), return nil, err
}, nil }
us = append(us, uint32(u))
}
return us, nil
} }

229
vendor/github.com/prometheus/procfs/net_udp.go generated vendored Normal file
View file

@ -0,0 +1,229 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"encoding/hex"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
)
const (
	// readLimit is used by io.LimitReader while reading the content of the
	// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
	// as each line represents a single used socket.
	// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
	// With e.g. 150 Byte per line and the maximum number of 65535,
	// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
	readLimit = 4294967296 // Byte -> 4 GiB
)

type (
	// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
	NetUDP []*netUDPLine

	// NetUDPSummary provides already computed values like the total queue lengths or
	// the total number of used sockets. In contrast to NetUDP it does not collect
	// the parsed lines into a slice.
	NetUDPSummary struct {
		// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
		TxQueueLength uint64
		// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
		RxQueueLength uint64
		// UsedSockets shows the total number of parsed lines representing the
		// number of used sockets.
		UsedSockets uint64
	}

	// netUDPLine represents the fields parsed from a single line
	// in /proc/net/udp{,6}. Fields which are not used by UDP are skipped.
	// Addresses are decoded from the kernel's hexadecimal representation
	// (see parseNetUDPLine). For the proc file format details,
	// see https://linux.die.net/man/5/proc.
	netUDPLine struct {
		Sl        uint64 // "sl" column: socket slot number.
		LocalAddr net.IP
		LocalPort uint64
		RemAddr   net.IP
		RemPort   uint64
		St        uint64 // "st" column: socket state.
		TxQueue   uint64
		RxQueue   uint64
		UID       uint64
	}
)
// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
// read from /proc/net/udp.
func (fs FS) NetUDP() (NetUDP, error) {
	return newNetUDP(fs.proc.Path("net/udp"))
}

// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
// read from /proc/net/udp6.
func (fs FS) NetUDP6() (NetUDP, error) {
	return newNetUDP(fs.proc.Path("net/udp6"))
}

// NetUDPSummary returns already computed statistics like the total queue
// lengths for UDP datagrams read from /proc/net/udp.
func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
	return newNetUDPSummary(fs.proc.Path("net/udp"))
}

// NetUDP6Summary returns already computed statistics like the total queue
// lengths for UDP datagrams read from /proc/net/udp6.
func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
	return newNetUDPSummary(fs.proc.Path("net/udp6"))
}
// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
func newNetUDP(file string) (NetUDP, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// Cap the read at readLimit so a pathological file cannot exhaust memory.
	s := bufio.NewScanner(io.LimitReader(f, readLimit))
	s.Scan() // Discard the header line.

	res := NetUDP{}
	for s.Scan() {
		line, err := parseNetUDPLine(strings.Fields(s.Text()))
		if err != nil {
			return nil, err
		}
		res = append(res, line)
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return res, nil
}
// newNetUDPSummary aggregates the contents of the given /proc/net/udp{,6}
// file into a NetUDPSummary without retaining the individual lines.
func newNetUDPSummary(file string) (*NetUDPSummary, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// Cap the read at readLimit so a pathological file cannot exhaust memory.
	s := bufio.NewScanner(io.LimitReader(f, readLimit))
	s.Scan() // Discard the header line.

	summary := &NetUDPSummary{}
	for s.Scan() {
		line, err := parseNetUDPLine(strings.Fields(s.Text()))
		if err != nil {
			return nil, err
		}
		summary.TxQueueLength += line.TxQueue
		summary.RxQueueLength += line.RxQueue
		summary.UsedSockets++
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return summary, nil
}
// parseNetUDPLine parses a single line, represented by a list of fields.
// Addresses and ports are hex-encoded in the kernel's format; queue sizes
// and the state are hexadecimal as well, while sl and uid are decimal.
func parseNetUDPLine(fields []string) (*netUDPLine, error) {
	line := &netUDPLine{}
	if len(fields) < 8 {
		// Fix: "less then" -> "less than" in the error message.
		return nil, fmt.Errorf(
			"cannot parse net udp socket line as it has less than 8 columns: %s",
			strings.Join(fields, " "),
		)
	}
	var err error // parse error

	// sl (e.g. "0:")
	s := strings.Split(fields[0], ":")
	if len(s) != 2 {
		return nil, fmt.Errorf(
			"cannot parse sl field in udp socket line: %s", fields[0])
	}
	if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
		return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err)
	}
	// local_address (hex address ":" hex port)
	l := strings.Split(fields[1], ":")
	if len(l) != 2 {
		return nil, fmt.Errorf(
			"cannot parse local_address field in udp socket line: %s", fields[1])
	}
	if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil {
		return nil, fmt.Errorf(
			"cannot parse local_address value in udp socket line: %s", err)
	}
	if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
		return nil, fmt.Errorf(
			"cannot parse local_address port value in udp socket line: %s", err)
	}
	// remote_address (hex address ":" hex port)
	r := strings.Split(fields[2], ":")
	if len(r) != 2 {
		// Fix: report the field actually being parsed (fields[2]); the
		// original mistakenly echoed fields[1] (the local address) here.
		return nil, fmt.Errorf(
			"cannot parse rem_address field in udp socket line: %s", fields[2])
	}
	if line.RemAddr, err = hex.DecodeString(r[0]); err != nil {
		return nil, fmt.Errorf(
			"cannot parse rem_address value in udp socket line: %s", err)
	}
	if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
		return nil, fmt.Errorf(
			"cannot parse rem_address port value in udp socket line: %s", err)
	}
	// st (socket state, hexadecimal)
	if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
		return nil, fmt.Errorf(
			"cannot parse st value in udp socket line: %s", err)
	}
	// tx_queue and rx_queue, joined by a colon
	q := strings.Split(fields[4], ":")
	if len(q) != 2 {
		return nil, fmt.Errorf(
			"cannot parse tx/rx queues in udp socket line as it has a missing colon: %s",
			fields[4],
		)
	}
	if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
		return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err)
	}
	if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
		return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err)
	}
	// uid (decimal)
	if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
		return nil, fmt.Errorf(
			"cannot parse uid value in udp socket line: %s", err)
	}

	return line, nil
}

View file

@ -15,7 +15,6 @@ package procfs
import ( import (
"bufio" "bufio"
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
@ -27,25 +26,15 @@ import (
// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 // see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. // and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
const ( // Constants for the various /proc/net/unix enumerations.
netUnixKernelPtrIdx = iota // TODO: match against x/sys/unix or similar?
netUnixRefCountIdx
_
netUnixFlagsIdx
netUnixTypeIdx
netUnixStateIdx
netUnixInodeIdx
// Inode and Path are optional.
netUnixStaticFieldsCnt = 6
)
const ( const (
netUnixTypeStream = 1 netUnixTypeStream = 1
netUnixTypeDgram = 2 netUnixTypeDgram = 2
netUnixTypeSeqpacket = 5 netUnixTypeSeqpacket = 5
netUnixFlagListen = 1 << 16 netUnixFlagDefault = 0
netUnixFlagListen = 1 << 16
netUnixStateUnconnected = 1 netUnixStateUnconnected = 1
netUnixStateConnecting = 2 netUnixStateConnecting = 2
@ -53,129 +42,127 @@ const (
netUnixStateDisconnected = 4 netUnixStateDisconnected = 4
) )
var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format") // NetUNIXType is the type of the type field.
type NetUNIXType uint64
// NetUnixType is the type of the type field. // NetUNIXFlags is the type of the flags field.
type NetUnixType uint64 type NetUNIXFlags uint64
// NetUnixFlags is the type of the flags field. // NetUNIXState is the type of the state field.
type NetUnixFlags uint64 type NetUNIXState uint64
// NetUnixState is the type of the state field. // NetUNIXLine represents a line of /proc/net/unix.
type NetUnixState uint64 type NetUNIXLine struct {
// NetUnixLine represents a line of /proc/net/unix.
type NetUnixLine struct {
KernelPtr string KernelPtr string
RefCount uint64 RefCount uint64
Protocol uint64 Protocol uint64
Flags NetUnixFlags Flags NetUNIXFlags
Type NetUnixType Type NetUNIXType
State NetUnixState State NetUNIXState
Inode uint64 Inode uint64
Path string Path string
} }
// NetUnix holds the data read from /proc/net/unix. // NetUNIX holds the data read from /proc/net/unix.
type NetUnix struct { type NetUNIX struct {
Rows []*NetUnixLine Rows []*NetUNIXLine
} }
// NewNetUnix returns data read from /proc/net/unix. // NetUNIX returns data read from /proc/net/unix.
func NewNetUnix() (*NetUnix, error) { func (fs FS) NetUNIX() (*NetUNIX, error) {
fs, err := NewFS(DefaultMountPoint) return readNetUNIX(fs.proc.Path("net/unix"))
if err != nil {
return nil, err
}
return fs.NewNetUnix()
} }
// NewNetUnix returns data read from /proc/net/unix. // readNetUNIX reads data in /proc/net/unix format from the specified file.
func (fs FS) NewNetUnix() (*NetUnix, error) { func readNetUNIX(file string) (*NetUNIX, error) {
return NewNetUnixByPath(fs.proc.Path("net/unix")) // This file could be quite large and a streaming read is desirable versus
} // reading the entire contents at once.
f, err := os.Open(file)
// NewNetUnixByPath returns data read from /proc/net/unix by file path.
// It might returns an error with partial parsed data, if an error occur after some data parsed.
func NewNetUnixByPath(path string) (*NetUnix, error) {
f, err := os.Open(path)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer f.Close() defer f.Close()
return NewNetUnixByReader(f)
return parseNetUNIX(f)
} }
// NewNetUnixByReader returns data read from /proc/net/unix by a reader. // parseNetUNIX creates a NetUnix structure from the incoming stream.
// It might returns an error with partial parsed data, if an error occur after some data parsed. func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) { // Begin scanning by checking for the existence of Inode.
nu := &NetUnix{ s := bufio.NewScanner(r)
Rows: make([]*NetUnixLine, 0, 32), s.Scan()
}
scanner := bufio.NewScanner(reader)
// Omit the header line.
scanner.Scan()
header := scanner.Text()
// From the man page of proc(5), it does not contain an Inode field,
// but in actually it exists.
// This code works for both cases.
hasInode := strings.Contains(header, "Inode")
minFieldsCnt := netUnixStaticFieldsCnt // From the man page of proc(5), it does not contain an Inode field,
// but in actually it exists. This code works for both cases.
hasInode := strings.Contains(s.Text(), "Inode")
// Expect a minimum number of fields, but Inode and Path are optional:
// Num RefCount Protocol Flags Type St Inode Path
minFields := 6
if hasInode { if hasInode {
minFieldsCnt++ minFields++
} }
for scanner.Scan() {
line := scanner.Text() var nu NetUNIX
item, err := nu.parseLine(line, hasInode, minFieldsCnt) for s.Scan() {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil { if err != nil {
return nu, err return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err)
} }
nu.Rows = append(nu.Rows, item) nu.Rows = append(nu.Rows, item)
} }
return nu, scanner.Err() if err := s.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err)
}
return &nu, nil
} }
func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) { func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
fields := strings.Fields(line) fields := strings.Fields(line)
fieldsLen := len(fields)
if fieldsLen < minFieldsCnt { l := len(fields)
return nil, fmt.Errorf( if l < min {
"Parse Unix domain failed: expect at least %d fields but got %d", return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
minFieldsCnt, fieldsLen)
} }
kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
// Field offsets are as follows:
// Num RefCount Protocol Flags Type St Inode Path
kernelPtr := strings.TrimSuffix(fields[0], ":")
users, err := u.parseUsers(fields[1])
if err != nil { if err != nil {
return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err) return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err)
} }
users, err := u.parseUsers(fields[netUnixRefCountIdx])
flags, err := u.parseFlags(fields[3])
if err != nil { if err != nil {
return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err) return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err)
} }
flags, err := u.parseFlags(fields[netUnixFlagsIdx])
typ, err := u.parseType(fields[4])
if err != nil { if err != nil {
return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err) return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err)
} }
typ, err := u.parseType(fields[netUnixTypeIdx])
state, err := u.parseState(fields[5])
if err != nil { if err != nil {
return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err) return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err)
}
state, err := u.parseState(fields[netUnixStateIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
} }
var inode uint64 var inode uint64
if hasInode { if hasInode {
inodeStr := fields[netUnixInodeIdx] inode, err = u.parseInode(fields[6])
inode, err = u.parseInode(inodeStr)
if err != nil { if err != nil {
return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err) return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err)
} }
} }
nuLine := &NetUnixLine{ n := &NetUNIXLine{
KernelPtr: kernelPtr, KernelPtr: kernelPtr,
RefCount: users, RefCount: users,
Type: typ, Type: typ,
@ -185,57 +172,56 @@ func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetU
} }
// Path field is optional. // Path field is optional.
if fieldsLen > minFieldsCnt { if l > min {
pathIdx := netUnixInodeIdx + 1 // Path occurs at either index 6 or 7 depending on whether inode is
// already present.
pathIdx := 7
if !hasInode { if !hasInode {
pathIdx-- pathIdx--
} }
nuLine.Path = fields[pathIdx]
n.Path = fields[pathIdx]
} }
return nuLine, nil return n, nil
} }
func (u NetUnix) parseKernelPtr(str string) (string, error) { func (u NetUNIX) parseUsers(s string) (uint64, error) {
if !strings.HasSuffix(str, ":") { return strconv.ParseUint(s, 16, 32)
return "", errInvalidKernelPtrFmt
}
return str[:len(str)-1], nil
} }
func (u NetUnix) parseUsers(hexStr string) (uint64, error) { func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
return strconv.ParseUint(hexStr, 16, 32) typ, err := strconv.ParseUint(s, 16, 16)
}
func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
typ, err := strconv.ParseUint(hexStr, 16, 16)
if err != nil { if err != nil {
return 0, err return 0, err
} }
return NetUnixType(typ), nil
return NetUNIXType(typ), nil
} }
func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) { func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
flags, err := strconv.ParseUint(hexStr, 16, 32) flags, err := strconv.ParseUint(s, 16, 32)
if err != nil { if err != nil {
return 0, err return 0, err
} }
return NetUnixFlags(flags), nil
return NetUNIXFlags(flags), nil
} }
func (u NetUnix) parseState(hexStr string) (NetUnixState, error) { func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
st, err := strconv.ParseInt(hexStr, 16, 8) st, err := strconv.ParseInt(s, 16, 8)
if err != nil { if err != nil {
return 0, err return 0, err
} }
return NetUnixState(st), nil
return NetUNIXState(st), nil
} }
func (u NetUnix) parseInode(inodeStr string) (uint64, error) { func (u NetUNIX) parseInode(s string) (uint64, error) {
return strconv.ParseUint(inodeStr, 10, 64) return strconv.ParseUint(s, 10, 64)
} }
func (t NetUnixType) String() string { func (t NetUNIXType) String() string {
switch t { switch t {
case netUnixTypeStream: case netUnixTypeStream:
return "stream" return "stream"
@ -247,7 +233,7 @@ func (t NetUnixType) String() string {
return "unknown" return "unknown"
} }
func (f NetUnixFlags) String() string { func (f NetUNIXFlags) String() string {
switch f { switch f {
case netUnixFlagListen: case netUnixFlagListen:
return "listen" return "listen"
@ -256,7 +242,7 @@ func (f NetUnixFlags) String() string {
} }
} }
func (s NetUnixState) String() string { func (s NetUNIXState) String() string {
switch s { switch s {
case netUnixStateUnconnected: case netUnixStateUnconnected:
return "unconnected" return "unconnected"

View file

@ -71,6 +71,9 @@ type ProcStatus struct {
VoluntaryCtxtSwitches uint64 VoluntaryCtxtSwitches uint64
// Number of involuntary context switches. // Number of involuntary context switches.
NonVoluntaryCtxtSwitches uint64 NonVoluntaryCtxtSwitches uint64
// UIDs of the process (Real, effective, saved set, and filesystem UIDs (GIDs))
UIDs [4]string
} }
// NewStatus returns the current status information of the process. // NewStatus returns the current status information of the process.
@ -114,6 +117,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.TGID = int(vUint) s.TGID = int(vUint)
case "Name": case "Name":
s.Name = vString s.Name = vString
case "Uid":
copy(s.UIDs[:], strings.Split(vString, "\t"))
case "VmPeak": case "VmPeak":
s.VmPeak = vUintBytes s.VmPeak = vUintBytes
case "VmSize": case "VmSize":

89
vendor/github.com/prometheus/procfs/swaps.go generated vendored Normal file
View file

@ -0,0 +1,89 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Swap represents an entry in /proc/swaps.
// Field order mirrors the columns of /proc/swaps:
// Filename Type Size Used Priority.
type Swap struct {
	// Filename is the path of the swap file or device (column 1).
	Filename string
	// Type is the swap area type, e.g. "partition" or "file" (column 2).
	Type string
	// Size is the total size of the swap area (column 3; the kernel
	// reports this in KiB — confirm against proc(5) if units matter).
	Size int
	// Used is the amount of the swap area currently in use (column 4,
	// same units as Size).
	Used int
	// Priority is the swap priority (column 5); may be negative.
	Priority int
}
// Swaps returns a slice of all configured swap devices on the system.
// It reads and parses the proc filesystem's "swaps" file.
func (fs FS) Swaps() ([]*Swap, error) {
	b, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
	if err != nil {
		return nil, err
	}

	return parseSwaps(b)
}
// parseSwaps parses the raw contents of /proc/swaps, skipping the
// header line and converting each remaining line into a *Swap.
func parseSwaps(info []byte) ([]*Swap, error) {
	swaps := []*Swap{}

	s := bufio.NewScanner(bytes.NewReader(info))

	// The first line is a column header; discard it.
	s.Scan()

	for s.Scan() {
		swap, err := parseSwapString(s.Text())
		if err != nil {
			return nil, err
		}
		swaps = append(swaps, swap)
	}

	// Surface any scanner error alongside whatever was parsed so far,
	// matching the original contract of returning partial results.
	return swaps, s.Err()
}
// parseSwapString converts a single non-header line of /proc/swaps
// (whitespace-separated: Filename Type Size Used Priority) into a *Swap.
// It returns an error when fewer than five fields are present or when
// any of the numeric columns fail to parse as base-10 integers.
func parseSwapString(swapString string) (*Swap, error) {
	fields := strings.Fields(swapString)
	if len(fields) < 5 {
		return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
	}

	size, err := strconv.Atoi(fields[2])
	if err != nil {
		return nil, fmt.Errorf("invalid swap size: %s", fields[2])
	}

	used, err := strconv.Atoi(fields[3])
	if err != nil {
		return nil, fmt.Errorf("invalid swap used: %s", fields[3])
	}

	priority, err := strconv.Atoi(fields[4])
	if err != nil {
		return nil, fmt.Errorf("invalid swap priority: %s", fields[4])
	}

	return &Swap{
		Filename: fields[0],
		Type:     fields[1],
		Size:     size,
		Used:     used,
		Priority: priority,
	}, nil
}

View file

@ -97,7 +97,7 @@ func (fs FS) InfiniBandClass() (InfiniBandClass, error) {
dirs, err := ioutil.ReadDir(path) dirs, err := ioutil.ReadDir(path)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to list InfiniBand devices at %q: %v", path, err) return nil, err
} }
ibc := make(InfiniBandClass, len(dirs)) ibc := make(InfiniBandClass, len(dirs))

View file

@ -381,11 +381,15 @@ func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
} }
func quotaManagerStats(us []uint32) (QuotaManagerStats, error) { func quotaManagerStats(us []uint32) (QuotaManagerStats, error) {
if l := len(us); l != 8 { // The "Unused" attribute first appears in Linux 4.20
// As a result either 8 or 9 elements may appear in this slice depending on
// the kernel version.
l := len(us)
if l != 8 && l != 9 {
return QuotaManagerStats{}, fmt.Errorf("incorrect number of values for XFS quota stats: %d", l) return QuotaManagerStats{}, fmt.Errorf("incorrect number of values for XFS quota stats: %d", l)
} }
return QuotaManagerStats{ s := QuotaManagerStats{
Reclaims: us[0], Reclaims: us[0],
ReclaimMisses: us[1], ReclaimMisses: us[1],
DquoteDups: us[2], DquoteDups: us[2],
@ -394,7 +398,13 @@ func quotaManagerStats(us []uint32) (QuotaManagerStats, error) {
Wants: us[5], Wants: us[5],
ShakeReclaims: us[6], ShakeReclaims: us[6],
InactReclaims: us[7], InactReclaims: us[7],
}, nil }
if l > 8 {
s.Unused = us[8]
}
return s, nil
} }
func debugStats(us []uint32) (DebugStats, error) { func debugStats(us []uint32) (DebugStats, error) {

View file

@ -202,6 +202,7 @@ type QuotaManagerStats struct {
Wants uint32 Wants uint32
ShakeReclaims uint32 ShakeReclaims uint32
InactReclaims uint32 InactReclaims uint32
Unused uint32
} }
// XstratStats contains statistics regarding bytes processed by the XFS daemon. // XstratStats contains statistics regarding bytes processed by the XFS daemon.

2
vendor/modules.txt vendored
View file

@ -53,7 +53,7 @@ github.com/prometheus/common/model
github.com/prometheus/common/promlog github.com/prometheus/common/promlog
github.com/prometheus/common/promlog/flag github.com/prometheus/common/promlog/flag
github.com/prometheus/common/version github.com/prometheus/common/version
# github.com/prometheus/procfs v0.0.8 # github.com/prometheus/procfs v0.0.10
github.com/prometheus/procfs github.com/prometheus/procfs
github.com/prometheus/procfs/bcache github.com/prometheus/procfs/bcache
github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/fs