Update prometheus/client_golang and prometheus/tsdb modules

Signed-off-by: beorn7 <bjoern@rabenste.in>
beorn7 2019-05-17 14:53:38 +02:00
parent 840872a6f8
commit 12536f3027
57 changed files with 2361 additions and 2213 deletions

go.mod

@@ -74,10 +74,10 @@ require (
github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea // indirect
github.com/petermattis/goid v0.0.0-20170504144140-0ded85884ba5 // indirect
github.com/pkg/errors v0.8.1
- github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829
- github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f
+ github.com/prometheus/client_golang v0.9.3
+ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
github.com/prometheus/common v0.4.0
- github.com/prometheus/tsdb v0.7.1
+ github.com/prometheus/tsdb v0.8.0
github.com/rlmcpherson/s3gof3r v0.5.0 // indirect
github.com/rubyist/circuitbreaker v2.2.1+incompatible // indirect
github.com/samuel/go-zookeeper v0.0.0-20161028232340-1d7be4effb13

go.sum

@@ -32,6 +32,8 @@ github.com/aws/aws-sdk-go v0.0.0-20180507225419-00862f899353 h1:qFKf58XUUvHaEz0z
github.com/aws/aws-sdk-go v0.0.0-20180507225419-00862f899353/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+ github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
+ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8 h1:tYoz1OeRpx3dJZlh9T4dQt4kAndcmpl+VNdzbSgFC/0=
github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8/go.mod h1:Iev9Q3MErcn+w3UOJD/DkEzllvugfdx7bGcMOFhvr/4=
@@ -262,10 +264,14 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+ github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
+ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
+ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
@@ -273,8 +279,12 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
+ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+ github.com/prometheus/tsdb v0.8.0 h1:w1tAGxsBMLkuGrFMhqgcCeBkM5d1YI24udArs+aASuQ=
+ github.com/prometheus/tsdb v0.8.0/go.mod h1:fSI0j+IUQrDd7+ZtR9WKIGtoYAYAJUKcKhYLG25tN4g=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rlmcpherson/s3gof3r v0.5.0 h1:1izOJpTiohSibfOHuNyEA/yQnAirh05enzEdmhez43k=


@@ -11,8 +11,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
- // +build go1.7
// Package api provides clients for the HTTP APIs.
package api
@@ -60,6 +58,28 @@ type Client interface {
Do(context.Context, *http.Request) (*http.Response, []byte, error)
}
+ // DoGetFallback will attempt to do the request as-is, and on a 405 it will fallback to a GET request.
+ func DoGetFallback(c Client, ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, error) {
+ req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(args.Encode()))
+ if err != nil {
+ return nil, nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ resp, body, err := c.Do(ctx, req)
+ if resp != nil && resp.StatusCode == http.StatusMethodNotAllowed {
+ u.RawQuery = args.Encode()
+ req, err = http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ } else {
+ return resp, body, err
+ }
+ return c.Do(ctx, req)
+ }
// NewClient returns a new Client.
//
// It is safe to use the returned Client from multiple goroutines.
@@ -119,8 +139,8 @@ func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response,
select {
case <-ctx.Done():
- err = resp.Body.Close()
<-done
+ err = resp.Body.Close()
if err == nil {
err = ctx.Err()
}
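The DoGetFallback helper added above sends a POST and falls back to GET on a 405 response. A minimal usage sketch, assuming a Prometheus server address and query parameters chosen purely for illustration:

package main

import (
	"context"
	"fmt"
	"net/url"

	"github.com/prometheus/client_golang/api"
)

func main() {
	// Hypothetical Prometheus server address.
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		panic(err)
	}
	u := client.URL("/api/v1/query", nil)
	args := url.Values{"query": []string{"up"}}

	// POST first; on a 405 the same parameters are retried as a GET
	// with an encoded query string.
	_, body, err := api.DoGetFallback(client, context.Background(), u, args)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}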


@@ -11,8 +11,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
- // +build go1.7
// Package v1 provides bindings to the Prometheus HTTP API v1:
// http://prometheus.io/docs/querying/api/
package v1
@@ -35,6 +33,7 @@ const (
apiPrefix = "/api/v1"
+ epAlerts = apiPrefix + "/alerts"
epAlertManagers = apiPrefix + "/alertmanagers"
epQuery = apiPrefix + "/query"
epQueryRange = apiPrefix + "/query_range"
@@ -115,6 +114,8 @@ type Range struct {
// API provides bindings for Prometheus's v1 API.
type API interface {
+ // Alerts returns a list of all active alerts.
+ Alerts(ctx context.Context) (AlertsResult, error)
// AlertManagers returns an overview of the current state of the Prometheus alert manager discovery.
AlertManagers(ctx context.Context) (AlertManagersResult, error)
// CleanTombstones removes the deleted data from disk and cleans up the existing tombstones.
@@ -142,6 +143,11 @@ type API interface {
Targets(ctx context.Context) (TargetsResult, error)
}
+ // AlertsResult contains the result from querying the alerts endpoint.
+ type AlertsResult struct {
+ Alerts []Alert `json:"alerts"`
+ }
// AlertManagersResult contains the result from querying the alertmanagers endpoint.
type AlertManagersResult struct {
Active []AlertManager `json:"activeAlertManagers"`
@@ -402,6 +408,24 @@ type httpAPI struct {
client api.Client
}
+ func (h *httpAPI) Alerts(ctx context.Context) (AlertsResult, error) {
+ u := h.client.URL(epAlerts, nil)
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return AlertsResult{}, err
+ }
+ _, body, err := h.client.Do(ctx, req)
+ if err != nil {
+ return AlertsResult{}, err
+ }
+ var res AlertsResult
+ err = json.Unmarshal(body, &res)
+ return res, err
+ }
func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error) {
u := h.client.URL(epAlertManagers, nil)
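The new Alerts endpoint support can be exercised through the v1 bindings; a minimal sketch, with the server address assumed for illustration:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	// Hypothetical Prometheus server address.
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	result, err := v1.NewAPI(client).Alerts(ctx)
	if err != nil {
		panic(err)
	}
	for _, alert := range result.Alerts {
		fmt.Printf("%+v\n", alert)
	}
}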
@@ -514,14 +538,7 @@ func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.
q.Set("time", ts.Format(time.RFC3339Nano))
}
- u.RawQuery = q.Encode()
- req, err := http.NewRequest(http.MethodGet, u.String(), nil)
- if err != nil {
- return nil, err
- }
- _, body, err := h.client.Do(ctx, req)
+ _, body, err := api.DoGetFallback(h.client, ctx, u, q)
if err != nil {
return nil, err
}
@@ -547,14 +564,7 @@ func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.
q.Set("end", end)
q.Set("step", step)
- u.RawQuery = q.Encode()
- req, err := http.NewRequest(http.MethodGet, u.String(), nil)
- if err != nil {
- return nil, err
- }
- _, body, err := h.client.Do(ctx, req)
+ _, body, err := api.DoGetFallback(h.client, ctx, u, q)
if err != nil {
return nil, err
}


@@ -14,9 +14,9 @@
package prometheus
import (
- "fmt"
"runtime"
"runtime/debug"
+ "sync"
"time"
)
@@ -26,16 +26,41 @@ type goCollector struct {
gcDesc *Desc
goInfoDesc *Desc
- // metrics to describe and collect
- metrics memStatsMetrics
+ // ms... are memstats related.
+ msLast *runtime.MemStats // Previously collected memstats.
+ msLastTimestamp time.Time
+ msMtx sync.Mutex // Protects msLast and msLastTimestamp.
+ msMetrics memStatsMetrics
+ msRead func(*runtime.MemStats) // For mocking in tests.
+ msMaxWait time.Duration // Wait time for fresh memstats.
+ msMaxAge time.Duration // Maximum allowed age of old memstats.
}
// NewGoCollector returns a collector which exports metrics about the current Go
// process. This includes memory stats. To collect those, runtime.ReadMemStats
- // is called. This causes a stop-the-world, which is very short with Go1.9+
- // (~25µs). However, with older Go versions, the stop-the-world duration depends
- // on the heap size and can be quite significant (~1.7 ms/GiB as per
+ // is called. This requires to “stop the world”, which usually only happens for
+ // garbage collection (GC). Take the following implications into account when
+ // deciding whether to use the Go collector:
+ //
+ // 1. The performance impact of stopping the world is the more relevant the more
+ // frequently metrics are collected. However, with Go1.9 or later the
+ // stop-the-world time per metrics collection is very short (~25µs) so that the
+ // performance impact will only matter in rare cases. However, with older Go
+ // versions, the stop-the-world duration depends on the heap size and can be
+ // quite significant (~1.7 ms/GiB as per
// https://go-review.googlesource.com/c/go/+/34937).
+ //
+ // 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
+ // metrics collection happens to coincide with GC, it will only complete after
+ // GC has finished. Usually, GC is fast enough to not cause problems. However,
+ // with a very large heap, GC might take multiple seconds, which is enough to
+ // cause scrape timeouts in common setups. To avoid this problem, the Go
+ // collector will use the memstats from a previous collection if
+ // runtime.ReadMemStats takes more than 1s. However, if there are no previously
+ // collected memstats, or their collection is more than 5m ago, the collection
+ // will block until runtime.ReadMemStats succeeds. (The problem might be solved
+ // in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
+ // issue.)
func NewGoCollector() Collector {
return &goCollector{
goroutinesDesc: NewDesc(
@@ -54,7 +79,11 @@ func NewGoCollector() Collector {
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
- metrics: memStatsMetrics{
+ msLast: &runtime.MemStats{},
+ msRead: runtime.ReadMemStats,
+ msMaxWait: time.Second,
+ msMaxAge: 5 * time.Minute,
+ msMetrics: memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
@@ -253,7 +282,7 @@
}
func memstatNamespace(s string) string {
- return fmt.Sprintf("go_memstats_%s", s)
+ return "go_memstats_" + s
}
// Describe returns all descriptions of the collector.
@@ -262,13 +291,27 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
ch <- c.threadsDesc
ch <- c.gcDesc
ch <- c.goInfoDesc
- for _, i := range c.metrics {
+ for _, i := range c.msMetrics {
ch <- i.desc
}
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
+ var (
+ ms = &runtime.MemStats{}
+ done = make(chan struct{})
+ )
+ // Start reading memstats first as it might take a while.
+ go func() {
+ c.msRead(ms)
+ c.msMtx.Lock()
+ c.msLast = ms
+ c.msLastTimestamp = time.Now()
+ c.msMtx.Unlock()
+ close(done)
+ }()
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
n, _ := runtime.ThreadCreateProfile(nil)
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
@@ -286,9 +329,31 @@ func (c *goCollector) Collect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
- ms := &runtime.MemStats{}
- runtime.ReadMemStats(ms)
- for _, i := range c.metrics {
+ timer := time.NewTimer(c.msMaxWait)
+ select {
+ case <-done: // Our own ReadMemStats succeeded in time. Use it.
+ timer.Stop() // Important for high collection frequencies to not pile up timers.
+ c.msCollect(ch, ms)
+ return
+ case <-timer.C: // Time out, use last memstats if possible. Continue below.
+ }
+ c.msMtx.Lock()
+ if time.Since(c.msLastTimestamp) < c.msMaxAge {
+ // Last memstats are recent enough. Collect from them under the lock.
+ c.msCollect(ch, c.msLast)
+ c.msMtx.Unlock()
+ return
+ }
+ // If we are here, the last memstats are too old or don't exist. We have
+ // to wait until our own ReadMemStats finally completes. For that to
+ // happen, we have to release the lock.
+ c.msMtx.Unlock()
+ <-done
+ c.msCollect(ch, ms)
+ }
+ func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+ for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
}
}
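For context, the collector behaves like any other when registered; a minimal sketch using a dedicated registry (listen address and metrics path are assumptions):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry keeps the example self-contained; the default
	// registry already has a Go collector registered.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	// Every scrape triggers one runtime.ReadMemStats call; per the new
	// behaviour, a read slower than 1s falls back to the previous snapshot,
	// as long as that snapshot is younger than 5m.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}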


@@ -224,18 +224,21 @@ type histogramCounts struct {
}
type histogram struct {
- // countAndHotIdx is a complicated one. For lock-free yet atomic
- // observations, we need to save the total count of observations again,
- // combined with the index of the currently-hot counts struct, so that
- // we can perform the operation on both values atomically. The least
- // significant bit defines the hot counts struct. The remaining 63 bits
- // represent the total count of observations. This happens under the
- // assumption that the 63bit count will never overflow. Rationale: An
- // observations takes about 30ns. Let's assume it could happen in
- // 10ns. Overflowing the counter will then take at least (2^63)*10ns,
- // which is about 3000 years.
+ // countAndHotIdx enables lock-free writes with use of atomic updates.
+ // The most significant bit is the hot index [0 or 1] of the count field
+ // below. Observe calls update the hot one. All remaining bits count the
+ // number of Observe calls. Observe starts by incrementing this counter,
+ // and finish by incrementing the count field in the respective
+ // histogramCounts, as a marker for completion.
//
- // This has to be first in the struct for 64bit alignment. See
+ // Calls of the Write method (which are non-mutating reads from the
+ // perspective of the histogram) swap the hotcold under the writeMtx
+ // lock. A cooldown is awaited (while locked) by comparing the number of
+ // observations with the initiation count. Once they match, then the
+ // last observation on the now cool one has completed. All cool fields must
+ // be merged into the new hot before releasing writeMtx.
+ //
+ // Fields with atomic access first! See alignment constraint:
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
countAndHotIdx uint64
@@ -243,16 +246,14 @@ type histogram struct {
desc *Desc
writeMtx sync.Mutex // Only used in the Write method.
- upperBounds []float64
// Two counts, one is "hot" for lock-free observations, the other is
// "cold" for writing out a dto.Metric. It has to be an array of
// pointers to guarantee 64bit alignment of the histogramCounts, see
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
counts [2]*histogramCounts
- hotIdx int // Index of currently-hot counts. Only used within Write.
- labelPairs []*dto.LabelPair
+ upperBounds []float64
+ labelPairs []*dto.LabelPair
}
func (h *histogram) Desc() *Desc {
@@ -271,11 +272,11 @@ func (h *histogram) Observe(v float64) {
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
i := sort.SearchFloat64s(h.upperBounds, v)
- // We increment h.countAndHotIdx by 2 so that the counter in the upper
- // 63 bits gets incremented by 1. At the same time, we get the new value
+ // We increment h.countAndHotIdx so that the counter in the lower
+ // 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
- n := atomic.AddUint64(&h.countAndHotIdx, 2)
- hotCounts := h.counts[n%2]
+ n := atomic.AddUint64(&h.countAndHotIdx, 1)
+ hotCounts := h.counts[n>>63]
if i < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[i], 1)
@@ -293,72 +294,43 @@
}
func (h *histogram) Write(out *dto.Metric) error {
- var (
- his = &dto.Histogram{}
- buckets = make([]*dto.Bucket, len(h.upperBounds))
- hotCounts, coldCounts *histogramCounts
- count uint64
- )
- // For simplicity, we mutex the rest of this method. It is not in the
- // hot path, i.e. Observe is called much more often than Write. The
- // complication of making Write lock-free isn't worth it.
+ // For simplicity, we protect this whole method by a mutex. It is not in
+ // the hot path, i.e. Observe is called much more often than Write. The
+ // complication of making Write lock-free isn't worth it, if possible at
+ // all.
h.writeMtx.Lock()
defer h.writeMtx.Unlock()
- // This is a bit arcane, which is why the following spells out this if
- // clause in English:
- //
- // If the currently-hot counts struct is #0, we atomically increment
- // h.countAndHotIdx by 1 so that from now on Observe will use the counts
- // struct #1. Furthermore, the atomic increment gives us the new value,
- // which, in its most significant 63 bits, tells us the count of
- // observations done so far up to and including currently ongoing
- // observations still using the counts struct just changed from hot to
- // cold. To have a normal uint64 for the count, we bitshift by 1 and
- // save the result in count. We also set h.hotIdx to 1 for the next
- // Write call, and we will refer to counts #1 as hotCounts and to counts
- // #0 as coldCounts.
- //
- // If the currently-hot counts struct is #1, we do the corresponding
- // things the other way round. We have to _decrement_ h.countAndHotIdx
- // (which is a bit arcane in itself, as we have to express -1 with an
- // unsigned int...).
- if h.hotIdx == 0 {
- count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
- h.hotIdx = 1
- hotCounts = h.counts[1]
- coldCounts = h.counts[0]
- } else {
- count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
- h.hotIdx = 0
- hotCounts = h.counts[0]
- coldCounts = h.counts[1]
- }
+ // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+ // without touching the count bits. See the struct comments for a full
+ // description of the algorithm.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+ // count is contained unchanged in the lower 63 bits.
+ count := n & ((1 << 63) - 1)
+ // The most significant bit tells us which counts is hot. The complement
+ // is thus the cold one.
+ hotCounts := h.counts[n>>63]
+ coldCounts := h.counts[(^n)>>63]
- // Now we have to wait for the now-declared-cold counts to actually cool
- // down, i.e. wait for all observations still using it to finish. That's
- // the case once the count in the cold counts struct is the same as the
- // one atomically retrieved from the upper 63bits of h.countAndHotIdx.
- for {
- if count == atomic.LoadUint64(&coldCounts.count) {
- break
- }
+ // Await cooldown.
+ for count != atomic.LoadUint64(&coldCounts.count) {
runtime.Gosched() // Let observations get work done.
}
- his.SampleCount = proto.Uint64(count)
- his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
+ his := &dto.Histogram{
+ Bucket: make([]*dto.Bucket, len(h.upperBounds)),
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ }
var cumCount uint64
for i, upperBound := range h.upperBounds {
cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
- buckets[i] = &dto.Bucket{
+ his.Bucket[i] = &dto.Bucket{
CumulativeCount: proto.Uint64(cumCount),
UpperBound: proto.Float64(upperBound),
}
}
- his.Bucket = buckets
out.Histogram = his
out.Label = h.labelPairs
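The new countAndHotIdx scheme packs the hot index and the observation count into one word; a standalone sketch of the arithmetic, using plain uint64 values instead of the atomic calls of the real code:

package main

import "fmt"

func main() {
	// countAndHotIdx packs the hot-counts index into the most significant
	// bit and the number of started observations into the lower 63 bits.
	var countAndHotIdx uint64

	// Observe path: adding 1 bumps the observation count and leaves the
	// hot index in the top bit untouched.
	countAndHotIdx++ // the real code uses atomic.AddUint64(&h.countAndHotIdx, 1)
	fmt.Println(countAndHotIdx>>63, countAndHotIdx&(1<<63-1)) // 0 1

	// Write path: adding 1<<63 flips the hot index without changing the
	// count bits; a second flip wraps around and clears the bit again.
	n := countAndHotIdx + 1<<63 // the real code uses atomic.AddUint64(..., 1<<63)
	fmt.Println(n>>63, n&(1<<63-1)) // 1 1
}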


@@ -330,6 +330,8 @@ type fancyResponseWriterDelegator struct {
}
func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+ //remove support from client_golang yet.
return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
}


@@ -74,8 +74,11 @@ type closeNotifierDelegator struct{ *responseWriterDelegator }
type flusherDelegator struct{ *responseWriterDelegator }
type hijackerDelegator struct{ *responseWriterDelegator }
type readerFromDelegator struct{ *responseWriterDelegator }
+ type pusherDelegator struct{ *responseWriterDelegator }
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
+ //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+ //remove support from client_golang yet.
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (d flusherDelegator) Flush() {
@@ -92,6 +95,9 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
d.written += n
return n, err
}
+ func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
+ return d.ResponseWriter.(http.Pusher).Push(target, opts)
+ }
var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
@@ -195,4 +201,157 @@ func init() {
http.CloseNotifier
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
} }
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
return pusherDelegator{d}
}
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
return struct {
*responseWriterDelegator
http.Pusher
http.CloseNotifier
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
}{d, pusherDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
}{d, pusherDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.CloseNotifier
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.Flusher
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
}{d, pusherDelegator{d}, readerFromDelegator{d}}
}
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Flusher
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.Flusher
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
}
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
d := &responseWriterDelegator{
ResponseWriter: w,
observeWriteHeader: observeWriteHeaderFunc,
}
id := 0
//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
//remove support from client_golang yet.
if _, ok := w.(http.CloseNotifier); ok {
id += closeNotifier
}
if _, ok := w.(http.Flusher); ok {
id += flusher
}
if _, ok := w.(http.Hijacker); ok {
id += hijacker
}
if _, ok := w.(io.ReaderFrom); ok {
id += readerFrom
}
if _, ok := w.(http.Pusher); ok {
id += pusher
}
return pickDelegator[id](d)
}
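newDelegator above sums per-interface flags into an index for pickDelegator; a standalone sketch of that computation. The constant values are an assumption based on the iota-style numbering implied by the case comments (16 to 31) above:

package main

import "fmt"

// Assumed to mirror the package's iota-based constants:
// closeNotifier=1, flusher=2, hijacker=4, readerFrom=8, pusher=16.
const (
	closeNotifier = 1 << iota
	flusher
	hijacker
	readerFrom
	pusher
)

func main() {
	// A ResponseWriter supporting Flush, Hijack and Push selects
	// pickDelegator[22], matching the "// 22" case above.
	id := flusher + hijacker + pusher
	fmt.Println(id) // 22
}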


@@ -1,181 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package promhttp
import (
"io"
"net/http"
)
type pusherDelegator struct{ *responseWriterDelegator }
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
return d.ResponseWriter.(http.Pusher).Push(target, opts)
}
func init() {
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
return pusherDelegator{d}
}
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
return struct {
*responseWriterDelegator
http.Pusher
http.CloseNotifier
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
}{d, pusherDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
}{d, pusherDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.CloseNotifier
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.Flusher
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
}{d, pusherDelegator{d}, readerFromDelegator{d}}
}
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Flusher
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.Flusher
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
}
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
d := &responseWriterDelegator{
ResponseWriter: w,
observeWriteHeader: observeWriteHeaderFunc,
}
id := 0
if _, ok := w.(http.CloseNotifier); ok {
id += closeNotifier
}
if _, ok := w.(http.Flusher); ok {
id += flusher
}
if _, ok := w.(http.Hijacker); ok {
id += hijacker
}
if _, ok := w.(io.ReaderFrom); ok {
id += readerFrom
}
if _, ok := w.(http.Pusher); ok {
id += pusher
}
return pickDelegator[id](d)
}


@@ -1,44 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.8
package promhttp
import (
"io"
"net/http"
)
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
d := &responseWriterDelegator{
ResponseWriter: w,
observeWriteHeader: observeWriteHeaderFunc,
}
id := 0
if _, ok := w.(http.CloseNotifier); ok {
id += closeNotifier
}
if _, ok := w.(http.Flusher); ok {
id += flusher
}
if _, ok := w.(http.Hijacker); ok {
id += hijacker
}
if _, ok := w.(io.ReaderFrom); ok {
id += readerFrom
}
return pickDelegator[id](d)
}


@@ -14,7 +14,9 @@
package promhttp
import (
+ "crypto/tls"
"net/http"
+ "net/http/httptrace"
"time"
"github.com/prometheus/client_golang/prometheus"
@@ -95,3 +97,123 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
return resp, err
})
}
// InstrumentTrace is used to offer flexibility in instrumenting the available
// httptrace.ClientTrace hook functions. Each function is passed a float64
// representing the time in seconds since the start of the http request. A user
// may choose to use separately buckets Histograms, or implement custom
// instance labels on a per function basis.
type InstrumentTrace struct {
GotConn func(float64)
PutIdleConn func(float64)
GotFirstResponseByte func(float64)
Got100Continue func(float64)
DNSStart func(float64)
DNSDone func(float64)
ConnectStart func(float64)
ConnectDone func(float64)
TLSHandshakeStart func(float64)
TLSHandshakeDone func(float64)
WroteHeaders func(float64)
Wait100Continue func(float64)
WroteRequest func(float64)
}
// InstrumentRoundTripperTrace is a middleware that wraps the provided
// RoundTripper and reports times to hook functions provided in the
// InstrumentTrace struct. Hook functions that are not present in the provided
// InstrumentTrace struct are ignored. Times reported to the hook functions are
// time since the start of the request. Only with Go1.9+, those times are
// guaranteed to never be negative. (Earlier Go versions are not using a
// monotonic clock.) Note that partitioning of Histograms is expensive and
// should be used judiciously.
//
// For hook functions that receive an error as an argument, no observations are
// made in the event of a non-nil error value.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
start := time.Now()
trace := &httptrace.ClientTrace{
GotConn: func(_ httptrace.GotConnInfo) {
if it.GotConn != nil {
it.GotConn(time.Since(start).Seconds())
}
},
PutIdleConn: func(err error) {
if err != nil {
return
}
if it.PutIdleConn != nil {
it.PutIdleConn(time.Since(start).Seconds())
}
},
DNSStart: func(_ httptrace.DNSStartInfo) {
if it.DNSStart != nil {
it.DNSStart(time.Since(start).Seconds())
}
},
DNSDone: func(_ httptrace.DNSDoneInfo) {
if it.DNSDone != nil {
it.DNSDone(time.Since(start).Seconds())
}
},
ConnectStart: func(_, _ string) {
if it.ConnectStart != nil {
it.ConnectStart(time.Since(start).Seconds())
}
},
ConnectDone: func(_, _ string, err error) {
if err != nil {
return
}
if it.ConnectDone != nil {
it.ConnectDone(time.Since(start).Seconds())
}
},
GotFirstResponseByte: func() {
if it.GotFirstResponseByte != nil {
it.GotFirstResponseByte(time.Since(start).Seconds())
}
},
Got100Continue: func() {
if it.Got100Continue != nil {
it.Got100Continue(time.Since(start).Seconds())
}
},
TLSHandshakeStart: func() {
if it.TLSHandshakeStart != nil {
it.TLSHandshakeStart(time.Since(start).Seconds())
}
},
TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
if err != nil {
return
}
if it.TLSHandshakeDone != nil {
it.TLSHandshakeDone(time.Since(start).Seconds())
}
},
WroteHeaders: func() {
if it.WroteHeaders != nil {
it.WroteHeaders(time.Since(start).Seconds())
}
},
Wait100Continue: func() {
if it.Wait100Continue != nil {
it.Wait100Continue(time.Since(start).Seconds())
}
},
WroteRequest: func(_ httptrace.WroteRequestInfo) {
if it.WroteRequest != nil {
it.WroteRequest(time.Since(start).Seconds())
}
},
}
r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
return next.RoundTrip(r)
})
}
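A minimal sketch of wiring InstrumentRoundTripperTrace to a single hook; the metric name and target URL are assumptions for illustration:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	dnsLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "dns_duration_seconds", // hypothetical metric name
		Help:    "Time from request start until DNS resolution finished.",
		Buckets: prometheus.DefBuckets,
	})
	prometheus.MustRegister(dnsLatency)

	trace := &promhttp.InstrumentTrace{
		DNSDone: func(t float64) { dnsLatency.Observe(t) },
	}
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
	if resp, err := client.Get("https://example.org"); err == nil { // URL is illustrative
		resp.Body.Close()
	}
}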


@@ -1,144 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package promhttp
import (
"context"
"crypto/tls"
"net/http"
"net/http/httptrace"
"time"
)
// InstrumentTrace is used to offer flexibility in instrumenting the available
// httptrace.ClientTrace hook functions. Each function is passed a float64
// representing the time in seconds since the start of the http request. A user
// may choose to use separately buckets Histograms, or implement custom
// instance labels on a per function basis.
type InstrumentTrace struct {
GotConn func(float64)
PutIdleConn func(float64)
GotFirstResponseByte func(float64)
Got100Continue func(float64)
DNSStart func(float64)
DNSDone func(float64)
ConnectStart func(float64)
ConnectDone func(float64)
TLSHandshakeStart func(float64)
TLSHandshakeDone func(float64)
WroteHeaders func(float64)
Wait100Continue func(float64)
WroteRequest func(float64)
}
// InstrumentRoundTripperTrace is a middleware that wraps the provided
// RoundTripper and reports times to hook functions provided in the
// InstrumentTrace struct. Hook functions that are not present in the provided
// InstrumentTrace struct are ignored. Times reported to the hook functions are
// time since the start of the request. Only with Go1.9+, those times are
// guaranteed to never be negative. (Earlier Go versions are not using a
// monotonic clock.) Note that partitioning of Histograms is expensive and
// should be used judiciously.
//
// For hook functions that receive an error as an argument, no observations are
// made in the event of a non-nil error value.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
start := time.Now()
trace := &httptrace.ClientTrace{
GotConn: func(_ httptrace.GotConnInfo) {
if it.GotConn != nil {
it.GotConn(time.Since(start).Seconds())
}
},
PutIdleConn: func(err error) {
if err != nil {
return
}
if it.PutIdleConn != nil {
it.PutIdleConn(time.Since(start).Seconds())
}
},
DNSStart: func(_ httptrace.DNSStartInfo) {
if it.DNSStart != nil {
it.DNSStart(time.Since(start).Seconds())
}
},
DNSDone: func(_ httptrace.DNSDoneInfo) {
if it.DNSDone != nil {
it.DNSDone(time.Since(start).Seconds())
}
},
ConnectStart: func(_, _ string) {
if it.ConnectStart != nil {
it.ConnectStart(time.Since(start).Seconds())
}
},
ConnectDone: func(_, _ string, err error) {
if err != nil {
return
}
if it.ConnectDone != nil {
it.ConnectDone(time.Since(start).Seconds())
}
},
GotFirstResponseByte: func() {
if it.GotFirstResponseByte != nil {
it.GotFirstResponseByte(time.Since(start).Seconds())
}
},
Got100Continue: func() {
if it.Got100Continue != nil {
it.Got100Continue(time.Since(start).Seconds())
}
},
TLSHandshakeStart: func() {
if it.TLSHandshakeStart != nil {
it.TLSHandshakeStart(time.Since(start).Seconds())
}
},
TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
if err != nil {
return
}
if it.TLSHandshakeDone != nil {
it.TLSHandshakeDone(time.Since(start).Seconds())
}
},
WroteHeaders: func() {
if it.WroteHeaders != nil {
it.WroteHeaders(time.Since(start).Seconds())
}
},
Wait100Continue: func() {
if it.Wait100Continue != nil {
it.Wait100Continue(time.Since(start).Seconds())
}
},
WroteRequest: func(_ httptrace.WroteRequestInfo) {
if it.WroteRequest != nil {
it.WroteRequest(time.Since(start).Seconds())
}
},
}
r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
return next.RoundTrip(r)
})
}


@@ -127,9 +127,10 @@ type SummaryOpts struct {
// its zero value (i.e. nil). To create a Summary without Objectives,
// set it to an empty map (i.e. map[float64]float64{}).
//
- // Deprecated: Note that the current value of DefObjectives is
- // deprecated. It will be replaced by an empty map in v0.10 of the
- // library. Please explicitly set Objectives to the desired value.
+ // Note that the current value of DefObjectives is deprecated. It will
+ // be replaced by an empty map in v0.10 of the library. Please
+ // explicitly set Objectives to the desired value to avoid problems
+ // during the transition.
Objectives map[float64]float64
// MaxAge defines the duration for which an observation stays relevant
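Following the advice above, a minimal sketch that sets Objectives explicitly (metric name and quantile error targets are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Explicit Objectives: quantile -> allowed absolute error.
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "request_duration_seconds", // hypothetical metric name
		Help:       "Request latency.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.042)
}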
@@ -405,18 +406,21 @@ type summaryCounts struct {
}
type noObjectivesSummary struct {
- // countAndHotIdx is a complicated one. For lock-free yet atomic
- // observations, we need to save the total count of observations again,
- // combined with the index of the currently-hot counts struct, so that
- // we can perform the operation on both values atomically. The least
- // significant bit defines the hot counts struct. The remaining 63 bits
- // represent the total count of observations. This happens under the
- // assumption that the 63bit count will never overflow. Rationale: An
- // observations takes about 30ns. Let's assume it could happen in
- // 10ns. Overflowing the counter will then take at least (2^63)*10ns,
- // which is about 3000 years.
+ // countAndHotIdx enables lock-free writes with use of atomic updates.
+ // The most significant bit is the hot index [0 or 1] of the count field
+ // below. Observe calls update the hot one. All remaining bits count the
+ // number of Observe calls. Observe starts by incrementing this counter,
+ // and finish by incrementing the count field in the respective
+ // summaryCounts, as a marker for completion.
//
- // This has to be first in the struct for 64bit alignment. See
+ // Calls of the Write method (which are non-mutating reads from the
+ // perspective of the summary) swap the hotcold under the writeMtx
+ // lock. A cooldown is awaited (while locked) by comparing the number of
+ // observations with the initiation count. Once they match, then the
+ // last observation on the now cool one has completed. All cool fields must
+ // be merged into the new hot before releasing writeMtx.
+ // Fields with atomic access first! See alignment constraint:
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
countAndHotIdx uint64
@@ -429,7 +433,6 @@ type noObjectivesSummary struct {
// pointers to guarantee 64bit alignment of the histogramCounts, see
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
counts [2]*summaryCounts
- hotIdx int // Index of currently-hot counts. Only used within Write.
labelPairs []*dto.LabelPair
}
@@ -439,11 +442,11 @@ func (s *noObjectivesSummary) Desc() *Desc {
}
func (s *noObjectivesSummary) Observe(v float64) {
- // We increment s.countAndHotIdx by 2 so that the counter in the upper
- // 63 bits gets incremented by 1. At the same time, we get the new value
+ // We increment h.countAndHotIdx so that the counter in the lower
+ // 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
- n := atomic.AddUint64(&s.countAndHotIdx, 2)
- hotCounts := s.counts[n%2]
+ n := atomic.AddUint64(&s.countAndHotIdx, 1)
+ hotCounts := s.counts[n>>63]
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
@@ -458,61 +461,33 @@
}
func (s *noObjectivesSummary) Write(out *dto.Metric) error {
- var (
- sum = &dto.Summary{}
- hotCounts, coldCounts *summaryCounts
- count uint64
- )
- // For simplicity, we mutex the rest of this method. It is not in the
- // hot path, i.e. Observe is called much more often than Write. The
- // complication of making Write lock-free isn't worth it.
+ // For simplicity, we protect this whole method by a mutex. It is not in
+ // the hot path, i.e. Observe is called much more often than Write. The
+ // complication of making Write lock-free isn't worth it, if possible at
+ // all.
s.writeMtx.Lock()
defer s.writeMtx.Unlock()
- // This is a bit arcane, which is why the following spells out this if
- // clause in English:
- //
- // If the currently-hot counts struct is #0, we atomically increment
- // s.countAndHotIdx by 1 so that from now on Observe will use the counts
- // struct #1. Furthermore, the atomic increment gives us the new value,
- // which, in its most significant 63 bits, tells us the count of
- // observations done so far up to and including currently ongoing
- // observations still using the counts struct just changed from hot to
- // cold. To have a normal uint64 for the count, we bitshift by 1 and
- // save the result in count. We also set s.hotIdx to 1 for the next
- // Write call, and we will refer to counts #1 as hotCounts and to counts
- // #0 as coldCounts.
- //
- // If the currently-hot counts struct is #1, we do the corresponding
- // things the other way round. We have to _decrement_ s.countAndHotIdx
- // (which is a bit arcane in itself, as we have to express -1 with an
- // unsigned int...).
- if s.hotIdx == 0 {
- count = atomic.AddUint64(&s.countAndHotIdx, 1) >> 1
- s.hotIdx = 1
- hotCounts = s.counts[1]
- coldCounts = s.counts[0]
- } else {
- count = atomic.AddUint64(&s.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
- s.hotIdx = 0
- hotCounts = s.counts[0]
- coldCounts = s.counts[1]
- }
+ // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+ // without touching the count bits. See the struct comments for a full
+ // description of the algorithm.
+ n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+ // count is contained unchanged in the lower 63 bits.
+ count := n & ((1 << 63) - 1)
+ // The most significant bit tells us which counts is hot. The complement
+ // is thus the cold one.
+ hotCounts := s.counts[n>>63]
+ coldCounts := s.counts[(^n)>>63]
- // Now we have to wait for the now-declared-cold counts to actually cool
- // down, i.e. wait for all observations still using it to finish. That's
- // the case once the count in the cold counts struct is the same as the
- // one atomically retrieved from the upper 63bits of s.countAndHotIdx.
- for {
- if count == atomic.LoadUint64(&coldCounts.count) {
- break
- }
+ // Await cooldown.
+ for count != atomic.LoadUint64(&coldCounts.count) {
runtime.Gosched() // Let observations get work done.
}
- sum.SampleCount = proto.Uint64(count)
- sum.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
+ sum := &dto.Summary{
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ }
out.Summary = sum
out.Label = s.labelPairs

vendor/github.com/prometheus/procfs/.golangci.yml (new vendored file)

@@ -0,0 +1,6 @@
# Run only staticcheck for now. Additional linters will be enabled one-by-one.
linters:
enable:
- staticcheck
- govet
disable-all: true


@@ -1,2 +1,2 @@
- * Tobias Schmidt <tobidt@gmail.com> @grobie
* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
+ * Paul Gier <pgier@redhat.com> @pgier


@@ -17,14 +17,12 @@ include Makefile.common
./ttar -C $(dir $*) -x -f $*.ttar
touch $@
- update_fixtures: fixtures.ttar sysfs/fixtures.ttar
- %fixtures.ttar: %/fixtures
- rm -v $(dir $*)fixtures/.unpacked
- ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/
+ update_fixtures:
+ rm -vf fixtures/.unpacked
+ ./ttar -c -f fixtures.ttar fixtures/
.PHONY: build
build:
.PHONY: test
- test: fixtures/.unpacked sysfs/fixtures/.unpacked common-test
+ test: fixtures/.unpacked common-test


@ -29,12 +29,15 @@ GO ?= go
GOFMT ?= $(GO)fmt GOFMT ?= $(GO)fmt
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
GOOPTS ?= GOOPTS ?=
GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
GO_VERSION ?= $(shell $(GO) version) GO_VERSION ?= $(shell $(GO) version)
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
unexport GOVENDOR GOVENDOR :=
GO111MODULE :=
ifeq (, $(PRE_GO_111)) ifeq (, $(PRE_GO_111))
ifneq (,$(wildcard go.mod)) ifneq (,$(wildcard go.mod))
# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
@ -55,32 +58,57 @@ $(warning Some recipes may not work as expected as the current Go runtime is '$(
# This repository isn't using Go modules (yet). # This repository isn't using Go modules (yet).
GOVENDOR := $(FIRST_GOPATH)/bin/govendor GOVENDOR := $(FIRST_GOPATH)/bin/govendor
endif endif
unexport GO111MODULE
endif endif
PROMU := $(FIRST_GOPATH)/bin/promu PROMU := $(FIRST_GOPATH)/bin/promu
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
pkgs = ./... pkgs = ./...
GO_VERSION ?= $(shell $(GO) version) ifeq (arm, $(GOHOSTARCH))
GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION))) GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
else
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif
PROMU_VERSION ?= 0.2.0 PROMU_VERSION ?= 0.3.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.16.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
endif
endif
PREFIX ?= $(shell pwd) PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd) BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKER_REPO ?= prom DOCKER_REPO ?= prom
.PHONY: all DOCKER_ARCHS ?= amd64
all: precheck style staticcheck unused build test
BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64
test-flags := -race
endif
endif
# This rule is used to forward a target like "build" to "common-build". This # This rule is used to forward a target like "build" to "common-build". This
# allows a new "build" target to be defined in a Makefile which includes this # allows a new "build" target to be defined in a Makefile which includes this
# one and override "common-build" without override warnings. # one and override "common-build" without override warnings.
%: common-% ; %: common-% ;
.PHONY: common-all
common-all: precheck style check_license lint unused build test
.PHONY: common-style .PHONY: common-style
common-style: common-style:
@echo ">> checking code style" @echo ">> checking code style"
@ -102,6 +130,15 @@ common-check_license:
exit 1; \ exit 1; \
fi fi
.PHONY: common-deps
common-deps:
@echo ">> getting dependencies"
ifdef GO111MODULE
GO111MODULE=$(GO111MODULE) $(GO) mod download
else
$(GO) get $(GOOPTS) -t ./...
endif
.PHONY: common-test-short .PHONY: common-test-short
common-test-short: common-test-short:
@echo ">> running short tests" @echo ">> running short tests"
@ -110,26 +147,35 @@ common-test-short:
.PHONY: common-test .PHONY: common-test
common-test: common-test:
@echo ">> running all tests" @echo ">> running all tests"
GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs) GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs)
.PHONY: common-format .PHONY: common-format
common-format: common-format:
@echo ">> formatting code" @echo ">> formatting code"
GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs) GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
.PHONY: common-vet .PHONY: common-vet
common-vet: common-vet:
@echo ">> vetting code" @echo ">> vetting code"
GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
.PHONY: common-staticcheck .PHONY: common-lint
common-staticcheck: $(STATICCHECK) common-lint: $(GOLANGCI_LINT)
@echo ">> running staticcheck" ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
ifdef GO111MODULE ifdef GO111MODULE
GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs) # 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
# Otherwise staticcheck might fail randomly for some reason not yet explained.
GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
else else
$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) $(GOLANGCI_LINT) run $(pkgs)
endif endif
endif
# For backward-compatibility.
.PHONY: common-staticcheck
common-staticcheck: lint
.PHONY: common-unused .PHONY: common-unused
common-unused: $(GOVENDOR) common-unused: $(GOVENDOR)
@ -140,8 +186,9 @@ else
ifdef GO111MODULE ifdef GO111MODULE
@echo ">> running check for unused/missing packages in go.mod" @echo ">> running check for unused/missing packages in go.mod"
GO111MODULE=$(GO111MODULE) $(GO) mod tidy GO111MODULE=$(GO111MODULE) $(GO) mod tidy
ifeq (,$(wildcard vendor))
@git diff --exit-code -- go.sum go.mod @git diff --exit-code -- go.sum go.mod
ifneq (,$(wildcard vendor)) else
@echo ">> running check for unused packages in vendor/" @echo ">> running check for unused packages in vendor/"
GO111MODULE=$(GO111MODULE) $(GO) mod vendor GO111MODULE=$(GO111MODULE) $(GO) mod vendor
@git diff --exit-code -- go.sum go.mod vendor/ @git diff --exit-code -- go.sum go.mod vendor/
@ -159,45 +206,48 @@ common-tarball: promu
@echo ">> building release tarball" @echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
.PHONY: common-docker .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: common-docker: $(BUILD_DOCKER_ARCHS)
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . $(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
.
.PHONY: common-docker-publish .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
.PHONY: common-docker-tag-latest .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest" $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
.PHONY: common-docker-manifest
common-docker-manifest:
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
.PHONY: promu .PHONY: promu
promu: $(PROMU) promu: $(PROMU)
$(PROMU): $(PROMU):
curl -s -L $(PROMU_URL) | tar -xvz -C /tmp $(eval PROMU_TMP := $(shell mktemp -d))
mkdir -v -p $(FIRST_GOPATH)/bin curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU) mkdir -p $(FIRST_GOPATH)/bin
cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
rm -r $(PROMU_TMP)
.PHONY: proto .PHONY: proto
proto: proto:
@echo ">> generating code from proto files" @echo ">> generating code from proto files"
@./scripts/genproto.sh @./scripts/genproto.sh
.PHONY: $(STATICCHECK) ifdef GOLANGCI_LINT
$(STATICCHECK): $(GOLANGCI_LINT):
ifdef GO111MODULE mkdir -p $(FIRST_GOPATH)/bin
# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}. curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
# See https://github.com/golang/go/issues/27643.
# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules.
tmpModule=$$(mktemp -d 2>&1) && \
mkdir -p $${tmpModule}/staticcheck && \
cd "$${tmpModule}"/staticcheck && \
GO111MODULE=on $(GO) mod init example.com/staticcheck && \
GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \
rm -rf $${tmpModule};
else
GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck
endif endif
ifdef GOVENDOR ifdef GOVENDOR
@ -212,7 +262,6 @@ precheck::
define PRECHECK_COMMAND_template = define PRECHECK_COMMAND_template =
precheck:: $(1)_precheck precheck:: $(1)_precheck
PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
.PHONY: $(1)_precheck .PHONY: $(1)_precheck
$(1)_precheck: $(1)_precheck:


@ -43,7 +43,7 @@ func NewBuddyInfo() ([]BuddyInfo, error) {
// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. // NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
file, err := os.Open(fs.Path("buddyinfo")) file, err := os.Open(fs.proc.Path("buddyinfo"))
if err != nil { if err != nil {
return nil, err return nil, err
} }

File diff suppressed because it is too large.


@ -14,69 +14,24 @@
package procfs package procfs
import ( import (
"fmt" "github.com/prometheus/procfs/internal/fs"
"os"
"path"
"github.com/prometheus/procfs/nfs"
"github.com/prometheus/procfs/xfs"
) )
// FS represents the pseudo-filesystem proc, which provides an interface to // FS represents the pseudo-filesystem sys, which provides an interface to
// kernel data structures. // kernel data structures.
type FS string type FS struct {
proc fs.FS
}
// DefaultMountPoint is the common mount point of the proc filesystem. // DefaultMountPoint is the common mount point of the proc filesystem.
const DefaultMountPoint = "/proc" const DefaultMountPoint = fs.DefaultProcMountPoint
// NewFS returns a new FS mounted under the given mountPoint. It will error // NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
// if the mount point can't be read. // if the mount point directory can't be read or is a file.
func NewFS(mountPoint string) (FS, error) { func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint) fs, err := fs.NewFS(mountPoint)
if err != nil { if err != nil {
return "", fmt.Errorf("could not read %s: %s", mountPoint, err) return FS{}, err
} }
if !info.IsDir() { return FS{fs}, nil
return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
}
return FS(mountPoint), nil
}
// Path returns the path of the given subsystem relative to the procfs root.
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}
// XFSStats retrieves XFS filesystem runtime statistics.
func (fs FS) XFSStats() (*xfs.Stats, error) {
f, err := os.Open(fs.Path("fs/xfs/stat"))
if err != nil {
return nil, err
}
defer f.Close()
return xfs.ParseStats(f)
}
// NFSClientRPCStats retrieves NFS client RPC statistics.
func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
f, err := os.Open(fs.Path("net/rpc/nfs"))
if err != nil {
return nil, err
}
defer f.Close()
return nfs.ParseClientRPCStats(f)
}
// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
f, err := os.Open(fs.Path("net/rpc/nfsd"))
if err != nil {
return nil, err
}
defer f.Close()
return nfs.ParseServerRPCStats(f)
} }
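With this restructure, procfs.FS becomes a thin wrapper around the new internal fs.FS, and the XFS/NFS helpers move out of the public type. A rough usage sketch of the constructor as it stands after the change (illustrative only, minimal error handling):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Mount-point validation happens once, inside NewFS.
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	// Accessors resolve their files through the embedded internal fs.FS.
	info, err := fs.NewBuddyInfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed %d buddyinfo entries\n", len(info))
}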


@ -1 +1,3 @@
module github.com/prometheus/procfs module github.com/prometheus/procfs
require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4

2
vendor/github.com/prometheus/procfs/go.sum generated vendored Normal file

@ -0,0 +1,2 @@
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

52
vendor/github.com/prometheus/procfs/internal/fs/fs.go generated vendored Normal file

@ -0,0 +1,52 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fs
import (
"fmt"
"os"
"path/filepath"
)
const (
// DefaultProcMountPoint is the common mount point of the proc filesystem.
DefaultProcMountPoint = "/proc"
// DefaultSysMountPoint is the common mount point of the sys filesystem.
DefaultSysMountPoint = "/sys"
)
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
// interface to kernel data structures.
type FS string
// NewFS returns a new FS mounted under the given mountPoint. It will error
// if the mount point can't be read.
func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint)
if err != nil {
return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
}
if !info.IsDir() {
return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
}
return FS(mountPoint), nil
}
// Path appends the given path elements to the filesystem path, adding separators
// as necessary.
func (fs FS) Path(p ...string) string {
return filepath.Join(append([]string{string(fs)}, p...)...)
}


@ -1,59 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"io/ioutil"
"strconv"
"strings"
)
// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {
us := make([]uint32, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 32)
if err != nil {
return nil, err
}
us = append(us, uint32(u))
}
return us, nil
}
// ParseUint64s parses a slice of strings into a slice of uint64s.
func ParseUint64s(ss []string) ([]uint64, error) {
us := make([]uint64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
us = append(us, u)
}
return us, nil
}
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}


@ -1,45 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package util
import (
"bytes"
"os"
"syscall"
)
// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
func SysReadFile(file string) (string, error) {
f, err := os.Open(file)
if err != nil {
return "", err
}
defer f.Close()
// On some machines, hwmon drivers are broken and return EAGAIN. This causes
// Go's ioutil.ReadFile implementation to poll forever.
//
// Since we either want to read data or bail immediately, do the simplest
// possible read using syscall directly.
b := make([]byte, 128)
n, err := syscall.Read(int(f.Fd()), b)
if err != nil {
return "", err
}
return string(bytes.TrimSpace(b[:n])), nil
}


@ -74,7 +74,7 @@ func NewIPVSStats() (IPVSStats, error) {
// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. // NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
func (fs FS) NewIPVSStats() (IPVSStats, error) { func (fs FS) NewIPVSStats() (IPVSStats, error) {
file, err := os.Open(fs.Path("net/ip_vs_stats")) file, err := os.Open(fs.proc.Path("net/ip_vs_stats"))
if err != nil { if err != nil {
return IPVSStats{}, err return IPVSStats{}, err
} }
@ -143,7 +143,7 @@ func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. // NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
file, err := os.Open(fs.Path("net/ip_vs")) file, err := os.Open(fs.proc.Path("net/ip_vs"))
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -44,7 +44,7 @@ type MDStat struct {
// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. // ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
mdStatusFilePath := fs.Path("mdstat") mdStatusFilePath := fs.proc.Path("mdstat")
content, err := ioutil.ReadFile(mdStatusFilePath) content, err := ioutil.ReadFile(mdStatusFilePath)
if err != nil { if err != nil {
return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)


@ -69,6 +69,8 @@ type MountStats interface {
type MountStatsNFS struct { type MountStatsNFS struct {
// The version of statistics provided. // The version of statistics provided.
StatVersion string StatVersion string
// The optional mountaddr of the NFS mount.
MountAddress string
// The age of the NFS mount. // The age of the NFS mount.
Age time.Duration Age time.Duration
// Statistics related to byte counters for various operations. // Statistics related to byte counters for various operations.
@ -317,6 +319,7 @@ func parseMount(ss []string) (*Mount, error) {
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
// Field indicators for parsing specific types of data // Field indicators for parsing specific types of data
const ( const (
fieldOpts = "opts:"
fieldAge = "age:" fieldAge = "age:"
fieldBytes = "bytes:" fieldBytes = "bytes:"
fieldEvents = "events:" fieldEvents = "events:"
@ -338,6 +341,13 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
} }
switch ss[0] { switch ss[0] {
case fieldOpts:
for _, opt := range strings.Split(ss[1], ",") {
split := strings.Split(opt, "=")
if len(split) == 2 && split[0] == "mountaddr" {
stats.MountAddress = split[1]
}
}
case fieldAge: case fieldAge:
// Age integer is in seconds // Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s") d, err := time.ParseDuration(ss[1] + "s")
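The new opts: branch above scans the comma-separated mount options and records the value of any mountaddr= entry in MountStatsNFS.MountAddress. A tiny standalone sketch of that extraction logic; extractMountAddress is a hypothetical helper for illustration, not anything exported by procfs:

package main

import (
	"fmt"
	"strings"
)

// extractMountAddress scans a comma-separated NFS option list and returns the
// value of the mountaddr option, or "" if it is absent.
func extractMountAddress(opts string) string {
	for _, opt := range strings.Split(opts, ",") {
		if kv := strings.Split(opt, "="); len(kv) == 2 && kv[0] == "mountaddr" {
			return kv[1]
		}
	}
	return ""
}

func main() {
	fmt.Println(extractMountAddress("rw,vers=4.0,rsize=1048576,mountaddr=192.168.1.1,local_lock=none"))
	// prints: 192.168.1.1
}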


@ -59,7 +59,7 @@ func NewNetDev() (NetDev, error) {
// NewNetDev returns kernel/system statistics read from /proc/net/dev. // NewNetDev returns kernel/system statistics read from /proc/net/dev.
func (fs FS) NewNetDev() (NetDev, error) { func (fs FS) NewNetDev() (NetDev, error) {
return newNetDev(fs.Path("net/dev")) return newNetDev(fs.proc.Path("net/dev"))
} }
// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. // NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.


@ -1,263 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package nfs implements parsing of /proc/net/rpc/nfsd.
// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
package nfs
// ReplyCache models the "rc" line.
type ReplyCache struct {
Hits uint64
Misses uint64
NoCache uint64
}
// FileHandles models the "fh" line.
type FileHandles struct {
Stale uint64
TotalLookups uint64
AnonLookups uint64
DirNoCache uint64
NoDirNoCache uint64
}
// InputOutput models the "io" line.
type InputOutput struct {
Read uint64
Write uint64
}
// Threads models the "th" line.
type Threads struct {
Threads uint64
FullCnt uint64
}
// ReadAheadCache models the "ra" line.
type ReadAheadCache struct {
CacheSize uint64
CacheHistogram []uint64
NotFound uint64
}
// Network models the "net" line.
type Network struct {
NetCount uint64
UDPCount uint64
TCPCount uint64
TCPConnect uint64
}
// ClientRPC models the nfs "rpc" line.
type ClientRPC struct {
RPCCount uint64
Retransmissions uint64
AuthRefreshes uint64
}
// ServerRPC models the nfsd "rpc" line.
type ServerRPC struct {
RPCCount uint64
BadCnt uint64
BadFmt uint64
BadAuth uint64
BadcInt uint64
}
// V2Stats models the "proc2" line.
type V2Stats struct {
Null uint64
GetAttr uint64
SetAttr uint64
Root uint64
Lookup uint64
ReadLink uint64
Read uint64
WrCache uint64
Write uint64
Create uint64
Remove uint64
Rename uint64
Link uint64
SymLink uint64
MkDir uint64
RmDir uint64
ReadDir uint64
FsStat uint64
}
// V3Stats models the "proc3" line.
type V3Stats struct {
Null uint64
GetAttr uint64
SetAttr uint64
Lookup uint64
Access uint64
ReadLink uint64
Read uint64
Write uint64
Create uint64
MkDir uint64
SymLink uint64
MkNod uint64
Remove uint64
RmDir uint64
Rename uint64
Link uint64
ReadDir uint64
ReadDirPlus uint64
FsStat uint64
FsInfo uint64
PathConf uint64
Commit uint64
}
// ClientV4Stats models the nfs "proc4" line.
type ClientV4Stats struct {
Null uint64
Read uint64
Write uint64
Commit uint64
Open uint64
OpenConfirm uint64
OpenNoattr uint64
OpenDowngrade uint64
Close uint64
Setattr uint64
FsInfo uint64
Renew uint64
SetClientID uint64
SetClientIDConfirm uint64
Lock uint64
Lockt uint64
Locku uint64
Access uint64
Getattr uint64
Lookup uint64
LookupRoot uint64
Remove uint64
Rename uint64
Link uint64
Symlink uint64
Create uint64
Pathconf uint64
StatFs uint64
ReadLink uint64
ReadDir uint64
ServerCaps uint64
DelegReturn uint64
GetACL uint64
SetACL uint64
FsLocations uint64
ReleaseLockowner uint64
Secinfo uint64
FsidPresent uint64
ExchangeID uint64
CreateSession uint64
DestroySession uint64
Sequence uint64
GetLeaseTime uint64
ReclaimComplete uint64
LayoutGet uint64
GetDeviceInfo uint64
LayoutCommit uint64
LayoutReturn uint64
SecinfoNoName uint64
TestStateID uint64
FreeStateID uint64
GetDeviceList uint64
BindConnToSession uint64
DestroyClientID uint64
Seek uint64
Allocate uint64
DeAllocate uint64
LayoutStats uint64
Clone uint64
}
// ServerV4Stats models the nfsd "proc4" line.
type ServerV4Stats struct {
Null uint64
Compound uint64
}
// V4Ops models the "proc4ops" line: NFSv4 operations
// Variable list, see:
// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
type V4Ops struct {
//Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
Op0Unused uint64
Op1Unused uint64
Op2Future uint64
Access uint64
Close uint64
Commit uint64
Create uint64
DelegPurge uint64
DelegReturn uint64
GetAttr uint64
GetFH uint64
Link uint64
Lock uint64
Lockt uint64
Locku uint64
Lookup uint64
LookupRoot uint64
Nverify uint64
Open uint64
OpenAttr uint64
OpenConfirm uint64
OpenDgrd uint64
PutFH uint64
PutPubFH uint64
PutRootFH uint64
Read uint64
ReadDir uint64
ReadLink uint64
Remove uint64
Rename uint64
Renew uint64
RestoreFH uint64
SaveFH uint64
SecInfo uint64
SetAttr uint64
Verify uint64
Write uint64
RelLockOwner uint64
}
// ClientRPCStats models all stats from /proc/net/rpc/nfs.
type ClientRPCStats struct {
Network Network
ClientRPC ClientRPC
V2Stats V2Stats
V3Stats V3Stats
ClientV4Stats ClientV4Stats
}
// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
type ServerRPCStats struct {
ReplyCache ReplyCache
FileHandles FileHandles
InputOutput InputOutput
Threads Threads
ReadAheadCache ReadAheadCache
Network Network
ServerRPC ServerRPC
V2Stats V2Stats
V3Stats V3Stats
ServerV4Stats ServerV4Stats
V4Ops V4Ops
}


@ -1,317 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nfs
import (
"fmt"
)
func parseReplyCache(v []uint64) (ReplyCache, error) {
if len(v) != 3 {
return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
}
return ReplyCache{
Hits: v[0],
Misses: v[1],
NoCache: v[2],
}, nil
}
func parseFileHandles(v []uint64) (FileHandles, error) {
if len(v) != 5 {
return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v)
}
return FileHandles{
Stale: v[0],
TotalLookups: v[1],
AnonLookups: v[2],
DirNoCache: v[3],
NoDirNoCache: v[4],
}, nil
}
func parseInputOutput(v []uint64) (InputOutput, error) {
if len(v) != 2 {
return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
}
return InputOutput{
Read: v[0],
Write: v[1],
}, nil
}
func parseThreads(v []uint64) (Threads, error) {
if len(v) != 2 {
return Threads{}, fmt.Errorf("invalid Threads line %q", v)
}
return Threads{
Threads: v[0],
FullCnt: v[1],
}, nil
}
func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
if len(v) != 12 {
return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
}
return ReadAheadCache{
CacheSize: v[0],
CacheHistogram: v[1:11],
NotFound: v[11],
}, nil
}
func parseNetwork(v []uint64) (Network, error) {
if len(v) != 4 {
return Network{}, fmt.Errorf("invalid Network line %q", v)
}
return Network{
NetCount: v[0],
UDPCount: v[1],
TCPCount: v[2],
TCPConnect: v[3],
}, nil
}
func parseServerRPC(v []uint64) (ServerRPC, error) {
if len(v) != 5 {
return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
}
return ServerRPC{
RPCCount: v[0],
BadCnt: v[1],
BadFmt: v[2],
BadAuth: v[3],
BadcInt: v[4],
}, nil
}
func parseClientRPC(v []uint64) (ClientRPC, error) {
if len(v) != 3 {
return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
}
return ClientRPC{
RPCCount: v[0],
Retransmissions: v[1],
AuthRefreshes: v[2],
}, nil
}
func parseV2Stats(v []uint64) (V2Stats, error) {
values := int(v[0])
if len(v[1:]) != values || values != 18 {
return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
}
return V2Stats{
Null: v[1],
GetAttr: v[2],
SetAttr: v[3],
Root: v[4],
Lookup: v[5],
ReadLink: v[6],
Read: v[7],
WrCache: v[8],
Write: v[9],
Create: v[10],
Remove: v[11],
Rename: v[12],
Link: v[13],
SymLink: v[14],
MkDir: v[15],
RmDir: v[16],
ReadDir: v[17],
FsStat: v[18],
}, nil
}
func parseV3Stats(v []uint64) (V3Stats, error) {
values := int(v[0])
if len(v[1:]) != values || values != 22 {
return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
}
return V3Stats{
Null: v[1],
GetAttr: v[2],
SetAttr: v[3],
Lookup: v[4],
Access: v[5],
ReadLink: v[6],
Read: v[7],
Write: v[8],
Create: v[9],
MkDir: v[10],
SymLink: v[11],
MkNod: v[12],
Remove: v[13],
RmDir: v[14],
Rename: v[15],
Link: v[16],
ReadDir: v[17],
ReadDirPlus: v[18],
FsStat: v[19],
FsInfo: v[20],
PathConf: v[21],
Commit: v[22],
}, nil
}
func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
values := int(v[0])
if len(v[1:]) != values {
return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v)
}
// This function currently supports mapping 59 NFS v4 client stats. Older
// kernels may emit fewer stats, so we must detect this and pad out the
// values to match the expected slice size.
if values < 59 {
newValues := make([]uint64, 60)
copy(newValues, v)
v = newValues
}
return ClientV4Stats{
Null: v[1],
Read: v[2],
Write: v[3],
Commit: v[4],
Open: v[5],
OpenConfirm: v[6],
OpenNoattr: v[7],
OpenDowngrade: v[8],
Close: v[9],
Setattr: v[10],
FsInfo: v[11],
Renew: v[12],
SetClientID: v[13],
SetClientIDConfirm: v[14],
Lock: v[15],
Lockt: v[16],
Locku: v[17],
Access: v[18],
Getattr: v[19],
Lookup: v[20],
LookupRoot: v[21],
Remove: v[22],
Rename: v[23],
Link: v[24],
Symlink: v[25],
Create: v[26],
Pathconf: v[27],
StatFs: v[28],
ReadLink: v[29],
ReadDir: v[30],
ServerCaps: v[31],
DelegReturn: v[32],
GetACL: v[33],
SetACL: v[34],
FsLocations: v[35],
ReleaseLockowner: v[36],
Secinfo: v[37],
FsidPresent: v[38],
ExchangeID: v[39],
CreateSession: v[40],
DestroySession: v[41],
Sequence: v[42],
GetLeaseTime: v[43],
ReclaimComplete: v[44],
LayoutGet: v[45],
GetDeviceInfo: v[46],
LayoutCommit: v[47],
LayoutReturn: v[48],
SecinfoNoName: v[49],
TestStateID: v[50],
FreeStateID: v[51],
GetDeviceList: v[52],
BindConnToSession: v[53],
DestroyClientID: v[54],
Seek: v[55],
Allocate: v[56],
DeAllocate: v[57],
LayoutStats: v[58],
Clone: v[59],
}, nil
}
func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
values := int(v[0])
if len(v[1:]) != values || values != 2 {
return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
}
return ServerV4Stats{
Null: v[1],
Compound: v[2],
}, nil
}
func parseV4Ops(v []uint64) (V4Ops, error) {
values := int(v[0])
if len(v[1:]) != values || values < 39 {
return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
}
stats := V4Ops{
Op0Unused: v[1],
Op1Unused: v[2],
Op2Future: v[3],
Access: v[4],
Close: v[5],
Commit: v[6],
Create: v[7],
DelegPurge: v[8],
DelegReturn: v[9],
GetAttr: v[10],
GetFH: v[11],
Link: v[12],
Lock: v[13],
Lockt: v[14],
Locku: v[15],
Lookup: v[16],
LookupRoot: v[17],
Nverify: v[18],
Open: v[19],
OpenAttr: v[20],
OpenConfirm: v[21],
OpenDgrd: v[22],
PutFH: v[23],
PutPubFH: v[24],
PutRootFH: v[25],
Read: v[26],
ReadDir: v[27],
ReadLink: v[28],
Remove: v[29],
Rename: v[30],
Renew: v[31],
RestoreFH: v[32],
SaveFH: v[33],
SecInfo: v[34],
SetAttr: v[35],
Verify: v[36],
Write: v[37],
RelLockOwner: v[38],
}
return stats, nil
}


@ -1,67 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nfs
import (
"bufio"
"fmt"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
stats := &ClientRPCStats{}
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
// require at least <key> <value>
if len(parts) < 2 {
return nil, fmt.Errorf("invalid NFS metric line %q", line)
}
values, err := util.ParseUint64s(parts[1:])
if err != nil {
return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
}
switch metricLine := parts[0]; metricLine {
case "net":
stats.Network, err = parseNetwork(values)
case "rpc":
stats.ClientRPC, err = parseClientRPC(values)
case "proc2":
stats.V2Stats, err = parseV2Stats(values)
case "proc3":
stats.V3Stats, err = parseV3Stats(values)
case "proc4":
stats.ClientV4Stats, err = parseClientV4Stats(values)
default:
return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
}
if err != nil {
return nil, fmt.Errorf("errors parsing NFS metric line: %s", err)
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("error scanning NFS file: %s", err)
}
return stats, nil
}


@ -1,89 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nfs
import (
"bufio"
"fmt"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
stats := &ServerRPCStats{}
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
// require at least <key> <value>
if len(parts) < 2 {
return nil, fmt.Errorf("invalid NFSd metric line %q", line)
}
label := parts[0]
var values []uint64
var err error
if label == "th" {
if len(parts) < 3 {
return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
}
values, err = util.ParseUint64s(parts[1:3])
} else {
values, err = util.ParseUint64s(parts[1:])
}
if err != nil {
return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
}
switch metricLine := parts[0]; metricLine {
case "rc":
stats.ReplyCache, err = parseReplyCache(values)
case "fh":
stats.FileHandles, err = parseFileHandles(values)
case "io":
stats.InputOutput, err = parseInputOutput(values)
case "th":
stats.Threads, err = parseThreads(values)
case "ra":
stats.ReadAheadCache, err = parseReadAheadCache(values)
case "net":
stats.Network, err = parseNetwork(values)
case "rpc":
stats.ServerRPC, err = parseServerRPC(values)
case "proc2":
stats.V2Stats, err = parseV2Stats(values)
case "proc3":
stats.V3Stats, err = parseV3Stats(values)
case "proc4":
stats.ServerV4Stats, err = parseServerV4Stats(values)
case "proc4ops":
stats.V4Ops, err = parseV4Ops(values)
default:
return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
}
if err != nil {
return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("error scanning NFSd file: %s", err)
}
return stats, nil
}


@ -20,6 +20,8 @@ import (
"os" "os"
"strconv" "strconv"
"strings" "strings"
"github.com/prometheus/procfs/internal/fs"
) )
// Proc provides information about a running process. // Proc provides information about a running process.
@ -27,7 +29,7 @@ type Proc struct {
// The process ID. // The process ID.
PID int PID int
fs FS fs fs.FS
} }
// Procs represents a list of Proc structs. // Procs represents a list of Proc structs.
@ -66,11 +68,11 @@ func AllProcs() (Procs, error) {
// Self returns a process for the current process. // Self returns a process for the current process.
func (fs FS) Self() (Proc, error) { func (fs FS) Self() (Proc, error) {
p, err := os.Readlink(fs.Path("self")) p, err := os.Readlink(fs.proc.Path("self"))
if err != nil { if err != nil {
return Proc{}, err return Proc{}, err
} }
pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
if err != nil { if err != nil {
return Proc{}, err return Proc{}, err
} }
@ -79,15 +81,15 @@ func (fs FS) Self() (Proc, error) {
// NewProc returns a process for the given pid. // NewProc returns a process for the given pid.
func (fs FS) NewProc(pid int) (Proc, error) { func (fs FS) NewProc(pid int) (Proc, error) {
if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err return Proc{}, err
} }
return Proc{PID: pid, fs: fs}, nil return Proc{PID: pid, fs: fs.proc}, nil
} }
// AllProcs returns a list of all currently available processes. // AllProcs returns a list of all currently available processes.
func (fs FS) AllProcs() (Procs, error) { func (fs FS) AllProcs() (Procs, error) {
d, err := os.Open(fs.Path()) d, err := os.Open(fs.proc.Path())
if err != nil { if err != nil {
return Procs{}, err return Procs{}, err
} }
@ -104,7 +106,7 @@ func (fs FS) AllProcs() (Procs, error) {
if err != nil { if err != nil {
continue continue
} }
p = append(p, Proc{PID: int(pid), fs: fs}) p = append(p, Proc{PID: int(pid), fs: fs.proc})
} }
return p, nil return p, nil

110
vendor/github.com/prometheus/procfs/proc_psi.go generated vendored Normal file

@ -0,0 +1,110 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
// The PSI / pressure interface is described at
// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
// Each resource (cpu, io, memory, ...) is exposed as a single file.
// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
// Each line contains several averages (over n seconds) and a total in µs.
//
// Example io pressure file:
// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
)
const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
// PSILine is a single line of values as returned by /proc/pressure/*
// The Avg entries are averages over n seconds, as a percentage
// The Total line is in microseconds
type PSILine struct {
Avg10 float64
Avg60 float64
Avg300 float64
Total uint64
}
// PSIStats represent pressure stall information from /proc/pressure/*
// Some indicates the share of time in which at least some tasks are stalled
// Full indicates the share of time in which all non-idle tasks are stalled simultaneously
type PSIStats struct {
Some *PSILine
Full *PSILine
}
// NewPSIStatsForResource reads pressure stall information for the specified
// resource. At time of writing this can be either "cpu", "memory" or "io".
func NewPSIStatsForResource(resource string) (PSIStats, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return PSIStats{}, err
}
return fs.NewPSIStatsForResource(resource)
}
// NewPSIStatsForResource reads pressure stall information from /proc/pressure/<resource>
func (fs FS) NewPSIStatsForResource(resource string) (PSIStats, error) {
file, err := os.Open(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
}
defer file.Close()
return parsePSIStats(resource, file)
}
// parsePSIStats parses the specified file for pressure stall information
func parsePSIStats(resource string, file io.Reader) (PSIStats, error) {
psiStats := PSIStats{}
stats, err := ioutil.ReadAll(file)
if err != nil {
return psiStats, fmt.Errorf("psi_stats: unable to read data for %s", resource)
}
for _, l := range strings.Split(string(stats), "\n") {
prefix := strings.Split(l, " ")[0]
switch prefix {
case "some":
psi := PSILine{}
_, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
if err != nil {
return PSIStats{}, err
}
psiStats.Some = &psi
case "full":
psi := PSILine{}
_, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
if err != nil {
return PSIStats{}, err
}
psiStats.Full = &psi
default:
// If we encounter a line with an unknown prefix, ignore it and move on
// Should new measurement types be added in the future we'll simply ignore them instead
// of erroring on retrieval
continue
}
}
return psiStats, nil
}
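A short example of reading these pressure files through the new API (requires a kernel with PSI support, roughly 4.20 or later, and /proc mounted; otherwise NewPSIStatsForResource returns an error):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// "cpu", "memory" and "io" are the resources exposed under /proc/pressure.
	stats, err := procfs.NewPSIStatsForResource("io")
	if err != nil {
		log.Fatal(err)
	}
	if stats.Some != nil {
		fmt.Printf("io some: avg10=%.2f%% total=%dµs\n", stats.Some.Avg10, stats.Some.Total)
	}
	if stats.Full != nil {
		fmt.Printf("io full: avg10=%.2f%% total=%dµs\n", stats.Full.Avg10, stats.Full.Total)
	}
}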


@ -18,6 +18,8 @@ import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"github.com/prometheus/procfs/internal/fs"
) )
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call // Originally, this USER_HZ value was dynamically retrieved via a sysconf call
@ -95,11 +97,11 @@ type ProcStat struct {
// in clock ticks. // in clock ticks.
Starttime uint64 Starttime uint64
// Virtual memory size in bytes. // Virtual memory size in bytes.
VSize int VSize uint
// Resident set size in pages. // Resident set size in pages.
RSS int RSS int
fs FS proc fs.FS
} }
// NewStat returns the current status information of the process. // NewStat returns the current status information of the process.
@ -118,7 +120,7 @@ func (p Proc) NewStat() (ProcStat, error) {
var ( var (
ignore int ignore int
s = ProcStat{PID: p.PID, fs: p.fs} s = ProcStat{PID: p.PID, proc: p.fs}
l = bytes.Index(data, []byte("(")) l = bytes.Index(data, []byte("("))
r = bytes.LastIndex(data, []byte(")")) r = bytes.LastIndex(data, []byte(")"))
) )
@ -164,7 +166,7 @@ func (p Proc) NewStat() (ProcStat, error) {
} }
// VirtualMemory returns the virtual memory size in bytes. // VirtualMemory returns the virtual memory size in bytes.
func (s ProcStat) VirtualMemory() int { func (s ProcStat) VirtualMemory() uint {
return s.VSize return s.VSize
} }
@ -175,7 +177,8 @@ func (s ProcStat) ResidentMemory() int {
// StartTime returns the unix timestamp of the process in seconds. // StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) { func (s ProcStat) StartTime() (float64, error) {
stat, err := s.fs.NewStat() fs := FS{proc: s.proc}
stat, err := fs.NewStat()
if err != nil { if err != nil {
return 0, err return 0, err
} }


@ -153,7 +153,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
func (fs FS) NewStat() (Stat, error) { func (fs FS) NewStat() (Stat, error) {
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
f, err := os.Open(fs.Path("stat")) f, err := os.Open(fs.proc.Path("stat"))
if err != nil { if err != nil {
return Stat{}, err return Stat{}, err
} }


@ -97,7 +97,7 @@ func NewXfrmStat() (XfrmStat, error) {
// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. // NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
func (fs FS) NewXfrmStat() (XfrmStat, error) { func (fs FS) NewXfrmStat() (XfrmStat, error) {
file, err := os.Open(fs.Path("net/xfrm_stat")) file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
if err != nil { if err != nil {
return XfrmStat{}, err return XfrmStat{}, err
} }


@ -1,330 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xfs
import (
"bufio"
"fmt"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// ParseStats parses a Stats from an input io.Reader, using the format
// found in /proc/fs/xfs/stat.
func ParseStats(r io.Reader) (*Stats, error) {
const (
// Fields parsed into stats structures.
fieldExtentAlloc = "extent_alloc"
fieldAbt = "abt"
fieldBlkMap = "blk_map"
fieldBmbt = "bmbt"
fieldDir = "dir"
fieldTrans = "trans"
fieldIg = "ig"
fieldLog = "log"
fieldRw = "rw"
fieldAttr = "attr"
fieldIcluster = "icluster"
fieldVnodes = "vnodes"
fieldBuf = "buf"
fieldXpc = "xpc"
// Unimplemented at this time due to lack of documentation.
// fieldPushAil = "push_ail"
// fieldXstrat = "xstrat"
// fieldAbtb2 = "abtb2"
// fieldAbtc2 = "abtc2"
// fieldBmbt2 = "bmbt2"
// fieldIbt2 = "ibt2"
// fieldFibt2 = "fibt2"
// fieldQm = "qm"
// fieldDebug = "debug"
)
var xfss Stats
s := bufio.NewScanner(r)
for s.Scan() {
// Expect at least a string label and a single integer value, ex:
// - abt 0
// - rw 1 2
ss := strings.Fields(string(s.Bytes()))
if len(ss) < 2 {
continue
}
label := ss[0]
// Extended precision counters are uint64 values.
if label == fieldXpc {
us, err := util.ParseUint64s(ss[1:])
if err != nil {
return nil, err
}
xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
if err != nil {
return nil, err
}
continue
}
// All other counters are uint32 values.
us, err := util.ParseUint32s(ss[1:])
if err != nil {
return nil, err
}
switch label {
case fieldExtentAlloc:
xfss.ExtentAllocation, err = extentAllocationStats(us)
case fieldAbt:
xfss.AllocationBTree, err = btreeStats(us)
case fieldBlkMap:
xfss.BlockMapping, err = blockMappingStats(us)
case fieldBmbt:
xfss.BlockMapBTree, err = btreeStats(us)
case fieldDir:
xfss.DirectoryOperation, err = directoryOperationStats(us)
case fieldTrans:
xfss.Transaction, err = transactionStats(us)
case fieldIg:
xfss.InodeOperation, err = inodeOperationStats(us)
case fieldLog:
xfss.LogOperation, err = logOperationStats(us)
case fieldRw:
xfss.ReadWrite, err = readWriteStats(us)
case fieldAttr:
xfss.AttributeOperation, err = attributeOperationStats(us)
case fieldIcluster:
xfss.InodeClustering, err = inodeClusteringStats(us)
case fieldVnodes:
xfss.Vnode, err = vnodeStats(us)
case fieldBuf:
xfss.Buffer, err = bufferStats(us)
}
if err != nil {
return nil, err
}
}
return &xfss, s.Err()
}
// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
if l := len(us); l != 4 {
return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
}
return ExtentAllocationStats{
ExtentsAllocated: us[0],
BlocksAllocated: us[1],
ExtentsFreed: us[2],
BlocksFreed: us[3],
}, nil
}
// btreeStats builds a BTreeStats from a slice of uint32s.
func btreeStats(us []uint32) (BTreeStats, error) {
if l := len(us); l != 4 {
return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
}
return BTreeStats{
Lookups: us[0],
Compares: us[1],
RecordsInserted: us[2],
RecordsDeleted: us[3],
}, nil
}
// BlockMappingStat builds a BlockMappingStats from a slice of uint32s.
func blockMappingStats(us []uint32) (BlockMappingStats, error) {
if l := len(us); l != 7 {
return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
}
return BlockMappingStats{
Reads: us[0],
Writes: us[1],
Unmaps: us[2],
ExtentListInsertions: us[3],
ExtentListDeletions: us[4],
ExtentListLookups: us[5],
ExtentListCompares: us[6],
}, nil
}
// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
if l := len(us); l != 4 {
return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
}
return DirectoryOperationStats{
Lookups: us[0],
Creates: us[1],
Removes: us[2],
Getdents: us[3],
}, nil
}
// TransactionStats builds a TransactionStats from a slice of uint32s.
func transactionStats(us []uint32) (TransactionStats, error) {
if l := len(us); l != 3 {
return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
}
return TransactionStats{
Sync: us[0],
Async: us[1],
Empty: us[2],
}, nil
}
// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
if l := len(us); l != 7 {
return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
}
return InodeOperationStats{
Attempts: us[0],
Found: us[1],
Recycle: us[2],
Missed: us[3],
Duplicate: us[4],
Reclaims: us[5],
AttributeChange: us[6],
}, nil
}
// LogOperationStats builds a LogOperationStats from a slice of uint32s.
func logOperationStats(us []uint32) (LogOperationStats, error) {
if l := len(us); l != 5 {
return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
}
return LogOperationStats{
Writes: us[0],
Blocks: us[1],
NoInternalBuffers: us[2],
Force: us[3],
ForceSleep: us[4],
}, nil
}
// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
func readWriteStats(us []uint32) (ReadWriteStats, error) {
if l := len(us); l != 2 {
return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
}
return ReadWriteStats{
Read: us[0],
Write: us[1],
}, nil
}
// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
if l := len(us); l != 4 {
return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
}
return AttributeOperationStats{
Get: us[0],
Set: us[1],
Remove: us[2],
List: us[3],
}, nil
}
// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
if l := len(us); l != 3 {
return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
}
return InodeClusteringStats{
Iflush: us[0],
Flush: us[1],
FlushInode: us[2],
}, nil
}
// VnodeStats builds a VnodeStats from a slice of uint32s.
func vnodeStats(us []uint32) (VnodeStats, error) {
// The attribute "Free" appears to not be available on older XFS
// stats versions. Therefore, 7 or 8 elements may appear in
// this slice.
l := len(us)
if l != 7 && l != 8 {
return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
}
s := VnodeStats{
Active: us[0],
Allocate: us[1],
Get: us[2],
Hold: us[3],
Release: us[4],
Reclaim: us[5],
Remove: us[6],
}
// Skip adding free, unless it is present. The zero value will
// be used in place of an actual count.
if l == 7 {
return s, nil
}
s.Free = us[7]
return s, nil
}
// BufferStats builds a BufferStats from a slice of uint32s.
func bufferStats(us []uint32) (BufferStats, error) {
if l := len(us); l != 9 {
return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
}
return BufferStats{
Get: us[0],
Create: us[1],
GetLocked: us[2],
GetLockedWaited: us[3],
BusyLocked: us[4],
MissLocked: us[5],
PageRetries: us[6],
PageFound: us[7],
GetRead: us[8],
}, nil
}
// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s.
func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
if l := len(us); l != 3 {
return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
}
return ExtendedPrecisionStats{
FlushBytes: us[0],
WriteBytes: us[1],
ReadBytes: us[2],
}, nil
}


@ -1,163 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package xfs provides access to statistics exposed by the XFS filesystem.
package xfs
// Stats contains XFS filesystem runtime statistics, parsed from
// /proc/fs/xfs/stat.
//
// The names and meanings of each statistic were taken from
// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
// kernel source. Most counters are uint32s (same data types used in
// xfs_stats.h), but some of the "extended precision stats" are uint64s.
type Stats struct {
// The name of the filesystem used to source these statistics.
// If empty, this indicates aggregated statistics for all XFS
// filesystems on the host.
Name string
ExtentAllocation ExtentAllocationStats
AllocationBTree BTreeStats
BlockMapping BlockMappingStats
BlockMapBTree BTreeStats
DirectoryOperation DirectoryOperationStats
Transaction TransactionStats
InodeOperation InodeOperationStats
LogOperation LogOperationStats
ReadWrite ReadWriteStats
AttributeOperation AttributeOperationStats
InodeClustering InodeClusteringStats
Vnode VnodeStats
Buffer BufferStats
ExtendedPrecision ExtendedPrecisionStats
}
// ExtentAllocationStats contains statistics regarding XFS extent allocations.
type ExtentAllocationStats struct {
ExtentsAllocated uint32
BlocksAllocated uint32
ExtentsFreed uint32
BlocksFreed uint32
}
// BTreeStats contains statistics regarding an XFS internal B-tree.
type BTreeStats struct {
Lookups uint32
Compares uint32
RecordsInserted uint32
RecordsDeleted uint32
}
// BlockMappingStats contains statistics regarding XFS block maps.
type BlockMappingStats struct {
Reads uint32
Writes uint32
Unmaps uint32
ExtentListInsertions uint32
ExtentListDeletions uint32
ExtentListLookups uint32
ExtentListCompares uint32
}
// DirectoryOperationStats contains statistics regarding XFS directory entries.
type DirectoryOperationStats struct {
Lookups uint32
Creates uint32
Removes uint32
Getdents uint32
}
// TransactionStats contains statistics regarding XFS metadata transactions.
type TransactionStats struct {
Sync uint32
Async uint32
Empty uint32
}
// InodeOperationStats contains statistics regarding XFS inode operations.
type InodeOperationStats struct {
Attempts uint32
Found uint32
Recycle uint32
Missed uint32
Duplicate uint32
Reclaims uint32
AttributeChange uint32
}
// LogOperationStats contains statistics regarding the XFS log buffer.
type LogOperationStats struct {
Writes uint32
Blocks uint32
NoInternalBuffers uint32
Force uint32
ForceSleep uint32
}
// ReadWriteStats contains statistics regarding the number of read and write
// system calls for XFS filesystems.
type ReadWriteStats struct {
Read uint32
Write uint32
}
// AttributeOperationStats contains statistics regarding manipulation of
// XFS extended file attributes.
type AttributeOperationStats struct {
Get uint32
Set uint32
Remove uint32
List uint32
}
// InodeClusteringStats contains statistics regarding XFS inode clustering
// operations.
type InodeClusteringStats struct {
Iflush uint32
Flush uint32
FlushInode uint32
}
// VnodeStats contains statistics regarding XFS vnode operations.
type VnodeStats struct {
Active uint32
Allocate uint32
Get uint32
Hold uint32
Release uint32
Reclaim uint32
Remove uint32
Free uint32
}
// BufferStats contains statistics regarding XFS read/write I/O buffers.
type BufferStats struct {
Get uint32
Create uint32
GetLocked uint32
GetLockedWaited uint32
BusyLocked uint32
MissLocked uint32
PageRetries uint32
PageFound uint32
GetRead uint32
}
// ExtendedPrecisionStats contains high precision counters used to track the
// total number of bytes read, written, or flushed, during XFS operations.
type ExtendedPrecisionStats struct {
FlushBytes uint64
WriteBytes uint64
ReadBytes uint64
}

vendor/github.com/prometheus/tsdb/.golangci.yml generated vendored Normal file

@ -0,0 +1,5 @@
# Run only staticcheck for now. Additional linters will be enabled one-by-one.
linters:
enable:
- staticcheck
disable-all: true


@ -1,5 +1,15 @@
## master / unreleased ## master / unreleased
## 0.8.0
- [BUGFIX] Calling `Close` more than once on a querier returns an error instead of a panic.
- [BUGFIX] Don't panic and recover nicely when running out of disk space.
- [BUGFIX] Correctly handle empty labels.
- [BUGFIX] Don't crash on an unknown tombstone ref.
- [ENHANCEMENT] Re-add FromData function to create a chunk from bytes. It is used by Cortex and Thanos.
- [ENHANCEMENT] Simplify mergedPostings.Seek.
- [FEATURE] Added `currentSegment` metric for the current WAL segment being written to.
## 0.7.1 ## 0.7.1
- [ENHANCEMENT] Reduce memory usage in mergedPostings.Seek - [ENHANCEMENT] Reduce memory usage in mergedPostings.Seek


@ -69,17 +69,17 @@ else
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif endif
PROMU_VERSION ?= 0.3.0 PROMU_VERSION ?= 0.4.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
STATICCHECK := GOLANGCI_LINT :=
# staticcheck only supports linux, freebsd, darwin and windows platforms on i386/amd64 GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.16.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different. # windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin)) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
STATICCHECK_VERSION ?= 2019.1
STATICCHECK_URL := https://github.com/dominikh/go-tools/releases/download/$(STATICCHECK_VERSION)/staticcheck_$(GOHOSTOS)_$(GOHOSTARCH)
endif endif
endif endif
@ -107,7 +107,7 @@ endif
%: common-% ; %: common-% ;
.PHONY: common-all .PHONY: common-all
common-all: precheck style check_license staticcheck unused build test common-all: precheck style check_license lint unused build test
.PHONY: common-style .PHONY: common-style
common-style: common-style:
@ -159,21 +159,24 @@ common-vet:
@echo ">> vetting code" @echo ">> vetting code"
GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
.PHONY: common-staticcheck .PHONY: common-lint
common-staticcheck: $(STATICCHECK) common-lint: $(GOLANGCI_LINT)
ifdef STATICCHECK ifdef GOLANGCI_LINT
@echo ">> running staticcheck" @echo ">> running golangci-lint"
chmod +x $(STATICCHECK)
ifdef GO111MODULE ifdef GO111MODULE
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. # 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
# Otherwise staticcheck might fail randomly for some reason not yet explained. # Otherwise staticcheck might fail randomly for some reason not yet explained.
GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
else else
$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) $(GOLANGCI_LINT) run $(pkgs)
endif endif
endif endif
# For backward-compatibility.
.PHONY: common-staticcheck
common-staticcheck: lint
.PHONY: common-unused .PHONY: common-unused
common-unused: $(GOVENDOR) common-unused: $(GOVENDOR)
ifdef GOVENDOR ifdef GOVENDOR
@ -241,10 +244,10 @@ proto:
@echo ">> generating code from proto files" @echo ">> generating code from proto files"
@./scripts/genproto.sh @./scripts/genproto.sh
ifdef STATICCHECK ifdef GOLANGCI_LINT
$(STATICCHECK): $(GOLANGCI_LINT):
mkdir -p $(FIRST_GOPATH)/bin mkdir -p $(FIRST_GOPATH)/bin
curl -s -L $(STATICCHECK_URL) > $(STATICCHECK) curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
endif endif
ifdef GOVENDOR ifdef GOVENDOR


@ -29,6 +29,7 @@ import (
"github.com/prometheus/tsdb/chunkenc" "github.com/prometheus/tsdb/chunkenc"
"github.com/prometheus/tsdb/chunks" "github.com/prometheus/tsdb/chunks"
tsdb_errors "github.com/prometheus/tsdb/errors" tsdb_errors "github.com/prometheus/tsdb/errors"
"github.com/prometheus/tsdb/fileutil"
"github.com/prometheus/tsdb/index" "github.com/prometheus/tsdb/index"
"github.com/prometheus/tsdb/labels" "github.com/prometheus/tsdb/labels"
) )
@ -230,12 +231,17 @@ func readMetaFile(dir string) (*BlockMeta, error) {
return &m, nil return &m, nil
} }
func writeMetaFile(dir string, meta *BlockMeta) error { func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) error {
meta.Version = 1 meta.Version = 1
// Make any changes to the file appear atomic. // Make any changes to the file appear atomic.
path := filepath.Join(dir, metaFilename) path := filepath.Join(dir, metaFilename)
tmp := path + ".tmp" tmp := path + ".tmp"
defer func() {
if err := os.RemoveAll(tmp); err != nil {
level.Error(logger).Log("msg", "remove tmp file", "err", err.Error())
}
}()
f, err := os.Create(tmp) f, err := os.Create(tmp)
if err != nil { if err != nil {
@ -246,7 +252,6 @@ func writeMetaFile(dir string, meta *BlockMeta) error {
enc.SetIndent("", "\t") enc.SetIndent("", "\t")
var merr tsdb_errors.MultiError var merr tsdb_errors.MultiError
if merr.Add(enc.Encode(meta)); merr.Err() != nil { if merr.Add(enc.Encode(meta)); merr.Err() != nil {
merr.Add(f.Close()) merr.Add(f.Close())
return merr.Err() return merr.Err()
@ -259,7 +264,7 @@ func writeMetaFile(dir string, meta *BlockMeta) error {
if err := f.Close(); err != nil { if err := f.Close(); err != nil {
return err return err
} }
return renameFile(tmp, path) return fileutil.Replace(tmp, path)
} }
// Block represents a directory of time series data covering a continuous time range. // Block represents a directory of time series data covering a continuous time range.
@ -278,6 +283,8 @@ type Block struct {
chunkr ChunkReader chunkr ChunkReader
indexr IndexReader indexr IndexReader
tombstones TombstoneReader tombstones TombstoneReader
logger log.Logger
} }
// OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used // OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used
@ -322,7 +329,7 @@ func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, er
// that would be the logical place for a block size to be calculated. // that would be the logical place for a block size to be calculated.
bs := blockSize(cr, ir, tsr) bs := blockSize(cr, ir, tsr)
meta.Stats.NumBytes = bs meta.Stats.NumBytes = bs
err = writeMetaFile(dir, meta) err = writeMetaFile(logger, dir, meta)
if err != nil { if err != nil {
level.Warn(logger).Log("msg", "couldn't write the meta file for the block size", "block", dir, "err", err) level.Warn(logger).Log("msg", "couldn't write the meta file for the block size", "block", dir, "err", err)
} }
@ -334,6 +341,7 @@ func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, er
indexr: ir, indexr: ir,
tombstones: tr, tombstones: tr,
symbolTableSize: ir.SymbolTableSize(), symbolTableSize: ir.SymbolTableSize(),
logger: logger,
} }
return pb, nil return pb, nil
} }
@ -429,7 +437,7 @@ func (pb *Block) GetSymbolTableSize() uint64 {
func (pb *Block) setCompactionFailed() error { func (pb *Block) setCompactionFailed() error {
pb.meta.Compaction.Failed = true pb.meta.Compaction.Failed = true
return writeMetaFile(pb.dir, &pb.meta) return writeMetaFile(pb.logger, pb.dir, &pb.meta)
} }
type blockIndexReader struct { type blockIndexReader struct {
@ -553,10 +561,10 @@ Outer:
pb.tombstones = stones pb.tombstones = stones
pb.meta.Stats.NumTombstones = pb.tombstones.Total() pb.meta.Stats.NumTombstones = pb.tombstones.Total()
if err := writeTombstoneFile(pb.dir, pb.tombstones); err != nil { if err := writeTombstoneFile(pb.logger, pb.dir, pb.tombstones); err != nil {
return err return err
} }
return writeMetaFile(pb.dir, &pb.meta) return writeMetaFile(pb.logger, pb.dir, &pb.meta)
} }
// CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones). // CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones).


@ -14,6 +14,7 @@
package chunkenc package chunkenc
import ( import (
"fmt"
"sync" "sync"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -70,16 +71,18 @@ func (nopIterator) At() (int64, float64) { return 0, 0 }
func (nopIterator) Next() bool { return false } func (nopIterator) Next() bool { return false }
func (nopIterator) Err() error { return nil } func (nopIterator) Err() error { return nil }
// Pool is used to create and reuse chunk references to avoid allocations.
type Pool interface { type Pool interface {
Put(Chunk) error Put(Chunk) error
Get(e Encoding, b []byte) (Chunk, error) Get(e Encoding, b []byte) (Chunk, error)
} }
// Pool is a memory pool of chunk objects. // pool is a memory pool of chunk objects.
type pool struct { type pool struct {
xor sync.Pool xor sync.Pool
} }
// NewPool returns a new pool.
func NewPool() Pool { func NewPool() Pool {
return &pool{ return &pool{
xor: sync.Pool{ xor: sync.Pool{
@ -119,3 +122,14 @@ func (p *pool) Put(c Chunk) error {
} }
return nil return nil
} }
// FromData returns a chunk from a byte slice of chunk data.
// This is there so that users of the library can easily create chunks from
// bytes.
func FromData(e Encoding, d []byte) (Chunk, error) {
switch e {
case EncXOR:
return &XORChunk{b: bstream{count: 0, stream: d}}, nil
}
return nil, fmt.Errorf("unknown chunk encoding: %d", e)
}
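
Not part of the vendored diff: a minimal sketch of how a library user might exercise the re-added FromData. It assumes the rest of the vendored chunkenc API (NewXORChunk, Appender, Bytes and the zero-argument Iterator of this tsdb version); timestamps and values are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/tsdb/chunkenc"
)

func main() {
	// Build an XOR chunk and append a couple of samples.
	c := chunkenc.NewXORChunk()
	app, err := c.Appender()
	if err != nil {
		log.Fatal(err)
	}
	app.Append(1000, 1.0) // (timestamp in ms, value); illustrative data.
	app.Append(2000, 2.5)

	// Round-trip the serialized bytes through FromData, as a consumer that
	// only holds the raw chunk data (e.g. Cortex or Thanos reading it back
	// from storage) would.
	rc, err := chunkenc.FromData(chunkenc.EncXOR, c.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	it := rc.Iterator()
	for it.Next() {
		t, v := it.At()
		fmt.Println(t, v)
	}
	if err := it.Err(); err != nil {
		log.Fatal(err)
	}
}
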


@ -426,7 +426,7 @@ func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (u
if meta.Stats.NumSamples == 0 { if meta.Stats.NumSamples == 0 {
for _, b := range bs { for _, b := range bs {
b.meta.Compaction.Deletable = true b.meta.Compaction.Deletable = true
if err = writeMetaFile(b.dir, &b.meta); err != nil { if err = writeMetaFile(c.logger, b.dir, &b.meta); err != nil {
level.Error(c.logger).Log( level.Error(c.logger).Log(
"msg", "Failed to write 'Deletable' to meta file after compaction", "msg", "Failed to write 'Deletable' to meta file after compaction",
"ulid", b.meta.ULID, "ulid", b.meta.ULID,
@ -609,12 +609,12 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
return nil return nil
} }
if err = writeMetaFile(tmp, meta); err != nil { if err = writeMetaFile(c.logger, tmp, meta); err != nil {
return errors.Wrap(err, "write merged meta") return errors.Wrap(err, "write merged meta")
} }
// Create an empty tombstones file. // Create an empty tombstones file.
if err := writeTombstoneFile(tmp, newMemTombstones()); err != nil { if err := writeTombstoneFile(c.logger, tmp, newMemTombstones()); err != nil {
return errors.Wrap(err, "write new tombstones file") return errors.Wrap(err, "write new tombstones file")
} }
@ -639,7 +639,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
df = nil df = nil
// Block successfully written, make visible and remove old ones. // Block successfully written, make visible and remove old ones.
if err := renameFile(tmp, dir); err != nil { if err := fileutil.Replace(tmp, dir); err != nil {
return errors.Wrap(err, "rename block dir") return errors.Wrap(err, "rename block dir")
} }
@ -1013,24 +1013,3 @@ func (c *compactionMerger) Err() error {
func (c *compactionMerger) At() (labels.Labels, []chunks.Meta, Intervals) { func (c *compactionMerger) At() (labels.Labels, []chunks.Meta, Intervals) {
return c.l, c.c, c.intervals return c.l, c.c, c.intervals
} }
func renameFile(from, to string) error {
if err := os.RemoveAll(to); err != nil {
return err
}
if err := os.Rename(from, to); err != nil {
return err
}
// Directory was renamed; sync parent dir to persist rename.
pdir, err := fileutil.OpenDir(filepath.Dir(to))
if err != nil {
return err
}
if err = pdir.Sync(); err != nil {
pdir.Close()
return err
}
return pdir.Close()
}


@ -18,7 +18,7 @@ require (
github.com/pkg/errors v0.8.0 github.com/pkg/errors v0.8.0
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v0.9.1 github.com/prometheus/client_golang v0.9.1
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce // indirect github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce // indirect
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d // indirect github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d // indirect
github.com/stretchr/testify v1.2.2 // indirect github.com/stretchr/testify v1.2.2 // indirect


@ -421,6 +421,10 @@ func (h *Head) loadWAL(r *wal.Reader) error {
if itv.Maxt < h.minValidTime { if itv.Maxt < h.minValidTime {
continue continue
} }
if m := h.series.getByID(s.ref); m == nil {
unknownRefs++
continue
}
allStones.addInterval(s.ref, itv) allStones.addInterval(s.ref, itv)
} }
} }
@ -755,6 +759,9 @@ func (a *headAppender) Add(lset labels.Labels, t int64, v float64) (uint64, erro
return 0, ErrOutOfBounds return 0, ErrOutOfBounds
} }
// Ensure no empty labels have gotten through.
lset = lset.WithoutEmpty()
s, created := a.head.getOrCreate(lset.Hash(), lset) s, created := a.head.getOrCreate(lset.Hash(), lset)
if created { if created {
a.series = append(a.series, RefSeries{ a.series = append(a.series, RefSeries{


@ -53,7 +53,6 @@ const (
type indexWriterSeries struct { type indexWriterSeries struct {
labels labels.Labels labels labels.Labels
chunks []chunks.Meta // series file offset of chunks chunks []chunks.Meta // series file offset of chunks
offset uint32 // index file offset of series reference
} }
type indexWriterSeriesSlice []*indexWriterSeries type indexWriterSeriesSlice []*indexWriterSeries


@ -404,7 +404,6 @@ func (h *postingsHeap) Pop() interface{} {
type mergedPostings struct { type mergedPostings struct {
h postingsHeap h postingsHeap
initilized bool initilized bool
heaped bool
cur uint64 cur uint64
err error err error
} }
@ -434,12 +433,9 @@ func (it *mergedPostings) Next() bool {
return false return false
} }
if !it.heaped {
heap.Init(&it.h)
it.heaped = true
}
// The user must issue an initial Next. // The user must issue an initial Next.
if !it.initilized { if !it.initilized {
heap.Init(&it.h)
it.cur = it.h[0].At() it.cur = it.h[0].At()
it.initilized = true it.initilized = true
return true return true
@ -477,33 +473,24 @@ func (it *mergedPostings) Seek(id uint64) bool {
return false return false
} }
} }
if it.cur >= id { for it.cur < id {
return true cur := it.h[0]
} if !cur.Seek(id) {
// Heapifying when there is lots of Seeks is inefficient, heap.Pop(&it.h)
// mark to be re-heapified on the Next() call. if cur.Err() != nil {
it.heaped = false it.err = cur.Err()
lowest := ^uint64(0)
n := 0
for _, i := range it.h {
if i.Seek(id) {
it.h[n] = i
n++
if i.At() < lowest {
lowest = i.At()
}
} else {
if i.Err() != nil {
it.err = i.Err()
return false return false
} }
if it.h.Len() == 0 {
return false
}
} else {
// Value of top of heap has changed, re-heapify.
heap.Fix(&it.h, 0)
} }
it.cur = it.h[0].At()
} }
it.h = it.h[:n]
if len(it.h) == 0 {
return false
}
it.cur = lowest
return true return true
} }


@ -103,6 +103,23 @@ func (ls Labels) Map() map[string]string {
return m return m
} }
// WithoutEmpty returns the labelset without empty labels.
// May return the same labelset.
func (ls Labels) WithoutEmpty() Labels {
for _, v := range ls {
if v.Value == "" {
els := make(Labels, 0, len(ls)-1)
for _, v := range ls {
if v.Value != "" {
els = append(els, v)
}
}
return els
}
}
return ls
}
// New returns a sorted Labels from the given labels. // New returns a sorted Labels from the given labels.
// The caller has to guarantee that all label names are unique. // The caller has to guarantee that all label names are unique.
func New(ls ...Label) Labels { func New(ls ...Label) Labels {
@ -119,7 +136,9 @@ func New(ls ...Label) Labels {
func FromMap(m map[string]string) Labels { func FromMap(m map[string]string) Labels {
l := make(Labels, 0, len(m)) l := make(Labels, 0, len(m))
for k, v := range m { for k, v := range m {
l = append(l, Label{Name: k, Value: v}) if v != "" {
l = append(l, Label{Name: k, Value: v})
}
} }
sort.Sort(l) sort.Sort(l)
@ -133,7 +152,9 @@ func FromStrings(ss ...string) Labels {
} }
var res Labels var res Labels
for i := 0; i < len(ss); i += 2 { for i := 0; i < len(ss); i += 2 {
res = append(res, Label{Name: ss[i], Value: ss[i+1]}) if ss[i+1] != "" {
res = append(res, Label{Name: ss[i], Value: ss[i+1]})
}
} }
sort.Sort(res) sort.Sort(res)
@ -204,9 +225,7 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
hashes[h] = struct{}{} hashes[h] = struct{}{}
i++ i++
} }
if err != nil {
return nil, err
}
if i != n { if i != n {
return mets, errors.Errorf("requested %d metrics but found %d", n, i) return mets, errors.Errorf("requested %d metrics but found %d", n, i)
} }
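
Not part of the vendored diff: a small sketch of the empty-label handling introduced above (WithoutEmpty, plus FromMap and FromStrings skipping empty values up front). It assumes the vendored labels package's String output; names and values are illustrative.

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/labels"
)

func main() {
	// WithoutEmpty drops labels whose value is empty and may return the
	// original labelset when there is nothing to remove.
	lset := labels.Labels{
		{Name: "instance", Value: ""},
		{Name: "job", Value: "api"},
	}
	fmt.Println(lset.WithoutEmpty()) // approximately: {job="api"}

	// FromMap now skips empty values as well.
	fmt.Println(labels.FromMap(map[string]string{
		"job":      "api",
		"instance": "",
	})) // approximately: {job="api"}
}
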


@ -205,6 +205,8 @@ type blockQuerier struct {
chunks ChunkReader chunks ChunkReader
tombstones TombstoneReader tombstones TombstoneReader
closed bool
mint, maxt int64 mint, maxt int64
} }
@ -252,12 +254,15 @@ func (q *blockQuerier) LabelValuesFor(string, labels.Label) ([]string, error) {
} }
func (q *blockQuerier) Close() error { func (q *blockQuerier) Close() error {
var merr tsdb_errors.MultiError if q.closed {
return errors.New("block querier already closed")
}
var merr tsdb_errors.MultiError
merr.Add(q.index.Close()) merr.Add(q.index.Close())
merr.Add(q.chunks.Close()) merr.Add(q.chunks.Close())
merr.Add(q.tombstones.Close()) merr.Add(q.tombstones.Close())
q.closed = true
return merr.Err() return merr.Err()
} }
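
Not part of the vendored diff: a sketch of the double-Close guard above, matching the 0.8.0 changelog entry that a repeated Close on a querier returns an error instead of panicking. It assumes the vendored tsdb API (Open, DefaultOptions, Appender, Querier); the temp directory and sample data are illustrative only.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/prometheus/tsdb"
	"github.com/prometheus/tsdb/labels"
)

func main() {
	dir, err := ioutil.TempDir("", "tsdb-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	db, err := tsdb.Open(dir, nil, nil, tsdb.DefaultOptions)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Write one sample so the querier has a block (the head) to cover.
	app := db.Appender()
	if _, err := app.Add(labels.FromStrings("job", "api"), 1000, 1.0); err != nil {
		log.Fatal(err)
	}
	if err := app.Commit(); err != nil {
		log.Fatal(err)
	}

	q, err := db.Querier(0, 2000)
	if err != nil {
		log.Fatal(err)
	}
	if err := q.Close(); err != nil {
		log.Fatal(err)
	}
	// A second Close reports an error rather than panicking.
	if err := q.Close(); err != nil {
		fmt.Println("second Close:", err)
	}
}
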
@ -327,15 +332,6 @@ func PostingsForMatchers(ix IndexReader, ms ...labels.Matcher) (index.Postings,
it := index.Intersect(its...) it := index.Intersect(its...)
for _, n := range notIts { for _, n := range notIts {
if _, ok := n.(*index.ListPostings); !ok {
// Best to pre-calculate the merged lists via next rather than have a ton
// of seeks in Without.
pl, err := index.ExpandPostings(n)
if err != nil {
return nil, err
}
n = index.NewListPostings(pl)
}
it = index.Without(it, n) it = index.Without(it, n)
} }


@ -23,6 +23,8 @@ import (
"github.com/go-kit/kit/log" "github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level" "github.com/go-kit/kit/log/level"
"github.com/pkg/errors" "github.com/pkg/errors"
tsdb_errors "github.com/prometheus/tsdb/errors"
"github.com/prometheus/tsdb/fileutil"
) )
// repairBadIndexVersion repairs an issue in index and meta.json persistence introduced in // repairBadIndexVersion repairs an issue in index and meta.json persistence introduced in
@ -38,6 +40,16 @@ func repairBadIndexVersion(logger log.Logger, dir string) error {
wrapErr := func(err error, d string) error { wrapErr := func(err error, d string) error {
return errors.Wrapf(err, "block dir: %q", d) return errors.Wrapf(err, "block dir: %q", d)
} }
tmpFiles := make([]string, 0, len(dir))
defer func() {
for _, tmp := range tmpFiles {
if err := os.RemoveAll(tmp); err != nil {
level.Error(logger).Log("msg", "remove tmp file", "err", err.Error())
}
}
}()
for _, d := range dirs { for _, d := range dirs {
meta, err := readBogusMetaFile(d) meta, err := readBogusMetaFile(d)
if err != nil { if err != nil {
@ -63,6 +75,8 @@ func repairBadIndexVersion(logger log.Logger, dir string) error {
if err != nil { if err != nil {
return wrapErr(err, d) return wrapErr(err, d)
} }
tmpFiles = append(tmpFiles, repl.Name())
broken, err := os.Open(filepath.Join(d, indexFilename)) broken, err := os.Open(filepath.Join(d, indexFilename))
if err != nil { if err != nil {
return wrapErr(err, d) return wrapErr(err, d)
@ -70,12 +84,19 @@ func repairBadIndexVersion(logger log.Logger, dir string) error {
if _, err := io.Copy(repl, broken); err != nil { if _, err := io.Copy(repl, broken); err != nil {
return wrapErr(err, d) return wrapErr(err, d)
} }
var merr tsdb_errors.MultiError
// Set the 5th byte to 2 to indicate the correct file format version. // Set the 5th byte to 2 to indicate the correct file format version.
if _, err := repl.WriteAt([]byte{2}, 4); err != nil { if _, err := repl.WriteAt([]byte{2}, 4); err != nil {
return wrapErr(err, d) merr.Add(wrapErr(err, d))
merr.Add(wrapErr(repl.Close(), d))
return merr.Err()
} }
if err := repl.Sync(); err != nil { if err := repl.Sync(); err != nil {
return wrapErr(err, d) merr.Add(wrapErr(err, d))
merr.Add(wrapErr(repl.Close(), d))
return merr.Err()
} }
if err := repl.Close(); err != nil { if err := repl.Close(); err != nil {
return wrapErr(err, d) return wrapErr(err, d)
@ -83,12 +104,12 @@ func repairBadIndexVersion(logger log.Logger, dir string) error {
if err := broken.Close(); err != nil { if err := broken.Close(); err != nil {
return wrapErr(err, d) return wrapErr(err, d)
} }
if err := renameFile(repl.Name(), broken.Name()); err != nil { if err := fileutil.Replace(repl.Name(), broken.Name()); err != nil {
return wrapErr(err, d) return wrapErr(err, d)
} }
// Reset version of meta.json to 1. // Reset version of meta.json to 1.
meta.Version = 1 meta.Version = 1
if err := writeMetaFile(d, meta); err != nil { if err := writeMetaFile(logger, d, meta); err != nil {
return wrapErr(err, d) return wrapErr(err, d)
} }
} }


@ -22,9 +22,12 @@ import (
"path/filepath" "path/filepath"
"sync" "sync"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/tsdb/encoding" "github.com/prometheus/tsdb/encoding"
tsdb_errors "github.com/prometheus/tsdb/errors" tsdb_errors "github.com/prometheus/tsdb/errors"
"github.com/prometheus/tsdb/fileutil"
) )
const tombstoneFilename = "tombstones" const tombstoneFilename = "tombstones"
@ -51,7 +54,7 @@ type TombstoneReader interface {
Close() error Close() error
} }
func writeTombstoneFile(dir string, tr TombstoneReader) error { func writeTombstoneFile(logger log.Logger, dir string, tr TombstoneReader) error {
path := filepath.Join(dir, tombstoneFilename) path := filepath.Join(dir, tombstoneFilename)
tmp := path + ".tmp" tmp := path + ".tmp"
hash := newCRC32() hash := newCRC32()
@ -62,7 +65,12 @@ func writeTombstoneFile(dir string, tr TombstoneReader) error {
} }
defer func() { defer func() {
if f != nil { if f != nil {
f.Close() if err := f.Close(); err != nil {
level.Error(logger).Log("msg", "close tmp file", "err", err.Error())
}
}
if err := os.RemoveAll(tmp); err != nil {
level.Error(logger).Log("msg", "remove tmp file", "err", err.Error())
} }
}() }()
@ -111,7 +119,7 @@ func writeTombstoneFile(dir string, tr TombstoneReader) error {
return err return err
} }
f = nil f = nil
return renameFile(tmp, path) return fileutil.Replace(tmp, path)
} }
// Stone holds the information on the posting and time-range // Stone holds the information on the posting and time-range


@ -338,6 +338,12 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error {
if err != nil { if err != nil {
return errors.Wrap(err, "create compaction segment") return errors.Wrap(err, "create compaction segment")
} }
defer func() {
if err := os.RemoveAll(f.Name()); err != nil {
level.Error(w.logger).Log("msg", "remove tmp file", "err", err.Error())
}
}()
var ( var (
csf = newSegmentFile(f) csf = newSegmentFile(f)
crc32 = newCRC32() crc32 = newCRC32()
@ -389,7 +395,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error {
csf.Close() csf.Close()
candidates[0].Close() // need close before remove on platform windows candidates[0].Close() // need close before remove on platform windows
if err := renameFile(csf.Name(), candidates[0].Name()); err != nil { if err := fileutil.Replace(csf.Name(), candidates[0].Name()); err != nil {
return errors.Wrap(err, "rename compaction segment") return errors.Wrap(err, "rename compaction segment")
} }
for _, f := range candidates[1:] { for _, f := range candidates[1:] {


@ -159,9 +159,9 @@ type WAL struct {
logger log.Logger logger log.Logger
segmentSize int segmentSize int
mtx sync.RWMutex mtx sync.RWMutex
segment *Segment // active segment segment *Segment // Active segment.
donePages int // pages written to the segment donePages int // Pages written to the segment.
page *page // active page page *page // Active page.
stopc chan chan struct{} stopc chan chan struct{}
actorc chan func() actorc chan func()
closed bool // To allow calling Close() more than once without blocking. closed bool // To allow calling Close() more than once without blocking.
@ -171,6 +171,7 @@ type WAL struct {
pageCompletions prometheus.Counter pageCompletions prometheus.Counter
truncateFail prometheus.Counter truncateFail prometheus.Counter
truncateTotal prometheus.Counter truncateTotal prometheus.Counter
currentSegment prometheus.Gauge
} }
// New returns a new WAL over the given directory. // New returns a new WAL over the given directory.
@ -218,8 +219,12 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi
Name: "prometheus_tsdb_wal_truncations_total", Name: "prometheus_tsdb_wal_truncations_total",
Help: "Total number of WAL truncations attempted.", Help: "Total number of WAL truncations attempted.",
}) })
w.currentSegment = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_tsdb_wal_segment_current",
Help: "WAL segment index that TSDB is currently writing to.",
})
if reg != nil { if reg != nil {
reg.MustRegister(w.fsyncDuration, w.pageFlushes, w.pageCompletions, w.truncateFail, w.truncateTotal) reg.MustRegister(w.fsyncDuration, w.pageFlushes, w.pageCompletions, w.truncateFail, w.truncateTotal, w.currentSegment)
} }
_, j, err := w.Segments() _, j, err := w.Segments()
@ -413,7 +418,7 @@ func (w *WAL) setSegment(segment *Segment) error {
return err return err
} }
w.donePages = int(stat.Size() / pageSize) w.donePages = int(stat.Size() / pageSize)
w.currentSegment.Set(float64(segment.Index()))
return nil return nil
} }
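
Not part of the vendored diff: a sketch of how the new prometheus_tsdb_wal_segment_current gauge becomes visible once a WAL is opened with a registerer. It assumes the vendored wal.New(logger, registerer, dir) constructor and client_golang's Registry.Gather; the no-op logger and temp directory are illustrative plumbing.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	kitlog "github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/tsdb/wal"
)

func main() {
	dir, err := ioutil.TempDir("", "wal-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	reg := prometheus.NewRegistry()
	w, err := wal.New(kitlog.NewNopLogger(), reg, dir)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Log([]byte("a record")); err != nil {
		log.Fatal(err)
	}

	// The gauge reports the index of the segment the WAL is writing to.
	mfs, err := reg.Gather()
	if err != nil {
		log.Fatal(err)
	}
	for _, mf := range mfs {
		if mf.GetName() == "prometheus_tsdb_wal_segment_current" {
			fmt.Println(mf.GetName(), "=", mf.GetMetric()[0].GetGauge().GetValue())
		}
	}
}
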
@ -426,10 +431,10 @@ func (w *WAL) flushPage(clear bool) error {
p := w.page p := w.page
clear = clear || p.full() clear = clear || p.full()
// No more data will fit into the page. Enqueue and clear it. // No more data will fit into the page or an implicit clear.
// Enqueue and clear it.
if clear { if clear {
p.alloc = pageSize // Write till end of page. p.alloc = pageSize // Write till end of page.
w.pageCompletions.Inc()
} }
n, err := w.segment.Write(p.buf[p.flushed:p.alloc]) n, err := w.segment.Write(p.buf[p.flushed:p.alloc])
if err != nil { if err != nil {
@ -445,6 +450,7 @@ func (w *WAL) flushPage(clear bool) error {
p.alloc = 0 p.alloc = 0
p.flushed = 0 p.flushed = 0
w.donePages++ w.donePages++
w.pageCompletions.Inc()
} }
return nil return nil
} }
@ -495,10 +501,18 @@ func (w *WAL) Log(recs ...[]byte) error {
return nil return nil
} }
// log writes rec to the log and forces a flush of the current page if its // log writes rec to the log and forces a flush of the current page if:
// the final record of a batch, the record is bigger than the page size or // - the final record of a batch
// the current page is full. // - the record is bigger than the page size
// - the current page is full.
func (w *WAL) log(rec []byte, final bool) error { func (w *WAL) log(rec []byte, final bool) error {
// When the last page flush failed the page will remain full.
// When the page is full, need to flush it before trying to add more records to it.
if w.page.full() {
if err := w.flushPage(true); err != nil {
return err
}
}
// If the record is too big to fit within the active page in the current // If the record is too big to fit within the active page in the current
// segment, terminate the active segment and advance to the next one. // segment, terminate the active segment and advance to the next one.
// This ensures that records do not cross segment boundaries. // This ensures that records do not cross segment boundaries.
@ -606,7 +620,7 @@ func (w *WAL) Close() (err error) {
defer w.mtx.Unlock() defer w.mtx.Unlock()
if w.closed { if w.closed {
return nil return errors.New("wal already closed")
} }
// Flush the last page and zero out all its remaining size. // Flush the last page and zero out all its remaining size.

vendor/modules.txt vendored

@ -50,7 +50,7 @@ github.com/aws/aws-sdk-go/aws/credentials/endpointcreds
github.com/aws/aws-sdk-go/private/protocol/rest github.com/aws/aws-sdk-go/private/protocol/rest
github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/private/protocol/query/queryutil
github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 # github.com/beorn7/perks v1.0.0
github.com/beorn7/perks/quantile github.com/beorn7/perks/quantile
# github.com/census-instrumentation/opencensus-proto v0.2.0 # github.com/census-instrumentation/opencensus-proto v0.2.0
github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1 github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1
@ -217,7 +217,7 @@ github.com/petermattis/goid
github.com/pkg/errors github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0 # github.com/pmezard/go-difflib v1.0.0
github.com/pmezard/go-difflib/difflib github.com/pmezard/go-difflib/difflib
# github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 # github.com/prometheus/client_golang v0.9.3
github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api
github.com/prometheus/client_golang/api/prometheus/v1 github.com/prometheus/client_golang/api/prometheus/v1
@ -225,7 +225,7 @@ github.com/prometheus/client_golang/prometheus/promhttp
github.com/prometheus/client_golang/prometheus/promauto github.com/prometheus/client_golang/prometheus/promauto
github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil
# github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f # github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
github.com/prometheus/client_model/go github.com/prometheus/client_model/go
# github.com/prometheus/common v0.4.0 # github.com/prometheus/common v0.4.0
github.com/prometheus/common/model github.com/prometheus/common/model
@ -237,12 +237,10 @@ github.com/prometheus/common/expfmt
github.com/prometheus/common/route github.com/prometheus/common/route
github.com/prometheus/common/server github.com/prometheus/common/server
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
# github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 # github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084
github.com/prometheus/procfs github.com/prometheus/procfs
github.com/prometheus/procfs/nfs github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/xfs # github.com/prometheus/tsdb v0.8.0
github.com/prometheus/procfs/internal/util
# github.com/prometheus/tsdb v0.7.1
github.com/prometheus/tsdb github.com/prometheus/tsdb
github.com/prometheus/tsdb/fileutil github.com/prometheus/tsdb/fileutil
github.com/prometheus/tsdb/labels github.com/prometheus/tsdb/labels