Merge branch 'master' into promql

commit eeca323d24
Author: Fabian Reinartz
Date:   2015-05-06 13:04:54 +02:00
24 changed files with 720 additions and 170 deletions

.gitignore vendored
View file

@@ -4,6 +4,7 @@
 *.cgo*.c
 *.cgo*.go
 *.conf
+*.rules
 *.exe
 *.orig
 *.pyc

View file

@@ -1,3 +1,13 @@
+## 0.13.2 / 2015-05-05
+* [MAINTENANCE] Updated vendored dependcies to their newest versions.
+* [MAINTENANCE] Include rule_checker and console templates in release tarball.
+* [BUGFIX] Sort NaN as the lowest value.
+* [ENHANCEMENT] Add square root, stddev and stdvar functions.
+* [BUGFIX] Use scrape_timeout for scrape timeout, not scrape_interval.
+* [ENHANCEMENT] Improve chunk and chunkDesc loading, increase performance when
+  reading from disk.
+* [BUGFIX] Show correct error on wrong DNS response.
+
 ## 0.13.1 / 2015-04-09
 * [BUGFIX] Treat memory series with zero chunks correctly in series maintenance.
 * [ENHANCEMENT] Improve readability of usage text even more.

View file

@@ -1,30 +1,35 @@
-FROM golang:1.4
+FROM alpine:edge
 MAINTAINER The Prometheus Authors <prometheus-developers@googlegroups.com>

-RUN apt-get -qy update && apt-get -qy install vim-common && rm -rf /var/lib/apt/lists/* && \
-    go get github.com/tools/godep
-WORKDIR /go/src/github.com/prometheus/prometheus
-ADD . /go/src/github.com/prometheus/prometheus
-RUN godep restore && go get -d
-RUN ./utility/embed-static.sh web/static web/templates | gofmt > web/blob/files.go
-RUN go build -ldflags " \
+ENV GOPATH /go
+COPY . /go/src/github.com/prometheus/prometheus
+RUN apk add --update -t build-deps go git mercurial vim \
+    && apk add -u musl && rm -rf /var/cache/apk/* \
+    && go get github.com/tools/godep \
+    && cd /go/src/github.com/prometheus/prometheus \
+    && $GOPATH/bin/godep restore && go get -d \
+    && ./utility/embed-static.sh web/static web/templates | gofmt > web/blob/files.go \
+    && go build -ldflags " \
     -X main.buildVersion $(cat VERSION) \
     -X main.buildRevision $(git rev-parse --short HEAD) \
     -X main.buildBranch $(git rev-parse --abbrev-ref HEAD) \
     -X main.buildUser root \
     -X main.buildDate $(date +%Y%m%d-%H:%M:%S) \
-    -X main.goVersion $GOLANG_VERSION \
-    "
-RUN cd tools/rule_checker && go build
-ADD ./documentation/examples/prometheus.conf /prometheus.conf
+    -X main.goVersion $(go version | awk '{print substr($3,3)}') \
+    " -o /bin/prometheus \
+    && cd tools/rule_checker && go build -o /bin/rule_checker && cd ../.. \
+    && mkdir -p /etc/prometheus \
+    && mv ./documentation/examples/prometheus.conf /etc/prometheus/prometheus.conf \
+    && mv ./console_libraries/ ./consoles/ /etc/prometheus/ \
+    && rm -rf /go \
+    && apk del --purge build-deps

 EXPOSE 9090
 VOLUME [ "/prometheus" ]
 WORKDIR /prometheus
-ENTRYPOINT [ "/go/src/github.com/prometheus/prometheus/prometheus" ]
-CMD [ "-logtostderr", "-config.file=/prometheus.conf", \
+ENTRYPOINT [ "/bin/prometheus" ]
+CMD [ "-logtostderr", "-config.file=/etc/prometheus/prometheus.conf", \
      "-storage.local.path=/prometheus", \
-     "-web.console.libraries=/go/src/github.com/prometheus/prometheus/console_libraries", \
-     "-web.console.templates=/go/src/github.com/prometheus/prometheus/consoles" ]
+     "-web.console.libraries=/etc/prometheus/console_libraries", \
+     "-web.console.templates=/etc/prometheus/consoles" ]

Godeps/Godeps.json generated
View file

@@ -33,23 +33,23 @@
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_golang/extraction",
-			"Comment": "0.4.0-1-g692492e",
-			"Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
+			"Comment": "0.5.0",
+			"Rev": "b0bd7e1be33327b85cb4853e7011156e3cedd657"
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_golang/model",
-			"Comment": "0.4.0-1-g692492e",
-			"Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
+			"Comment": "0.5.0",
+			"Rev": "b0bd7e1be33327b85cb4853e7011156e3cedd657"
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_golang/prometheus",
-			"Comment": "0.4.0-1-g692492e",
-			"Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
+			"Comment": "0.5.0",
+			"Rev": "b0bd7e1be33327b85cb4853e7011156e3cedd657"
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_golang/text",
-			"Comment": "0.4.0-1-g692492e",
-			"Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
+			"Comment": "0.5.0",
+			"Rev": "b0bd7e1be33327b85cb4853e7011156e3cedd657"
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_model/go",

View file

@@ -26,14 +26,30 @@ const (
 	// timeseries.
 	MetricNameLabel LabelName = "__name__"

+	// AddressLabel is the name of the label that holds the address of
+	// a scrape target.
+	AddressLabel LabelName = "__address__"
+
+	// MetricsPathLabel is the name of the label that holds the path on which to
+	// scrape a target.
+	MetricsPathLabel LabelName = "__metrics_path__"
+
 	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
 	// label names.
 	ReservedLabelPrefix = "__"

+	// MetaLabelPrefix is a prefix for labels that provide meta information.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series.
+	MetaLabelPrefix = "__meta_"
+
 	// JobLabel is the label name indicating the job from which a timeseries
 	// was scraped.
 	JobLabel LabelName = "job"

+	// InstanceLabel is the label name used for the instance label.
+	InstanceLabel LabelName = "instance"
+
 	// BucketLabel is used for the label that defines the upper bound of a
 	// bucket of a histogram ("le" -> "less or equal").
 	BucketLabel = "le"
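A hedged usage sketch of the new reserved label names (not part of the commit; the target values are made up, and the aliased vendored import mirrors the storage code later in this diff):

package main

import (
	"fmt"
	"strings"

	clientmodel "github.com/prometheus/client_golang/model"
)

func main() {
	// A hypothetical scrape target as the retrieval layer might describe it.
	target := clientmodel.LabelSet{
		clientmodel.AddressLabel:     "localhost:9090",
		clientmodel.MetricsPathLabel: "/metrics",
		clientmodel.JobLabel:         "prometheus",
		"__meta_example_source":      "static", // assumed meta label for illustration
	}
	for ln := range target {
		if strings.HasPrefix(string(ln), clientmodel.MetaLabelPrefix) {
			// Meta labels carry intermediate information only and are not
			// attached to stored time series.
			fmt.Println("meta label:", ln)
		}
	}
}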

View file

@@ -26,14 +26,68 @@ var separator = []byte{0}
 // a singleton and refers to one and only one stream of samples.
 type Metric map[LabelName]LabelValue

-// Equal compares the fingerprints of both metrics.
+// Equal compares the metrics.
 func (m Metric) Equal(o Metric) bool {
-	return m.Fingerprint().Equal(o.Fingerprint())
+	if len(m) != len(o) {
+		return false
+	}
+	for ln, lv := range m {
+		olv, ok := o[ln]
+		if !ok {
+			return false
+		}
+		if olv != lv {
+			return false
+		}
+	}
+	return true
 }

-// Before compares the fingerprints of both metrics.
+// Before compares the metrics, using the following criteria:
+//
+// If m has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in m, then m is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If m and o are equal, the method returns false.
 func (m Metric) Before(o Metric) bool {
-	return m.Fingerprint().Less(o.Fingerprint())
+	if len(m) < len(o) {
+		return true
+	}
+	if len(m) > len(o) {
+		return false
+	}
+	lns := make(LabelNames, 0, len(m)+len(o))
+	for ln := range m {
+		lns = append(lns, ln)
+	}
+	for ln := range o {
+		lns = append(lns, ln)
+	}
+	// It's probably not worth it to de-dup lns.
+	sort.Sort(lns)
+	for _, ln := range lns {
+		mlv, ok := m[ln]
+		if !ok {
+			return true
+		}
+		olv, ok := o[ln]
+		if !ok {
+			return false
+		}
+		if mlv < olv {
+			return true
+		}
+		if mlv > olv {
+			return false
+		}
+	}
+	return false
 }

 // String implements Stringer.
@@ -67,6 +121,12 @@ func (m Metric) Fingerprint() Fingerprint {
 	return metricToFingerprint(m)
 }

+// Fingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+	return metricToFastFingerprint(m)
+}
+
 // Clone returns a copy of the Metric.
 func (m Metric) Clone() Metric {
 	clone := Metric{}
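Equal and Before now compare label sets directly instead of fingerprints. A hedged sketch of the resulting ordering (not part of the commit; label names and values are made up, import alias as used in the storage files below):

package main

import (
	"fmt"

	clientmodel "github.com/prometheus/client_golang/model"
)

func main() {
	a := clientmodel.Metric{"a": "1"}
	fmt.Println(a.Before(clientmodel.Metric{"a": "1", "b": "2"})) // true: fewer labels sort first
	fmt.Println(a.Before(clientmodel.Metric{"a": "2"}))           // true: value "1" < "2"
	fmt.Println(clientmodel.Metric{"b": "1"}.Before(a))           // true: label "a" is absent from the receiver
	fmt.Println(a.Equal(clientmodel.Metric{"a": "1"}))            // true: label-by-label comparison, no fingerprints
}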

View file

@@ -19,10 +19,12 @@ func testMetric(t testing.TB) {
 	var scenarios = []struct {
 		input           Metric
 		fingerprint     Fingerprint
+		fastFingerprint Fingerprint
 	}{
 		{
 			input:           Metric{},
 			fingerprint:     14695981039346656037,
+			fastFingerprint: 14695981039346656037,
 		},
 		{
 			input: Metric{
@@ -30,27 +32,31 @@ func testMetric(t testing.TB) {
 				"occupation":   "robot",
 				"manufacturer": "westinghouse",
 			},
-			fingerprint:     11310079640881077873,
+			fingerprint:     5911716720268894962,
+			fastFingerprint: 11310079640881077873,
 		},
 		{
 			input: Metric{
 				"x": "y",
 			},
-			fingerprint:     13948396922932177635,
+			fingerprint:     8241431561484471700,
+			fastFingerprint: 13948396922932177635,
 		},
 		{
 			input: Metric{
 				"a": "bb",
 				"b": "c",
 			},
-			fingerprint:     3198632812309449502,
+			fingerprint:     3016285359649981711,
+			fastFingerprint: 3198632812309449502,
 		},
 		{
 			input: Metric{
 				"a":  "b",
 				"bb": "c",
 			},
-			fingerprint:     5774953389407657638,
+			fingerprint:     7122421792099404749,
+			fastFingerprint: 5774953389407657638,
 		},
 	}

@@ -58,6 +64,9 @@ func testMetric(t testing.TB) {
 		if scenario.fingerprint != scenario.input.Fingerprint() {
 			t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, scenario.input.Fingerprint())
 		}
+		if scenario.fastFingerprint != scenario.input.FastFingerprint() {
+			t.Errorf("%d. expected %d, got %d", i, scenario.fastFingerprint, scenario.input.FastFingerprint())
+		}
 	}
 }

View file

@@ -21,42 +21,36 @@ import (
 func TestSamplesSort(t *testing.T) {
 	input := Samples{
 		&Sample{
-			// Fingerprint: 81f9c9ed24563f8f.
 			Metric: Metric{
 				MetricNameLabel: "A",
 			},
 			Timestamp: 1,
 		},
 		&Sample{
-			// Fingerprint: 81f9c9ed24563f8f.
 			Metric: Metric{
 				MetricNameLabel: "A",
 			},
 			Timestamp: 2,
 		},
 		&Sample{
-			// Fingerprint: 1bf6c9ed24543f8f.
 			Metric: Metric{
 				MetricNameLabel: "C",
 			},
 			Timestamp: 1,
 		},
 		&Sample{
-			// Fingerprint: 1bf6c9ed24543f8f.
 			Metric: Metric{
 				MetricNameLabel: "C",
 			},
 			Timestamp: 2,
 		},
 		&Sample{
-			// Fingerprint: 68f4c9ed24533f8f.
 			Metric: Metric{
 				MetricNameLabel: "B",
 			},
 			Timestamp: 1,
 		},
 		&Sample{
-			// Fingerprint: 68f4c9ed24533f8f.
 			Metric: Metric{
 				MetricNameLabel: "B",
 			},
@@ -66,47 +60,41 @@ func TestSamplesSort(t *testing.T) {
 	expected := Samples{
 		&Sample{
-			// Fingerprint: 1bf6c9ed24543f8f.
-			Metric: Metric{
-				MetricNameLabel: "C",
-			},
-			Timestamp: 1,
-		},
-		&Sample{
-			// Fingerprint: 1bf6c9ed24543f8f.
-			Metric: Metric{
-				MetricNameLabel: "C",
-			},
-			Timestamp: 2,
-		},
-		&Sample{
-			// Fingerprint: 68f4c9ed24533f8f.
-			Metric: Metric{
-				MetricNameLabel: "B",
-			},
-			Timestamp: 1,
-		},
-		&Sample{
-			// Fingerprint: 68f4c9ed24533f8f.
-			Metric: Metric{
-				MetricNameLabel: "B",
-			},
-			Timestamp: 2,
-		},
-		&Sample{
-			// Fingerprint: 81f9c9ed24563f8f.
 			Metric: Metric{
 				MetricNameLabel: "A",
 			},
 			Timestamp: 1,
 		},
 		&Sample{
-			// Fingerprint: 81f9c9ed24563f8f.
 			Metric: Metric{
 				MetricNameLabel: "A",
 			},
 			Timestamp: 2,
 		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "B",
+			},
+			Timestamp: 1,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "B",
+			},
+			Timestamp: 2,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "C",
+			},
+			Timestamp: 1,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "C",
+			},
+			Timestamp: 2,
+		},
 	}

 	sort.Sort(input)

View file

@@ -17,6 +17,7 @@ import (
 	"bytes"
 	"hash"
 	"hash/fnv"
+	"sort"
 	"sync"
 )

@@ -46,30 +47,37 @@ func getHashAndBuf() *hashAndBuf {
 }

 func putHashAndBuf(hb *hashAndBuf) {
+	hb.h.Reset()
+	hb.b.Reset()
 	hashAndBufPool.Put(hb)
 }

-// LabelsToSignature returns a unique signature (i.e., fingerprint) for a given
-// label set.
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
 func LabelsToSignature(labels map[string]string) uint64 {
 	if len(labels) == 0 {
 		return emptyLabelSignature
 	}

-	var result uint64
+	labelNames := make([]string, 0, len(labels))
+	for labelName := range labels {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Strings(labelNames)
+
 	hb := getHashAndBuf()
 	defer putHashAndBuf(hb)

-	for labelName, labelValue := range labels {
+	for _, labelName := range labelNames {
 		hb.b.WriteString(labelName)
 		hb.b.WriteByte(SeparatorByte)
-		hb.b.WriteString(labelValue)
+		hb.b.WriteString(labels[labelName])
+		hb.b.WriteByte(SeparatorByte)
 		hb.h.Write(hb.b.Bytes())
-		result ^= hb.h.Sum64()
-		hb.h.Reset()
 		hb.b.Reset()
 	}
-	return result
+	return hb.h.Sum64()
 }

 // metricToFingerprint works exactly as LabelsToSignature but takes a Metric as
@@ -79,6 +87,34 @@ func metricToFingerprint(m Metric) Fingerprint {
 		return Fingerprint(emptyLabelSignature)
 	}

+	labelNames := make(LabelNames, 0, len(m))
+	for labelName := range m {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Sort(labelNames)
+
+	hb := getHashAndBuf()
+	defer putHashAndBuf(hb)
+
+	for _, labelName := range labelNames {
+		hb.b.WriteString(string(labelName))
+		hb.b.WriteByte(SeparatorByte)
+		hb.b.WriteString(string(m[labelName]))
+		hb.b.WriteByte(SeparatorByte)
+		hb.h.Write(hb.b.Bytes())
+		hb.b.Reset()
+	}
+	return Fingerprint(hb.h.Sum64())
+}
+
+// metricToFastFingerprint works similar to metricToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more susceptible to
+// create hash collisions. Therefore, collision detection should be applied.
+func metricToFastFingerprint(m Metric) Fingerprint {
+	if len(m) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
 	var result uint64
 	hb := getHashAndBuf()
 	defer putHashAndBuf(hb)
@@ -97,13 +133,15 @@ func metricToFingerprint(m Metric) Fingerprint {

 // SignatureForLabels works like LabelsToSignature but takes a Metric as
 // parameter (rather than a label map) and only includes the labels with the
-// specified LabelNames into the signature calculation.
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
 func SignatureForLabels(m Metric, labels LabelNames) uint64 {
 	if len(m) == 0 || len(labels) == 0 {
 		return emptyLabelSignature
 	}

-	var result uint64
+	sort.Sort(labels)
+
 	hb := getHashAndBuf()
 	defer putHashAndBuf(hb)

@@ -111,12 +149,11 @@ func SignatureForLabels(m Metric, labels LabelNames) uint64 {
 		hb.b.WriteString(string(label))
 		hb.b.WriteByte(SeparatorByte)
 		hb.b.WriteString(string(m[label]))
+		hb.b.WriteByte(SeparatorByte)
 		hb.h.Write(hb.b.Bytes())
-		result ^= hb.h.Sum64()
-		hb.h.Reset()
 		hb.b.Reset()
 	}
-	return result
+	return hb.h.Sum64()
 }

 // SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
@@ -127,24 +164,27 @@ func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
 		return emptyLabelSignature
 	}

-	var result uint64
+	labelNames := make(LabelNames, 0, len(m))
+	for labelName := range m {
+		if _, exclude := labels[labelName]; !exclude {
+			labelNames = append(labelNames, labelName)
+		}
+	}
+	if len(labelNames) == 0 {
+		return emptyLabelSignature
+	}
+	sort.Sort(labelNames)
+
 	hb := getHashAndBuf()
 	defer putHashAndBuf(hb)

-	for labelName, labelValue := range m {
-		if _, exclude := labels[labelName]; exclude {
-			continue
-		}
+	for _, labelName := range labelNames {
 		hb.b.WriteString(string(labelName))
 		hb.b.WriteByte(SeparatorByte)
-		hb.b.WriteString(string(labelValue))
+		hb.b.WriteString(string(m[labelName]))
+		hb.b.WriteByte(SeparatorByte)
 		hb.h.Write(hb.b.Bytes())
-		result ^= hb.h.Sum64()
-		hb.h.Reset()
 		hb.b.Reset()
 	}
-	if result == 0 {
-		return emptyLabelSignature
-	}
-	return result
+	return hb.h.Sum64()
 }
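Net effect of this file: the canonical signature is now an order-independent FNV sum over the sorted label pairs, while the old XOR-of-per-pair-hashes scheme lives on as the "fast" fingerprint used by the local storage. A hedged sketch (not part of the commit) tying this to the fixture values from the test file below:

package main

import (
	"fmt"

	clientmodel "github.com/prometheus/client_golang/model"
)

func main() {
	labels := map[string]string{"name": "garland, briggs", "fear": "love is not enough"}
	m := clientmodel.Metric{"name": "garland, briggs", "fear": "love is not enough"}

	fmt.Println(clientmodel.LabelsToSignature(labels)) // 5799056148416392346 (new sorted scheme)
	fmt.Println(uint64(m.Fingerprint()))               // 5799056148416392346 (same scheme)
	fmt.Println(uint64(m.FastFingerprint()))           // 12952432476264840823 (old XOR scheme)
}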

View file

@@ -30,7 +30,7 @@ func TestLabelsToSignature(t *testing.T) {
 		},
 		{
 			in:  map[string]string{"name": "garland, briggs", "fear": "love is not enough"},
-			out: 12952432476264840823,
+			out: 5799056148416392346,
 		},
 	}

@@ -54,7 +54,7 @@ func TestMetricToFingerprint(t *testing.T) {
 		},
 		{
 			in:  Metric{"name": "garland, briggs", "fear": "love is not enough"},
-			out: 12952432476264840823,
+			out: 5799056148416392346,
 		},
 	}

@@ -67,6 +67,30 @@ func TestMetricToFingerprint(t *testing.T) {
 		}
 	}

func TestMetricToFastFingerprint(t *testing.T) {
var scenarios = []struct {
in Metric
out Fingerprint
}{
{
in: Metric{},
out: 14695981039346656037,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
out: 12952432476264840823,
},
}
for i, scenario := range scenarios {
actual := metricToFastFingerprint(scenario.in)
if actual != scenario.out {
t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
}
}
}
 func TestSignatureForLabels(t *testing.T) {
 	var scenarios = []struct {
 		in Metric
@@ -81,12 +105,12 @@ func TestSignatureForLabels(t *testing.T) {
 		{
 			in:     Metric{"name": "garland, briggs", "fear": "love is not enough"},
 			labels: LabelNames{"fear", "name"},
-			out:    12952432476264840823,
+			out:    5799056148416392346,
 		},
 		{
 			in:     Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
 			labels: LabelNames{"fear", "name"},
-			out:    12952432476264840823,
+			out:    5799056148416392346,
 		},
 		{
 			in:     Metric{"name": "garland, briggs", "fear": "love is not enough"},
@@ -128,17 +152,17 @@ func TestSignatureWithoutLabels(t *testing.T) {
 		{
 			in:     Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
 			labels: map[LabelName]struct{}{"foo": struct{}{}},
-			out:    12952432476264840823,
+			out:    5799056148416392346,
 		},
 		{
 			in:     Metric{"name": "garland, briggs", "fear": "love is not enough"},
 			labels: map[LabelName]struct{}{},
-			out:    12952432476264840823,
+			out:    5799056148416392346,
 		},
 		{
 			in:     Metric{"name": "garland, briggs", "fear": "love is not enough"},
 			labels: nil,
-			out:    12952432476264840823,
+			out:    5799056148416392346,
 		},
 	}

@@ -164,15 +188,15 @@ func BenchmarkLabelToSignatureScalar(b *testing.B) {
 }

 func BenchmarkLabelToSignatureSingle(b *testing.B) {
-	benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5147259542624943964)
+	benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5146282821936882169)
 }

 func BenchmarkLabelToSignatureDouble(b *testing.B) {
-	benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528)
+	benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717)
 }

 func BenchmarkLabelToSignatureTriple(b *testing.B) {
-	benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676)
+	benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121)
 }

 func benchmarkMetricToFingerprint(b *testing.B, m Metric, e Fingerprint) {
@@ -188,15 +212,39 @@ func BenchmarkMetricToFingerprintScalar(b *testing.B) {
 }

 func BenchmarkMetricToFingerprintSingle(b *testing.B) {
-	benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value"}, 5147259542624943964)
+	benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value"}, 5146282821936882169)
 }

 func BenchmarkMetricToFingerprintDouble(b *testing.B) {
-	benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528)
+	benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717)
 }

 func BenchmarkMetricToFingerprintTriple(b *testing.B) {
-	benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676)
+	benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121)
}
func benchmarkMetricToFastFingerprint(b *testing.B, m Metric, e Fingerprint) {
for i := 0; i < b.N; i++ {
if a := metricToFastFingerprint(m); a != e {
b.Fatalf("expected signature of %d for %s, got %d", e, m, a)
}
}
}
func BenchmarkMetricToFastFingerprintScalar(b *testing.B) {
benchmarkMetricToFastFingerprint(b, nil, 14695981039346656037)
}
func BenchmarkMetricToFastFingerprintSingle(b *testing.B) {
benchmarkMetricToFastFingerprint(b, Metric{"first-label": "first-label-value"}, 5147259542624943964)
}
func BenchmarkMetricToFastFingerprintDouble(b *testing.B) {
benchmarkMetricToFastFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528)
}
func BenchmarkMetricToFastFingerprintTriple(b *testing.B) {
benchmarkMetricToFastFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676)
 }

 func TestEmptyLabelSignature(t *testing.T) {
@@ -218,7 +266,7 @@ func TestEmptyLabelSignature(t *testing.T) {
 	}
 }

-func benchmarkMetricToFingerprintConc(b *testing.B, m Metric, e Fingerprint, concLevel int) {
+func benchmarkMetricToFastFingerprintConc(b *testing.B, m Metric, e Fingerprint, concLevel int) {
 	var start, end sync.WaitGroup
 	start.Add(1)
 	end.Add(concLevel)
@@ -227,7 +275,7 @@ func benchmarkMetricToFingerprintConc(b *testing.B, m Metric, e Fingerprint, con
 		go func() {
 			start.Wait()
 			for j := b.N / concLevel; j >= 0; j-- {
-				if a := metricToFingerprint(m); a != e {
+				if a := metricToFastFingerprint(m); a != e {
 					b.Fatalf("expected signature of %d for %s, got %d", e, m, a)
 				}
 			}
@@ -239,18 +287,18 @@ func benchmarkMetricToFingerprintConc(b *testing.B, m Metric, e Fingerprint, con
 	end.Wait()
 }

-func BenchmarkMetricToFingerprintTripleConc1(b *testing.B) {
-	benchmarkMetricToFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1)
+func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) {
+	benchmarkMetricToFastFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1)
 }

-func BenchmarkMetricToFingerprintTripleConc2(b *testing.B) {
-	benchmarkMetricToFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2)
+func BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) {
+	benchmarkMetricToFastFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2)
 }

-func BenchmarkMetricToFingerprintTripleConc4(b *testing.B) {
-	benchmarkMetricToFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4)
+func BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) {
+	benchmarkMetricToFastFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4)
 }

-func BenchmarkMetricToFingerprintTripleConc8(b *testing.B) {
-	benchmarkMetricToFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8)
+func BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) {
+	benchmarkMetricToFastFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8)
 }

View file

@@ -33,7 +33,7 @@ type Counter interface {
 	// Set is used to set the Counter to an arbitrary value. It is only used
 	// if you have to transfer a value from an external counter into this
-	// Prometheus metrics. Do not use it for regular handling of a
+	// Prometheus metric. Do not use it for regular handling of a
 	// Prometheus counter (as it can be used to break the contract of
 	// monotonically increasing values).
 	Set(float64)
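For context, a hedged usage sketch (not part of the commit) of the Counter contract this comment describes; it assumes the CounterOpts constructor of this client_golang version:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounter(prometheus.CounterOpts{ // CounterOpts assumed from this library version
		Name: "api_requests_total",
		Help: "Total API requests handled.",
	})
	requests.Inc()   // regular handling of a Prometheus counter
	requests.Add(42) // also fine: monotonically increasing
	// requests.Set(1234) is only for mirroring an external counter into this
	// Prometheus metric; used anywhere else it breaks monotonicity.
}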

View file

@@ -455,6 +455,56 @@ func ExampleSummaryVec() {
 	// ]
 }

func ExampleConstSummary() {
desc := prometheus.NewDesc(
"http_request_duration_seconds",
"A summary of the HTTP request durations.",
[]string{"code", "method"},
prometheus.Labels{"owner": "example"},
)
// Create a constant summary from values we got from a 3rd party telemetry system.
s := prometheus.MustNewConstSummary(
desc,
4711, 403.34,
map[float64]float64{0.5: 42.3, 0.9: 323.3},
"200", "get",
)
// Just for demonstration, let's check the state of the summary by
// (ab)using its Write method (which is usually only used by Prometheus
// internally).
metric := &dto.Metric{}
s.Write(metric)
fmt.Println(proto.MarshalTextString(metric))
// Output:
// label: <
// name: "code"
// value: "200"
// >
// label: <
// name: "method"
// value: "get"
// >
// label: <
// name: "owner"
// value: "example"
// >
// summary: <
// sample_count: 4711
// sample_sum: 403.34
// quantile: <
// quantile: 0.5
// value: 42.3
// >
// quantile: <
// quantile: 0.9
// value: 323.3
// >
// >
}
 func ExampleHistogram() {
 	temps := prometheus.NewHistogram(prometheus.HistogramOpts{
 		Name: "pond_temperature_celsius",
@@ -501,6 +551,64 @@ func ExampleHistogram() {
 	// >
 }

func ExampleConstHistogram() {
desc := prometheus.NewDesc(
"http_request_duration_seconds",
"A histogram of the HTTP request durations.",
[]string{"code", "method"},
prometheus.Labels{"owner": "example"},
)
// Create a constant histogram from values we got from a 3rd party telemetry system.
h := prometheus.MustNewConstHistogram(
desc,
4711, 403.34,
map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233},
"200", "get",
)
// Just for demonstration, let's check the state of the histogram by
// (ab)using its Write method (which is usually only used by Prometheus
// internally).
metric := &dto.Metric{}
h.Write(metric)
fmt.Println(proto.MarshalTextString(metric))
// Output:
// label: <
// name: "code"
// value: "200"
// >
// label: <
// name: "method"
// value: "get"
// >
// label: <
// name: "owner"
// value: "example"
// >
// histogram: <
// sample_count: 4711
// sample_sum: 403.34
// bucket: <
// cumulative_count: 121
// upper_bound: 25
// >
// bucket: <
// cumulative_count: 2403
// upper_bound: 50
// >
// bucket: <
// cumulative_count: 3221
// upper_bound: 100
// >
// bucket: <
// cumulative_count: 4233
// upper_bound: 200
// >
// >
}
 func ExamplePushCollectors() {
 	hostname, _ := os.Hostname()
 	completionTime := prometheus.NewGauge(prometheus.GaugeOpts{

View file

@@ -2,10 +2,13 @@ package prometheus

 import (
 	"runtime"
+	"runtime/debug"
+	"time"
 )

 type goCollector struct {
 	goroutines Gauge
+	gcDesc     *Desc
 }

 // NewGoCollector returns a collector which exports metrics about the current
@@ -16,16 +19,32 @@ func NewGoCollector() *goCollector {
 			Name: "process_goroutines",
 			Help: "Number of goroutines that currently exist.",
 		}),
+		gcDesc: NewDesc(
+			"go_gc_duration_seconds",
+			"A summary of the GC invocation durations.",
+			nil, nil),
 	}
 }

 // Describe returns all descriptions of the collector.
 func (c *goCollector) Describe(ch chan<- *Desc) {
 	ch <- c.goroutines.Desc()
+	ch <- c.gcDesc
 }

 // Collect returns the current state of all metrics of the collector.
 func (c *goCollector) Collect(ch chan<- Metric) {
 	c.goroutines.Set(float64(runtime.NumGoroutine()))
 	ch <- c.goroutines
+
+	var stats debug.GCStats
+	stats.PauseQuantiles = make([]time.Duration, 5)
+	debug.ReadGCStats(&stats)
+
+	quantiles := make(map[float64]float64)
+	for idx, pq := range stats.PauseQuantiles[1:] {
+		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+	}
+	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
 }
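A hedged, standalone sketch (not part of the commit) of the quantile bookkeeping above: with five requested pause quantiles, index 0 is the minimum and the remaining entries map to 0.25, 0.5, 0.75 and 1.0, which is what go_gc_duration_seconds reports:

package main

import (
	"fmt"
	"runtime/debug"
	"time"
)

func main() {
	var stats debug.GCStats
	stats.PauseQuantiles = make([]time.Duration, 5)
	debug.ReadGCStats(&stats)

	// Map rank -> pause duration in seconds, exactly as the collector does.
	quantiles := map[float64]float64{0.0: stats.PauseQuantiles[0].Seconds()}
	for idx, pq := range stats.PauseQuantiles[1:] {
		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
	}
	fmt.Println(stats.NumGC, stats.PauseTotal.Seconds(), quantiles)
}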

View file

@@ -1,7 +1,7 @@
 package prometheus

 import (
-	"reflect"
+	"runtime"
 	"testing"
 	"time"

@@ -35,6 +35,9 @@ func TestGoCollector(t *testing.T) {
 		case Gauge:
 			pb := &dto.Metric{}
 			m.Write(pb)
+			if pb.GetGauge() == nil {
+				continue
+			}

 			if old == -1 {
 				old = int(pb.GetGauge().GetValue())
@@ -48,8 +51,66 @@ func TestGoCollector(t *testing.T) {
 				}
 				return
-			default:
-				t.Errorf("want type Gauge, got %s", reflect.TypeOf(metric))
+			}
+		case <-time.After(1 * time.Second):
+			t.Fatalf("expected collect timed out")
+		}
+	}
+}
+
func TestGCCollector(t *testing.T) {
var (
c = NewGoCollector()
ch = make(chan Metric)
waitc = make(chan struct{})
closec = make(chan struct{})
oldGC uint64
oldPause float64
)
defer close(closec)
go func() {
c.Collect(ch)
// force GC
runtime.GC()
<-waitc
c.Collect(ch)
}()
first := true
for {
select {
case metric := <-ch:
switch m := metric.(type) {
case *constSummary, *value:
pb := &dto.Metric{}
m.Write(pb)
if pb.GetSummary() == nil {
continue
}
if len(pb.GetSummary().Quantile) != 5 {
t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile))
}
for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} {
if *pb.GetSummary().Quantile[idx].Quantile != want {
t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want)
}
}
if first {
first = false
oldGC = *pb.GetSummary().SampleCount
oldPause = *pb.GetSummary().SampleSum
close(waitc)
continue
}
if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 {
t.Errorf("want 1 new garbage collection run, got %d", diff)
}
if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 {
t.Errorf("want moar pause, got %f", diff)
}
return
 			}
 		case <-time.After(1 * time.Second):
 			t.Fatalf("expected collect timed out")

View file

@@ -342,3 +342,102 @@ func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
 func (m *HistogramVec) With(labels Labels) Histogram {
 	return m.MetricVec.With(labels).(Histogram)
 }
type constHistogram struct {
desc *Desc
count uint64
sum float64
buckets map[float64]uint64
labelPairs []*dto.LabelPair
}
func (h *constHistogram) Desc() *Desc {
return h.desc
}
func (h *constHistogram) Write(out *dto.Metric) error {
his := &dto.Histogram{}
buckets := make([]*dto.Bucket, 0, len(h.buckets))
his.SampleCount = proto.Uint64(h.count)
his.SampleSum = proto.Float64(h.sum)
for upperBound, count := range h.buckets {
buckets = append(buckets, &dto.Bucket{
CumulativeCount: proto.Uint64(count),
UpperBound: proto.Float64(upperBound),
})
}
if len(buckets) > 0 {
sort.Sort(buckSort(buckets))
}
his.Bucket = buckets
out.Histogram = his
out.Label = h.labelPairs
return nil
}
// NewConstHistogram returns a metric representing a Prometheus histogram with
// fixed values for the count, sum, and bucket counts. As those parameters
// cannot be changed, the returned value does not implement the Histogram
// interface (but only the Metric interface). Users of this package will not
// have much use for it in regular operations. However, when implementing custom
// Collectors, it is useful as a throw-away metric that is generated on the fly
// to send it to Prometheus in the Collect method.
//
// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
// bucket.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc.
func NewConstHistogram(
desc *Desc,
count uint64,
sum float64,
buckets map[float64]uint64,
labelValues ...string,
) (Metric, error) {
if len(desc.variableLabels) != len(labelValues) {
return nil, errInconsistentCardinality
}
return &constHistogram{
desc: desc,
count: count,
sum: sum,
buckets: buckets,
labelPairs: makeLabelPairs(desc, labelValues),
}, nil
}
// MustNewConstHistogram is a version of NewConstHistogram that panics where
// NewConstMetric would have returned an error.
func MustNewConstHistogram(
desc *Desc,
count uint64,
sum float64,
buckets map[float64]uint64,
labelValues ...string,
) Metric {
m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
if err != nil {
panic(err)
}
return m
}
type buckSort []*dto.Bucket
func (s buckSort) Len() int {
return len(s)
}
func (s buckSort) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s buckSort) Less(i, j int) bool {
return s[i].GetUpperBound() < s[j].GetUpperBound()
}

View file

@@ -448,3 +448,89 @@ func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
 func (m *SummaryVec) With(labels Labels) Summary {
 	return m.MetricVec.With(labels).(Summary)
 }
type constSummary struct {
desc *Desc
count uint64
sum float64
quantiles map[float64]float64
labelPairs []*dto.LabelPair
}
func (s *constSummary) Desc() *Desc {
return s.desc
}
func (s *constSummary) Write(out *dto.Metric) error {
sum := &dto.Summary{}
qs := make([]*dto.Quantile, 0, len(s.quantiles))
sum.SampleCount = proto.Uint64(s.count)
sum.SampleSum = proto.Float64(s.sum)
for rank, q := range s.quantiles {
qs = append(qs, &dto.Quantile{
Quantile: proto.Float64(rank),
Value: proto.Float64(q),
})
}
if len(qs) > 0 {
sort.Sort(quantSort(qs))
}
sum.Quantile = qs
out.Summary = sum
out.Label = s.labelPairs
return nil
}
// NewConstSummary returns a metric representing a Prometheus summary with fixed
// values for the count, sum, and quantiles. As those parameters cannot be
// changed, the returned value does not implement the Summary interface (but
// only the Metric interface). Users of this package will not have much use for
// it in regular operations. However, when implementing custom Collectors, it is
// useful as a throw-away metric that is generated on the fly to send it to
// Prometheus in the Collect method.
//
// quantiles maps ranks to quantile values. For example, a median latency of
// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// NewConstSummary returns an error if the length of labelValues is not
// consistent with the variable labels in Desc.
func NewConstSummary(
desc *Desc,
count uint64,
sum float64,
quantiles map[float64]float64,
labelValues ...string,
) (Metric, error) {
if len(desc.variableLabels) != len(labelValues) {
return nil, errInconsistentCardinality
}
return &constSummary{
desc: desc,
count: count,
sum: sum,
quantiles: quantiles,
labelPairs: makeLabelPairs(desc, labelValues),
}, nil
}
// MustNewConstSummary is a version of NewConstSummary that panics where
// NewConstMetric would have returned an error.
func MustNewConstSummary(
desc *Desc,
count uint64,
sum float64,
quantiles map[float64]float64,
labelValues ...string,
) Metric {
m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
if err != nil {
panic(err)
}
return m
}

View file

@@ -35,7 +35,7 @@ docker: build
 tarball: $(ARCHIVE)

 $(ARCHIVE): build
-	tar -czf $(ARCHIVE) prometheus
+	tar -czf $(ARCHIVE) prometheus tools/rule_checker/rule_checker consoles console_libraries

 release: REMOTE     ?= $(error "can't upload, REMOTE not set")
 release: REMOTE_DIR ?= $(error "can't upload, REMOTE_DIR not set")

View file

@@ -1 +1 @@
-0.13.1
+0.13.2

View file

@@ -32,7 +32,7 @@ func newLabelName(ln string) *LabelName {
 func TestUint64(t *testing.T) {
 	var b bytes.Buffer
-	const n = 422010471112345
+	const n = uint64(422010471112345)
 	if err := EncodeUint64(&b, n); err != nil {
 		t.Fatal(err)
 	}

View file

@@ -50,9 +50,9 @@ func newTestPersistence(t *testing.T, encoding chunkEncoding) (*persistence, tes
 func buildTestChunks(encoding chunkEncoding) map[clientmodel.Fingerprint][]chunk {
 	fps := clientmodel.Fingerprints{
-		m1.Fingerprint(),
-		m2.Fingerprint(),
-		m3.Fingerprint(),
+		m1.FastFingerprint(),
+		m2.FastFingerprint(),
+		m3.FastFingerprint(),
 	}
 	fpToChunks := map[clientmodel.Fingerprint][]chunk{}

@@ -375,11 +375,11 @@ func testCheckpointAndLoadSeriesMapAndHeads(t *testing.T, encoding chunkEncoding
 	s5.persistWatermark = 3
 	chunkCountS4 := len(s4.chunkDescs)
 	chunkCountS5 := len(s5.chunkDescs)
-	sm.put(m1.Fingerprint(), s1)
-	sm.put(m2.Fingerprint(), s2)
-	sm.put(m3.Fingerprint(), s3)
-	sm.put(m4.Fingerprint(), s4)
-	sm.put(m5.Fingerprint(), s5)
+	sm.put(m1.FastFingerprint(), s1)
+	sm.put(m2.FastFingerprint(), s2)
+	sm.put(m3.FastFingerprint(), s3)
+	sm.put(m4.FastFingerprint(), s4)
+	sm.put(m5.FastFingerprint(), s5)

 	if err := p.checkpointSeriesMapAndHeads(sm, fpLocker); err != nil {
 		t.Fatal(err)
@@ -392,7 +392,7 @@ func testCheckpointAndLoadSeriesMapAndHeads(t *testing.T, encoding chunkEncoding
 	if loadedSM.length() != 4 {
 		t.Errorf("want 4 series in map, got %d", loadedSM.length())
 	}
-	if loadedS1, ok := loadedSM.get(m1.Fingerprint()); ok {
+	if loadedS1, ok := loadedSM.get(m1.FastFingerprint()); ok {
 		if !reflect.DeepEqual(loadedS1.metric, m1) {
 			t.Errorf("want metric %v, got %v", m1, loadedS1.metric)
 		}
@@ -408,7 +408,7 @@ func testCheckpointAndLoadSeriesMapAndHeads(t *testing.T, encoding chunkEncoding
 	} else {
 		t.Errorf("couldn't find %v in loaded map", m1)
 	}
-	if loadedS3, ok := loadedSM.get(m3.Fingerprint()); ok {
+	if loadedS3, ok := loadedSM.get(m3.FastFingerprint()); ok {
 		if !reflect.DeepEqual(loadedS3.metric, m3) {
 			t.Errorf("want metric %v, got %v", m3, loadedS3.metric)
 		}
@@ -424,7 +424,7 @@ func testCheckpointAndLoadSeriesMapAndHeads(t *testing.T, encoding chunkEncoding
 	} else {
 		t.Errorf("couldn't find %v in loaded map", m3)
 	}
-	if loadedS4, ok := loadedSM.get(m4.Fingerprint()); ok {
+	if loadedS4, ok := loadedSM.get(m4.FastFingerprint()); ok {
 		if !reflect.DeepEqual(loadedS4.metric, m4) {
 			t.Errorf("want metric %v, got %v", m4, loadedS4.metric)
 		}
@@ -449,7 +449,7 @@ func testCheckpointAndLoadSeriesMapAndHeads(t *testing.T, encoding chunkEncoding
 	} else {
 		t.Errorf("couldn't find %v in loaded map", m4)
 	}
-	if loadedS5, ok := loadedSM.get(m5.Fingerprint()); ok {
+	if loadedS5, ok := loadedSM.get(m5.FastFingerprint()); ok {
 		if !reflect.DeepEqual(loadedS5.metric, m5) {
 			t.Errorf("want metric %v, got %v", m5, loadedS5.metric)
 		}

View file

@@ -318,7 +318,7 @@ func (s *memorySeries) preloadChunks(indexes []int, mss *memorySeriesStorage) ([
 	if s.chunkDescsOffset == -1 {
 		panic("requested loading chunks from persistence in a situation where we must not have persisted data for chunk descriptors in memory")
 	}
-	fp := s.metric.Fingerprint()
+	fp := s.metric.FastFingerprint() // TODO(beorn): Handle collisions.
 	chunks, err := mss.loadChunks(fp, loadIndexes, s.chunkDescsOffset)
 	if err != nil {
 		// Unpin the chunks since we won't return them as pinned chunks now.

View file

@@ -382,7 +382,7 @@ func (s *memorySeriesStorage) Append(sample *clientmodel.Sample) {
 		}
 		glog.Warning("Sample ingestion resumed.")
 	}
-	fp := sample.Metric.Fingerprint()
+	fp := sample.Metric.FastFingerprint() // TODO(beorn): Handle collisions.
 	s.fpLocker.Lock(fp)
 	series := s.getOrCreateSeries(fp, sample.Metric)
 	completedChunksCount := series.add(&metric.SamplePair{

View file

@@ -46,7 +46,7 @@ func TestGetFingerprintsForLabelMatchers(t *testing.T) {
 			Timestamp: clientmodel.Timestamp(i),
 			Value:     clientmodel.SampleValue(i),
 		}
-		fingerprints[i] = metric.Fingerprint()
+		fingerprints[i] = metric.FastFingerprint()
 	}
 	for _, s := range samples {
 		storage.Append(s)
@@ -172,7 +172,7 @@ func TestLoop(t *testing.T) {
 		storage.Append(s)
 	}
 	storage.WaitForIndexing()
-	series, _ := storage.(*memorySeriesStorage).fpToSeries.get(clientmodel.Metric{}.Fingerprint())
+	series, _ := storage.(*memorySeriesStorage).fpToSeries.get(clientmodel.Metric{}.FastFingerprint())
 	cdsBefore := len(series.chunkDescs)
 	time.Sleep(fpMaxWaitDuration + time.Second) // TODO(beorn7): Ugh, need to wait for maintenance to kick in.
 	cdsAfter := len(series.chunkDescs)
@@ -251,7 +251,7 @@ func testGetValueAtTime(t *testing.T, encoding chunkEncoding) {
 	}
 	s.WaitForIndexing()

-	fp := clientmodel.Metric{}.Fingerprint()
+	fp := clientmodel.Metric{}.FastFingerprint()
 	it := s.NewIterator(fp)

@@ -344,7 +344,7 @@ func testGetRangeValues(t *testing.T, encoding chunkEncoding) {
 	}
 	s.WaitForIndexing()

-	fp := clientmodel.Metric{}.Fingerprint()
+	fp := clientmodel.Metric{}.FastFingerprint()
 	it := s.NewIterator(fp)

@@ -498,7 +498,7 @@ func testEvictAndPurgeSeries(t *testing.T, encoding chunkEncoding) {
 	}
 	s.WaitForIndexing()

-	fp := clientmodel.Metric{}.Fingerprint()
+	fp := clientmodel.Metric{}.FastFingerprint()

 	// Drop ~half of the chunks.
 	ms.maintainMemorySeries(fp, 1000)
@@ -803,7 +803,7 @@ func createRandomSamples(metricName string, minLen int) clientmodel.Samples {
 				return clientmodel.SampleValue(rand.Intn(1<<16) - 1<<15 + int(v))
 			},
 			func(v clientmodel.SampleValue) clientmodel.SampleValue {
-				return clientmodel.SampleValue(rand.Intn(1<<32) - 1<<31 + int(v))
+				return clientmodel.SampleValue(rand.Int63n(1<<32) - 1<<31 + int64(v))
 			},
 		},
 	},
@@ -896,7 +896,7 @@ func verifyStorage(t testing.TB, s Storage, samples clientmodel.Samples, maxAge
 			// retention period, we can verify here that no results
 			// are returned.
 		}
-		fp := sample.Metric.Fingerprint()
+		fp := sample.Metric.FastFingerprint()
 		p := s.NewPreloader()
 		p.PreloadRange(fp, sample.Timestamp, sample.Timestamp, time.Hour)
 		found := s.NewIterator(fp).GetValueAtTime(sample.Timestamp)