Mirror of https://github.com/prometheus/prometheus.git (synced 2024-12-25 05:34:05 -08:00)
Convert metric.Values to slice of values.
The initial impetus for this was that it made unmarshalling sample values much faster.

Other relevant benchmark changes in ns/op:

Benchmark                                 old        new   speedup
==================================================================
BenchmarkMarshal                       179170     127996   1.4x
BenchmarkUnmarshal                     404984     132186   3.1x
BenchmarkMemoryGetValueAtTime           57801      50050   1.2x
BenchmarkMemoryGetBoundaryValues        64496      53194   1.2x
BenchmarkMemoryGetRangeValues           66585      54065   1.2x
BenchmarkStreamAdd                       45.0       75.3   0.6x
BenchmarkAppendSample1                   1157       1587   0.7x
BenchmarkAppendSample10                  4090       4284   0.95x
BenchmarkAppendSample100                45660      44066   1.0x
BenchmarkAppendSample1000              579084     582380   1.0x
BenchmarkMemoryAppendRepeatingValues 22796594   22005502   1.0x

Overall, this gives us good speedups in the areas where they matter most: decoding values from disk and accessing the memory storage (which is also used for views).

Some of the smaller append examples take minimally longer, but the cost seems to get amortized over larger appends, so I'm not worried about these. Also, we're currently not bottlenecked on the write path and have plenty of other optimizations available in that area if it becomes necessary. Memory allocations during appends don't change measurably at all.

Change-Id: I7dc7394edea09506976765551f35b138518db9e8
parent a7d0973fe3
commit 86fc13a52e
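The heart of the change is switching metric.Values from a slice of pointers to a slice of values. The following minimal, self-contained sketch (it uses stand-in types rather than the real clientmodel package) illustrates why the value-slice layout helps the decode path: all samples live in one contiguous backing array that can be filled in place, instead of one heap object per sample.

package main

import "fmt"

// Stand-ins for clientmodel.Timestamp and clientmodel.SampleValue, used only
// to keep this sketch self-contained.
type Timestamp int64
type SampleValue float64

type SamplePair struct {
	Value     SampleValue
	Timestamp Timestamp
}

// Before this commit: each element is a separate heap object, so building a
// series of n samples costs n small allocations plus the backing array.
type pointerValues []*SamplePair

// After this commit: one contiguous backing array holds every sample and can
// be filled field by field, with a single allocation.
type Values []SamplePair

func main() {
	const n = 4
	v := make(Values, n) // one allocation for all n samples
	for i := 0; i < n; i++ {
		v[i].Timestamp = Timestamp(i)
		v[i].Value = SampleValue(float64(i) * 1.5)
	}
	fmt.Println(v) // [{0 0} {1.5 1} {3 2} {4.5 3}]
}

With n samples, the value slice needs a single allocation where the pointer slice needs n+1, and scanning a series touches contiguous memory instead of chasing pointers, which matches the decode and memory-storage speedups reported above.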
@@ -384,7 +384,7 @@ func EvalVectorRange(node VectorNode, start clientmodel.Timestamp, end clientmod
     for t := start; t.Before(end); t = t.Add(interval) {
         vector := node.Eval(t, viewAdapter)
         for _, sample := range vector {
-            samplePair := &metric.SamplePair{
+            samplePair := metric.SamplePair{
                 Value:     sample.Value,
                 Timestamp: sample.Timestamp,
             }
@@ -82,7 +82,7 @@ func (v *viewAdapter) chooseClosestSample(samples metric.Values, timestamp clien
                 continue
             }
             sample := candidate
-            closestBefore = sample
+            closestBefore = &sample
         }

         // Samples after target time.
@@ -96,7 +96,7 @@ func (v *viewAdapter) chooseClosestSample(samples metric.Values, timestamp clien
                 continue
             }
             sample := candidate
-            closestAfter = sample
+            closestAfter = &sample
         }
     }
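The added `&` in these two hunks matters because ranging over a slice of values hands the loop a reused copy. Below is a minimal sketch of the pattern with simplified types; the helper name closestBefore and the assumption that the "closest" variables are pointers are illustrative, since the hunks do not show their declarations.

package main

import "fmt"

type SamplePair struct {
	Timestamp int64
	Value     float64
}

// closestBefore returns a pointer to the latest sample at or before t,
// mirroring the pattern in chooseClosestSample: copy the range element into a
// per-iteration variable before taking its address.
func closestBefore(samples []SamplePair, t int64) *SamplePair {
	var closest *SamplePair
	for _, candidate := range samples {
		if candidate.Timestamp > t {
			continue
		}
		sample := candidate // fresh variable each iteration
		closest = &sample   // safe to keep across iterations
		// Taking &candidate instead would alias the single loop variable,
		// which later iterations keep overwriting (pre-Go 1.22 semantics).
	}
	return closest
}

func main() {
	samples := []SamplePair{{10, 1}, {20, 2}, {30, 3}}
	fmt.Println(*closestBefore(samples, 25)) // {20 2}
}

Copying into a per-iteration variable keeps the stored pointer valid even after the loop moves on, which is exactly what the extra `sample := candidate` line already provided and the new `&sample` now relies on.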
@@ -28,7 +28,7 @@ var testStartTime = clientmodel.Timestamp(0)
 func getTestValueStream(startVal clientmodel.SampleValue, endVal clientmodel.SampleValue, stepVal clientmodel.SampleValue, startTime clientmodel.Timestamp) (resultValues metric.Values) {
     currentTime := startTime
     for currentVal := startVal; currentVal <= endVal; currentVal += stepVal {
-        sample := &metric.SamplePair{
+        sample := metric.SamplePair{
             Value:     currentVal,
             Timestamp: currentTime,
         }
@@ -350,7 +350,7 @@ func (l *LevelDBMetricPersistence) AppendSamples(samples clientmodel.Samples) (e
         key.Dump(keyDto)

         for _, sample := range chunk {
-            values = append(values, &SamplePair{
+            values = append(values, SamplePair{
                 Timestamp: sample.Timestamp,
                 Value:     sample.Value,
             })
@@ -27,7 +27,7 @@ import (
 const initialSeriesArenaSize = 4 * 60

 type stream interface {
-    add(...*SamplePair)
+    add(Values)

     clone() Values
     expunge(age clientmodel.Timestamp) Values
@@ -53,7 +53,7 @@ func (s *arrayStream) metric() clientmodel.Metric {
     return s.m
 }

-func (s *arrayStream) add(v ...*SamplePair) {
+func (s *arrayStream) add(v Values) {
     s.Lock()
     defer s.Unlock()
@@ -202,9 +202,11 @@ func (s *memorySeriesStorage) AppendSample(sample *clientmodel.Sample) error {
     fingerprint := &clientmodel.Fingerprint{}
     fingerprint.LoadFromMetric(sample.Metric)
     series := s.getOrCreateSeries(sample.Metric, fingerprint)
-    series.add(&SamplePair{
-        Value:     sample.Value,
-        Timestamp: sample.Timestamp,
+    series.add(Values{
+        SamplePair{
+            Value:     sample.Value,
+            Timestamp: sample.Timestamp,
+        },
     })

     if s.wmCache != nil {
@@ -325,7 +327,7 @@ func (s *memorySeriesStorage) appendSamplesWithoutIndexing(fingerprint *clientmo
         s.fingerprintToSeries[*fingerprint] = series
     }

-    series.add(samples...)
+    series.add(samples)
 }

 func (s *memorySeriesStorage) GetFingerprintsForLabelSet(l clientmodel.LabelSet) (clientmodel.Fingerprints, error) {
@@ -27,7 +27,7 @@ func BenchmarkStreamAdd(b *testing.B) {
     s := newArrayStream(clientmodel.Metric{})
     samples := make(Values, b.N)
     for i := 0; i < b.N; i++ {
-        samples = append(samples, &SamplePair{
+        samples = append(samples, SamplePair{
             Timestamp: clientmodel.TimestampFromTime(time.Date(i, 0, 0, 0, 0, 0, 0, time.UTC)),
             Value:     clientmodel.SampleValue(i),
         })
@@ -38,7 +38,7 @@ func BenchmarkStreamAdd(b *testing.B) {
     var pre runtime.MemStats
     runtime.ReadMemStats(&pre)

-    s.add(samples...)
+    s.add(samples)

     var post runtime.MemStats
     runtime.ReadMemStats(&post)
@@ -211,7 +211,7 @@ func TestGetValuesAtTimeOp(t *testing.T) {
             t.Fatalf("%d. expected length %d, got %d", i, len(scenario.out), len(actual))
         }
         for j, out := range scenario.out {
-            if !out.Equal(actual[j]) {
+            if !out.Equal(&actual[j]) {
                 t.Fatalf("%d. expected output %v, got %v", i, scenario.out, actual)
             }
         }
@@ -464,7 +464,7 @@ func TestGetValuesAtIntervalOp(t *testing.T) {
         }

         for j, out := range scenario.out {
-            if !out.Equal(actual[j]) {
+            if !out.Equal(&actual[j]) {
                 t.Fatalf("%d. expected output %v, got %v", i, scenario.out, actual)
             }
         }
@@ -639,7 +639,7 @@ func TestGetValuesAlongRangeOp(t *testing.T) {
             t.Fatalf("%d. expected length %d, got %d: %v", i, len(scenario.out), len(actual), actual)
         }
         for j, out := range scenario.out {
-            if !out.Equal(actual[j]) {
+            if !out.Equal(&actual[j]) {
                 t.Fatalf("%d. expected output %v, got %v", i, scenario.out, actual)
             }
         }
@@ -796,7 +796,7 @@ func TestGetValueRangeAtIntervalOp(t *testing.T) {
             t.Fatalf("%d. expected length %d, got %d: %v", i, len(scenario.out), len(actual), actual)
         }
         for j, out := range scenario.out {
-            if !out.Equal(actual[j]) {
+            if !out.Equal(&actual[j]) {
                 t.Fatalf("%d. expected output %v, got %v", i, scenario.out, actual)
             }
         }
@@ -57,9 +57,9 @@ func (s *SamplePair) String() string {
     return fmt.Sprintf("SamplePair at %s of %s", s.Timestamp, s.Value)
 }

-// Values is a sortable slice of SamplePair pointers (as in: it implements
+// Values is a sortable slice of SamplePairs (as in: it implements
 // sort.Interface). Sorting happens by Timestamp.
-type Values []*SamplePair
+type Values []SamplePair

 // Len implements sort.Interface.
 func (v Values) Len() int {
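The doc comment above states that Values implements sort.Interface ordered by Timestamp, but only Len is visible in this hunk. Here is a plausible, self-contained sketch of the full interface with stand-in types; the Less and Swap bodies are assumptions, not copied from the commit.

package main

import (
	"fmt"
	"sort"
)

type Timestamp int64
type SampleValue float64

type SamplePair struct {
	Value     SampleValue
	Timestamp Timestamp
}

// Values sorts by Timestamp, as the diff's doc comment says. Len matches the
// hunk; Less and Swap below are plausible fill-ins.
type Values []SamplePair

func (v Values) Len() int           { return len(v) }
func (v Values) Less(i, j int) bool { return v[i].Timestamp < v[j].Timestamp }
func (v Values) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }

func main() {
	v := Values{{Value: 2, Timestamp: 20}, {Value: 1, Timestamp: 10}}
	sort.Sort(v)
	fmt.Println(v) // [{1 10} {2 20}]
}

With value elements, Swap copies two small structs instead of two pointers, which stays cheap while keeping the series contiguous in memory.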
@@ -84,7 +84,7 @@ func (v Values) Equal(o Values) bool {
     }

     for i, expected := range v {
-        if !expected.Equal(o[i]) {
+        if !expected.Equal(&o[i]) {
             return false
         }
     }
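The call sites in this hunk and in the test hunks (expected.Equal(&o[i]), out.Equal(&actual[j])) imply that SamplePair.Equal takes a *SamplePair, so indexing a value slice now requires taking the element's address. A hedged sketch of that shape, with stand-in types and an assumed method body that is not taken from the commit:

package main

import "fmt"

type Timestamp int64
type SampleValue float64

type SamplePair struct {
	Value     SampleValue
	Timestamp Timestamp
}

// Equal compares two pairs through pointers. The signature is inferred from
// the call sites in the diff; the body here is a plausible stand-in.
func (s *SamplePair) Equal(o *SamplePair) bool {
	if s == o {
		return true
	}
	return s.Value == o.Value && s.Timestamp == o.Timestamp
}

func main() {
	vs := []SamplePair{{Value: 1, Timestamp: 10}}
	want := SamplePair{Value: 1, Timestamp: 10}
	// Indexing a slice of values yields an addressable element, so &vs[0]
	// hands Equal a pointer without an extra copy or allocation.
	fmt.Println(want.Equal(&vs[0])) // true
}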
@@ -182,10 +182,8 @@ func unmarshalValues(buf []byte) Values {
     }
     for i := 0; i < n; i++ {
         offset := formatVersionSize + i*sampleSize
-        v[i] = &SamplePair{
-            Timestamp: clientmodel.TimestampFromUnix(int64(binary.LittleEndian.Uint64(buf[offset:]))),
-            Value:     clientmodel.SampleValue(math.Float64frombits(binary.LittleEndian.Uint64(buf[offset+8:]))),
-        }
+        v[i].Timestamp = clientmodel.TimestampFromUnix(int64(binary.LittleEndian.Uint64(buf[offset:])))
+        v[i].Value = clientmodel.SampleValue(math.Float64frombits(binary.LittleEndian.Uint64(buf[offset+8:])))
     }
     return v
 }
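This hunk is where the 3.1x BenchmarkUnmarshal improvement in the commit message comes from: the decoder now writes the timestamp and value straight into the elements of one preallocated slice instead of allocating a SamplePair per sample. A self-contained sketch with stand-in types and assumed constants (a 1-byte version header and 16 bytes per sample, matching the offsets used above; the real constants live elsewhere in the file):

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

type Timestamp int64
type SampleValue float64

type SamplePair struct {
	Value     SampleValue
	Timestamp Timestamp
}

type Values []SamplePair

// Assumed encoding constants for this sketch.
const (
	formatVersionSize = 1
	sampleSize        = 16
)

// unmarshalValues decodes samples directly into one preallocated slice:
// one allocation total, instead of one per sample as with []*SamplePair.
func unmarshalValues(buf []byte) Values {
	n := (len(buf) - formatVersionSize) / sampleSize
	v := make(Values, n)
	for i := 0; i < n; i++ {
		offset := formatVersionSize + i*sampleSize
		v[i].Timestamp = Timestamp(binary.LittleEndian.Uint64(buf[offset:]))
		v[i].Value = SampleValue(math.Float64frombits(binary.LittleEndian.Uint64(buf[offset+8:])))
	}
	return v
}

func main() {
	// Encode two samples by hand, then decode them back.
	buf := make([]byte, formatVersionSize+2*sampleSize)
	buf[0] = 1 // format version byte
	for i, p := range []SamplePair{{Value: 3.5, Timestamp: 100}, {Value: 4.5, Timestamp: 200}} {
		offset := formatVersionSize + i*sampleSize
		binary.LittleEndian.PutUint64(buf[offset:], uint64(p.Timestamp))
		binary.LittleEndian.PutUint64(buf[offset+8:], math.Float64bits(float64(p.Value)))
	}
	fmt.Println(unmarshalValues(buf)) // [{3.5 100} {4.5 200}]
}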
@@ -17,7 +17,7 @@ func TestValuesMarshalAndUnmarshal(t *testing.T) {

     for i, expected := range values {
         actual := unmarshalled[i]
-        if !actual.Equal(expected) {
+        if !actual.Equal(&expected) {
             t.Fatalf("%d. got: %v, expected: %v", i, actual, expected)
         }
     }
@@ -26,7 +26,7 @@ func TestValuesMarshalAndUnmarshal(t *testing.T) {
 func randomValues(numSamples int) Values {
     v := make(Values, 0, numSamples)
     for i := 0; i < numSamples; i++ {
-        v = append(v, &SamplePair{
+        v = append(v, SamplePair{
             Timestamp: clientmodel.Timestamp(rand.Int63()),
             Value:     clientmodel.SampleValue(rand.NormFloat64()),
         })
@@ -719,7 +719,7 @@ func testTruncateBefore(t test.Tester) {
         }

         for j, actualValue := range actual {
-            if !actualValue.Equal(scenario.out[j]) {
+            if !actualValue.Equal(&scenario.out[j]) {
                 t.Fatalf("%d.%d. expected %s, got %s", i, j, scenario.out[j], actualValue)
             }
         }