prometheus/storage/metric/tiered/memory_test.go

// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tiered

import (
	"fmt"
"reflect"
"runtime"
"sync"
"testing"
"time"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
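
// BenchmarkStreamAdd measures appending b.N pre-built samples with strictly
// increasing timestamps to an in-memory arrayStream in a single add call.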
func BenchmarkStreamAdd(b *testing.B) {
	b.StopTimer()
	s := newArrayStream(clientmodel.Metric{})
	samples := make(metric.Values, 0, b.N)
	for i := 0; i < b.N; i++ {
		samples = append(samples, metric.SamplePair{
			Timestamp: clientmodel.TimestampFromTime(time.Date(i, 0, 0, 0, 0, 0, 0, time.UTC)),
			Value:     clientmodel.SampleValue(i),
		})
	}

	b.StartTimer()
	s.add(samples)
}
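
// TestStreamAdd checks that stream.add keeps values in temporal order: a batch
// that is entirely older than the newest stored sample is dropped, and a
// partially outdated batch contributes only its newer samples.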
func TestStreamAdd(t *testing.T) {
	s := newArrayStream(clientmodel.Metric{})

	// Add empty to empty.
	v := metric.Values{}
	expected := metric.Values{}
	s.add(v)
	if got := s.values; !reflect.DeepEqual(expected, got) {
		t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
	}

	// Add something to empty.
	v = metric.Values{
		metric.SamplePair{Timestamp: 1, Value: -1},
	}
	expected = append(expected, v...)
	s.add(v)
	if got := s.values; !reflect.DeepEqual(expected, got) {
		t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
	}

	// Add something to something.
	v = metric.Values{
		metric.SamplePair{Timestamp: 2, Value: -2},
		metric.SamplePair{Timestamp: 5, Value: -5},
	}
	expected = append(expected, v...)
	s.add(v)
	if got := s.values; !reflect.DeepEqual(expected, got) {
		t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
	}

	// Add something outdated to something.
	v = metric.Values{
		metric.SamplePair{Timestamp: 3, Value: -3},
		metric.SamplePair{Timestamp: 4, Value: -4},
	}
	s.add(v)
	if got := s.values; !reflect.DeepEqual(expected, got) {
		t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
	}

	// Add something partially outdated to something.
	v = metric.Values{
		metric.SamplePair{Timestamp: 3, Value: -3},
		metric.SamplePair{Timestamp: 6, Value: -6},
	}
	expected = append(expected, metric.SamplePair{Timestamp: 6, Value: -6})
	s.add(v)
	if got := s.values; !reflect.DeepEqual(expected, got) {
		t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
	}
}
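
// benchmarkAppendSamples appends b.N samples, one at a time, to a fresh
// MemorySeriesStorage; all samples share a single metric carrying the given
// number of labels, which is the only parameter the BenchmarkAppendSample*
// variants below change.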
func benchmarkAppendSamples(b *testing.B, labels int) {
	b.StopTimer()
	s := NewMemorySeriesStorage(MemorySeriesOptions{})

	metric := clientmodel.Metric{}
	for i := 0; i < labels; i++ {
		metric[clientmodel.LabelName(fmt.Sprintf("label_%d", i))] = clientmodel.LabelValue(fmt.Sprintf("value_%d", i))
	}

	samples := make(clientmodel.Samples, 0, b.N)
	for i := 0; i < b.N; i++ {
		samples = append(samples, &clientmodel.Sample{
			Metric:    metric,
			Value:     clientmodel.SampleValue(i),
			Timestamp: clientmodel.TimestampFromTime(time.Date(i, 0, 0, 0, 0, 0, 0, time.UTC)),
		})
	}

	b.StartTimer()
	for i := 0; i < b.N; i++ {
		s.AppendSample(samples[i])
	}
}

func BenchmarkAppendSample1(b *testing.B) {
	benchmarkAppendSamples(b, 1)
}

func BenchmarkAppendSample10(b *testing.B) {
	benchmarkAppendSamples(b, 10)
}

func BenchmarkAppendSample100(b *testing.B) {
	benchmarkAppendSamples(b, 100)
}

func BenchmarkAppendSample1000(b *testing.B) {
	benchmarkAppendSamples(b, 1000)
}

// Regression test for https://github.com/prometheus/prometheus/issues/381.
//
// 1. Creates samples for two timeseries with one common labelpair.
// 2. Flushes memory storage such that only one series is dropped from memory.
// 3. Gets fingerprints for common labelpair.
// 4. Checks that exactly one fingerprint remains.
func TestDroppedSeriesIndexRegression(t *testing.T) {
	samples := clientmodel.Samples{
		&clientmodel.Sample{
			Metric: clientmodel.Metric{
				clientmodel.MetricNameLabel: "testmetric",
				"different":                 "differentvalue1",
				"common":                    "samevalue",
			},
			Value:     1,
			Timestamp: clientmodel.TimestampFromTime(time.Date(2000, 0, 0, 0, 0, 0, 0, time.UTC)),
		},
		&clientmodel.Sample{
			Metric: clientmodel.Metric{
				clientmodel.MetricNameLabel: "testmetric",
				"different":                 "differentvalue2",
				"common":                    "samevalue",
			},
			Value:     2,
			Timestamp: clientmodel.TimestampFromTime(time.Date(2002, 0, 0, 0, 0, 0, 0, time.UTC)),
		},
	}

	s := NewMemorySeriesStorage(MemorySeriesOptions{})
	s.AppendSamples(samples)

	common := clientmodel.LabelSet{"common": "samevalue"}
	fps, err := s.GetFingerprintsForLabelMatchers(labelMatchersFromLabelSet(common))
	if err != nil {
		t.Fatal(err)
	}
	if len(fps) != 2 {
		t.Fatalf("Got %d fingerprints, expected 2", len(fps))
	}

	toDisk := make(chan clientmodel.Samples, 2)
	flushOlderThan := clientmodel.TimestampFromTime(time.Date(2001, 0, 0, 0, 0, 0, 0, time.UTC))
	s.Flush(flushOlderThan, toDisk)
	if len(toDisk) != 1 {
		t.Fatalf("Got %d disk sample lists, expected 1", len(toDisk))
	}
	diskSamples := <-toDisk
	if len(diskSamples) != 1 {
		t.Fatalf("Got %d disk samples, expected 1", len(diskSamples))
	}

	s.Evict(flushOlderThan)
	fps, err = s.GetFingerprintsForLabelMatchers(labelMatchersFromLabelSet(common))
	if err != nil {
		t.Fatal(err)
	}
	if len(fps) != 1 {
		t.Fatalf("Got %d fingerprints, expected 1", len(fps))
	}
}
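
// TestReaderWriterDeadlockRegression runs a writer appending samples and a
// reader querying fingerprints concurrently for 250ms with GOMAXPROCS(2),
// and fails if both have not finished within five seconds, which would
// indicate a reader/writer deadlock in the storage locking.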
func TestReaderWriterDeadlockRegression(t *testing.T) {
	mp := runtime.GOMAXPROCS(2)
	defer func(mp int) {
		runtime.GOMAXPROCS(mp)
	}(mp)

	s := NewMemorySeriesStorage(MemorySeriesOptions{})
	lms := metric.LabelMatchers{}
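
	// Build 100 copies of the same NotEqual matcher; presumably this is meant to
	// make each read slow enough to overlap reliably with concurrent writes.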
	for i := 0; i < 100; i++ {
		lm, err := metric.NewLabelMatcher(metric.NotEqual, clientmodel.MetricNameLabel, "testmetric")
		if err != nil {
			t.Fatal(err)
		}
		lms = append(lms, lm)
	}

	wg := sync.WaitGroup{}
	wg.Add(2)

	start := time.Now()
	runDuration := 250 * time.Millisecond

	writer := func() {
		for time.Since(start) < runDuration {
			s.AppendSamples(clientmodel.Samples{
				&clientmodel.Sample{
					Metric: clientmodel.Metric{
						clientmodel.MetricNameLabel: "testmetric",
					},
					Value:     1,
					Timestamp: 0,
				},
			})
		}
		wg.Done()
	}

	reader := func() {
		for time.Since(start) < runDuration {
			s.GetFingerprintsForLabelMatchers(lms)
		}
		wg.Done()
	}

	go reader()
	go writer()

	allDone := make(chan struct{})
	go func() {
		wg.Wait()
		allDone <- struct{}{}
	}()

	select {
	case <-allDone:
		break
	case <-time.NewTimer(5 * time.Second).C:
		t.Fatalf("Deadlock timeout")
	}
}
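
// BenchmarkGetFingerprintsForNotEqualMatcher1000 measures fingerprint lookup
// with a single NotEqual matcher against a memory storage holding 1000 series.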
func BenchmarkGetFingerprintsForNotEqualMatcher1000(b *testing.B) {
	numSeries := 1000
	samples := make(clientmodel.Samples, 0, numSeries)
	for i := 0; i < numSeries; i++ {
		samples = append(samples, &clientmodel.Sample{
			Metric: clientmodel.Metric{
				clientmodel.MetricNameLabel: "testmetric",
				"instance":                  clientmodel.LabelValue(fmt.Sprint("instance_", i)),
			},
			Value:     1,
			Timestamp: clientmodel.TimestampFromTime(time.Date(2000, 0, 0, 0, 0, 0, 0, time.UTC)),
		})
	}

	s := NewMemorySeriesStorage(MemorySeriesOptions{})
	if err := s.AppendSamples(samples); err != nil {
		b.Fatal(err)
	}

	m, err := metric.NewLabelMatcher(metric.NotEqual, "instance", "foo")
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.GetFingerprintsForLabelMatchers(metric.LabelMatchers{m})
	}
}