mirror of
https://github.com/prometheus/prometheus.git
synced 2024-11-14 17:44:06 -08:00
de9a88b964
BenchmarkAppendSample.* before this change: BenchmarkAppendSample1 1000000 1142 ns/op --- BENCH: BenchmarkAppendSample1 memory_test.go:81: 1 cycles with 9992.000000 bytes per cycle, totalling 9992 memory_test.go:81: 100 cycles with 250.399994 bytes per cycle, totalling 25040 memory_test.go:81: 10000 cycles with 239.428802 bytes per cycle, totalling 2394288 memory_test.go:81: 1000000 cycles with 255.504684 bytes per cycle, totalling 255504688 BenchmarkAppendSample10 500000 3823 ns/op --- BENCH: BenchmarkAppendSample10 memory_test.go:81: 1 cycles with 15536.000000 bytes per cycle, totalling 15536 memory_test.go:81: 100 cycles with 662.239990 bytes per cycle, totalling 66224 memory_test.go:81: 10000 cycles with 601.937622 bytes per cycle, totalling 6019376 memory_test.go:81: 500000 cycles with 598.582764 bytes per cycle, totalling 299291408 BenchmarkAppendSample100 50000 41111 ns/op --- BENCH: BenchmarkAppendSample100 memory_test.go:81: 1 cycles with 79824.000000 bytes per cycle, totalling 79824 memory_test.go:81: 100 cycles with 4924.479980 bytes per cycle, totalling 492448 memory_test.go:81: 10000 cycles with 4278.019043 bytes per cycle, totalling 42780192 memory_test.go:81: 50000 cycles with 4275.242676 bytes per cycle, totalling 213762144 BenchmarkAppendSample1000 5000 533933 ns/op --- BENCH: BenchmarkAppendSample1000 memory_test.go:81: 1 cycles with 840224.000000 bytes per cycle, totalling 840224 memory_test.go:81: 100 cycles with 62789.281250 bytes per cycle, totalling 6278928 memory_test.go:81: 5000 cycles with 55208.601562 bytes per cycle, totalling 276043008 ok github.com/prometheus/prometheus/storage/metric/tiered 27.828s BenchmarkAppendSample.* after this change: BenchmarkAppendSample1 1000000 1109 ns/op --- BENCH: BenchmarkAppendSample1 memory_test.go:131: 1 cycles with 9992.000000 bytes per cycle, totalling 9992 memory_test.go:131: 100 cycles with 250.399994 bytes per cycle, totalling 25040 memory_test.go:131: 10000 cycles with 239.220795 bytes per cycle, 
totalling 2392208 memory_test.go:131: 1000000 cycles with 255.492630 bytes per cycle, totalling 255492624 BenchmarkAppendSample10 500000 3663 ns/op --- BENCH: BenchmarkAppendSample10 memory_test.go:131: 1 cycles with 15536.000000 bytes per cycle, totalling 15536 memory_test.go:131: 100 cycles with 662.239990 bytes per cycle, totalling 66224 memory_test.go:131: 10000 cycles with 601.889587 bytes per cycle, totalling 6018896 memory_test.go:131: 500000 cycles with 598.550903 bytes per cycle, totalling 299275472 BenchmarkAppendSample100 50000 40694 ns/op --- BENCH: BenchmarkAppendSample100 memory_test.go:131: 1 cycles with 78976.000000 bytes per cycle, totalling 78976 memory_test.go:131: 100 cycles with 4928.319824 bytes per cycle, totalling 492832 memory_test.go:131: 10000 cycles with 4277.961426 bytes per cycle, totalling 42779616 memory_test.go:131: 50000 cycles with 4275.054199 bytes per cycle, totalling 213752720 BenchmarkAppendSample1000 5000 530744 ns/op --- BENCH: BenchmarkAppendSample1000 memory_test.go:131: 1 cycles with 842192.000000 bytes per cycle, totalling 842192 memory_test.go:131: 100 cycles with 62765.441406 bytes per cycle, totalling 6276544 memory_test.go:131: 5000 cycles with 55209.812500 bytes per cycle, totalling 276049056 ok github.com/prometheus/prometheus/storage/metric/tiered 27.468s Change-Id: Idaa339cd83539b5e4391614541a2c3a04002d66d
270 lines
7.1 KiB
Go
270 lines
7.1 KiB
Go
// Copyright 2013 Prometheus Team
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
// you may not use this file except in compliance with the License.
|
|
// You may obtain a copy of the License at
|
|
//
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
//
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
// See the License for the specific language governing permissions and
|
|
// limitations under the License.
|
|
|
|
package tiered
|
|
|
|
import (
|
|
"fmt"
|
|
"reflect"
|
|
"runtime"
|
|
"sync"
|
|
"testing"
|
|
"time"
|
|
|
|
clientmodel "github.com/prometheus/client_golang/model"
|
|
|
|
"github.com/prometheus/prometheus/storage/metric"
|
|
)
|
|
|
|
func BenchmarkStreamAdd(b *testing.B) {
|
|
b.StopTimer()
|
|
s := newArrayStream(clientmodel.Metric{})
|
|
samples := make(metric.Values, b.N)
|
|
for i := 0; i < b.N; i++ {
|
|
samples = append(samples, metric.SamplePair{
|
|
Timestamp: clientmodel.TimestampFromTime(time.Date(i, 0, 0, 0, 0, 0, 0, time.UTC)),
|
|
Value: clientmodel.SampleValue(i),
|
|
})
|
|
}
|
|
|
|
b.StartTimer()
|
|
|
|
var pre runtime.MemStats
|
|
runtime.ReadMemStats(&pre)
|
|
|
|
s.add(samples)
|
|
|
|
var post runtime.MemStats
|
|
runtime.ReadMemStats(&post)
|
|
|
|
b.Logf("%d cycles with %f bytes per cycle, totalling %d", b.N, float32(post.TotalAlloc-pre.TotalAlloc)/float32(b.N), post.TotalAlloc-pre.TotalAlloc)
|
|
}
|
|
|
|
func TestStreamAdd(t *testing.T) {
|
|
s := newArrayStream(clientmodel.Metric{})
|
|
// Add empty to empty.
|
|
v := metric.Values{}
|
|
expected := metric.Values{}
|
|
s.add(v)
|
|
if got := s.values; !reflect.DeepEqual(expected, got) {
|
|
t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
|
|
}
|
|
// Add something to empty.
|
|
v = metric.Values{
|
|
metric.SamplePair{Timestamp: 1, Value: -1},
|
|
}
|
|
expected = append(expected, v...)
|
|
s.add(v)
|
|
if got := s.values; !reflect.DeepEqual(expected, got) {
|
|
t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
|
|
}
|
|
// Add something to something.
|
|
v = metric.Values{
|
|
metric.SamplePair{Timestamp: 2, Value: -2},
|
|
metric.SamplePair{Timestamp: 5, Value: -5},
|
|
}
|
|
expected = append(expected, v...)
|
|
s.add(v)
|
|
if got := s.values; !reflect.DeepEqual(expected, got) {
|
|
t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
|
|
}
|
|
// Add something outdated to something.
|
|
v = metric.Values{
|
|
metric.SamplePair{Timestamp: 3, Value: -3},
|
|
metric.SamplePair{Timestamp: 4, Value: -4},
|
|
}
|
|
s.add(v)
|
|
if got := s.values; !reflect.DeepEqual(expected, got) {
|
|
t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
|
|
}
|
|
// Add something partially outdated to something.
|
|
v = metric.Values{
|
|
metric.SamplePair{Timestamp: 3, Value: -3},
|
|
metric.SamplePair{Timestamp: 6, Value: -6},
|
|
}
|
|
expected = append(expected, metric.SamplePair{Timestamp: 6, Value: -6})
|
|
s.add(v)
|
|
if got := s.values; !reflect.DeepEqual(expected, got) {
|
|
t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
|
|
}
|
|
}
|
|
|
|
func benchmarkAppendSamples(b *testing.B, labels int) {
|
|
b.StopTimer()
|
|
s := NewMemorySeriesStorage(MemorySeriesOptions{})
|
|
|
|
metric := clientmodel.Metric{}
|
|
|
|
for i := 0; i < labels; i++ {
|
|
metric[clientmodel.LabelName(fmt.Sprintf("label_%d", i))] = clientmodel.LabelValue(fmt.Sprintf("value_%d", i))
|
|
}
|
|
samples := make(clientmodel.Samples, 0, b.N)
|
|
for i := 0; i < b.N; i++ {
|
|
samples = append(samples, &clientmodel.Sample{
|
|
Metric: metric,
|
|
Value: clientmodel.SampleValue(i),
|
|
Timestamp: clientmodel.TimestampFromTime(time.Date(i, 0, 0, 0, 0, 0, 0, time.UTC)),
|
|
})
|
|
}
|
|
|
|
b.StartTimer()
|
|
var pre runtime.MemStats
|
|
runtime.ReadMemStats(&pre)
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
s.AppendSample(samples[i])
|
|
}
|
|
|
|
var post runtime.MemStats
|
|
runtime.ReadMemStats(&post)
|
|
|
|
b.Logf("%d cycles with %f bytes per cycle, totalling %d", b.N, float32(post.TotalAlloc-pre.TotalAlloc)/float32(b.N), post.TotalAlloc-pre.TotalAlloc)
|
|
}
|
|
|
|
// BenchmarkAppendSample1 benchmarks appending samples with 1 label each.
func BenchmarkAppendSample1(b *testing.B) {
	benchmarkAppendSamples(b, 1)
}
|
|
|
|
// BenchmarkAppendSample10 benchmarks appending samples with 10 labels each.
func BenchmarkAppendSample10(b *testing.B) {
	benchmarkAppendSamples(b, 10)
}
|
|
|
|
// BenchmarkAppendSample100 benchmarks appending samples with 100 labels each.
func BenchmarkAppendSample100(b *testing.B) {
	benchmarkAppendSamples(b, 100)
}
|
|
|
|
// BenchmarkAppendSample1000 benchmarks appending samples with 1000 labels each.
func BenchmarkAppendSample1000(b *testing.B) {
	benchmarkAppendSamples(b, 1000)
}
|
|
|
|
// Regression test for https://github.com/prometheus/prometheus/issues/381.
|
|
//
|
|
// 1. Creates samples for two timeseries with one common labelpair.
|
|
// 2. Flushes memory storage such that only one series is dropped from memory.
|
|
// 3. Gets fingerprints for common labelpair.
|
|
// 4. Checks that exactly one fingerprint remains.
|
|
func TestDroppedSeriesIndexRegression(t *testing.T) {
|
|
samples := clientmodel.Samples{
|
|
&clientmodel.Sample{
|
|
Metric: clientmodel.Metric{
|
|
clientmodel.MetricNameLabel: "testmetric",
|
|
"different": "differentvalue1",
|
|
"common": "samevalue",
|
|
},
|
|
Value: 1,
|
|
Timestamp: clientmodel.TimestampFromTime(time.Date(2000, 0, 0, 0, 0, 0, 0, time.UTC)),
|
|
},
|
|
&clientmodel.Sample{
|
|
Metric: clientmodel.Metric{
|
|
clientmodel.MetricNameLabel: "testmetric",
|
|
"different": "differentvalue2",
|
|
"common": "samevalue",
|
|
},
|
|
Value: 2,
|
|
Timestamp: clientmodel.TimestampFromTime(time.Date(2002, 0, 0, 0, 0, 0, 0, time.UTC)),
|
|
},
|
|
}
|
|
|
|
s := NewMemorySeriesStorage(MemorySeriesOptions{})
|
|
s.AppendSamples(samples)
|
|
|
|
common := clientmodel.LabelSet{"common": "samevalue"}
|
|
fps, err := s.GetFingerprintsForLabelMatchers(labelMatchersFromLabelSet(common))
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if len(fps) != 2 {
|
|
t.Fatalf("Got %d fingerprints, expected 2", len(fps))
|
|
}
|
|
|
|
toDisk := make(chan clientmodel.Samples, 2)
|
|
s.Flush(clientmodel.TimestampFromTime(time.Date(2001, 0, 0, 0, 0, 0, 0, time.UTC)), toDisk)
|
|
if len(toDisk) != 1 {
|
|
t.Fatalf("Got %d disk sample lists, expected 1", len(toDisk))
|
|
}
|
|
diskSamples := <-toDisk
|
|
if len(diskSamples) != 1 {
|
|
t.Fatalf("Got %d disk samples, expected 1", len(diskSamples))
|
|
}
|
|
|
|
fps, err = s.GetFingerprintsForLabelMatchers(labelMatchersFromLabelSet(common))
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if len(fps) != 1 {
|
|
t.Fatalf("Got %d fingerprints, expected 1", len(fps))
|
|
}
|
|
}
|
|
|
|
func TestReaderWriterDeadlockRegression(t *testing.T) {
|
|
mp := runtime.GOMAXPROCS(2)
|
|
defer func(mp int) {
|
|
runtime.GOMAXPROCS(mp)
|
|
}(mp)
|
|
|
|
s := NewMemorySeriesStorage(MemorySeriesOptions{})
|
|
lms := metric.LabelMatchers{}
|
|
|
|
for i := 0; i < 100; i++ {
|
|
lm, err := metric.NewLabelMatcher(metric.NotEqual, clientmodel.MetricNameLabel, "testmetric")
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
lms = append(lms, lm)
|
|
}
|
|
|
|
wg := sync.WaitGroup{}
|
|
wg.Add(2)
|
|
|
|
start := time.Now()
|
|
runDuration := 250 * time.Millisecond
|
|
|
|
writer := func() {
|
|
for time.Since(start) < runDuration {
|
|
s.AppendSamples(clientmodel.Samples{
|
|
&clientmodel.Sample{
|
|
Metric: clientmodel.Metric{
|
|
clientmodel.MetricNameLabel: "testmetric",
|
|
},
|
|
Value: 1,
|
|
Timestamp: 0,
|
|
},
|
|
})
|
|
}
|
|
wg.Done()
|
|
}
|
|
|
|
reader := func() {
|
|
for time.Since(start) < runDuration {
|
|
s.GetFingerprintsForLabelMatchers(lms)
|
|
}
|
|
wg.Done()
|
|
}
|
|
|
|
go reader()
|
|
go writer()
|
|
|
|
allDone := make(chan struct{})
|
|
go func() {
|
|
wg.Wait()
|
|
allDone <- struct{}{}
|
|
}()
|
|
|
|
select {
|
|
case <-allDone:
|
|
break
|
|
case <-time.NewTimer(5 * time.Second).C:
|
|
t.Fatalf("Deadlock timeout")
|
|
}
|
|
}
|