prometheus/storage/metric/tiered/sample_test.go
Julius Volz 01f652cb4c Separate storage implementation from interfaces.
This was initially motivated by wanting to distribute the rule checker
tool under `tools/rule_checker`. However, this was not possible without
also distributing the LevelDB dynamic libraries because the tool
transitively depended on Levigo:

rule checker -> query layer -> tiered storage layer -> leveldb

This change separates external storage interfaces from the
implementation (tiered storage, leveldb storage, memory storage) by
putting them into separate packages:

- storage/metric: public, implementation-agnostic interfaces
- storage/metric/tiered: tiered storage implementation, including memory
                         and LevelDB storage.

I initially also considered splitting up the implementation into
separate packages for tiered storage, memory storage, and LevelDB
storage, but these are currently so intertwined that it would be another
major project in itself.

The query layers and most other parts of Prometheus now have no notion of
the storage implementation anymore and just use whatever implementation
they get passed in via interfaces.

The rule_checker is now a static binary :)

Change-Id: I793bbf631a8648ca31790e7e772ecf9c2b92f7a0
2014-04-16 13:30:19 +02:00
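
As a rough illustration of the interface-based wiring the commit message
describes, the sketch below shows a consumer that depends only on an
implementation-agnostic query interface and therefore links without
LevelDB/Levigo. The Querier interface, ruleChecker, and fakeStorage names
are illustrative stand-ins, not types from the actual storage/metric package.

package main

import "fmt"

// Querier is an illustrative stand-in for an implementation-agnostic
// interface of the kind storage/metric exposes; the real package defines
// its own interfaces.
type Querier interface {
	GetValueAtTime(fingerprint string, timestamp int64) (float64, bool)
}

// ruleChecker is a hypothetical consumer. It depends only on the Querier
// interface, so any implementation (tiered, memory, test fake) can be
// passed in without the consumer importing LevelDB.
type ruleChecker struct {
	storage Querier
}

func (r *ruleChecker) check(fp string, ts int64) {
	if v, ok := r.storage.GetValueAtTime(fp, ts); ok {
		fmt.Printf("value for %s at %d: %f\n", fp, ts, v)
	}
}

// fakeStorage is an in-memory stand-in implementation used for the example.
type fakeStorage map[string]float64

func (s fakeStorage) GetValueAtTime(fp string, _ int64) (float64, bool) {
	v, ok := s[fp]
	return v, ok
}

func main() {
	rc := &ruleChecker{storage: fakeStorage{"http_requests_total": 42}}
	rc.check("http_requests_total", 1397649019)
}

The point of the separation is that only the process wiring things together
needs to import the tiered/LevelDB implementation; everything else, such as
the rule checker, compiles against the interfaces alone.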


package tiered

import (
	"math/rand"
	"testing"

	clientmodel "github.com/prometheus/client_golang/model"

	"github.com/prometheus/prometheus/storage/metric"
)

const numTestValues = 5000

func TestValuesMarshalAndUnmarshal(t *testing.T) {
	values := randomValues(numTestValues)

	marshalled := marshalValues(values, nil)
	unmarshalled := unmarshalValues(marshalled, nil)

	for i, expected := range values {
		actual := unmarshalled[i]
		if !actual.Equal(&expected) {
			t.Fatalf("%d. got: %v, expected: %v", i, actual, expected)
		}
	}
}

func randomValues(numSamples int) metric.Values {
	v := make(metric.Values, 0, numSamples)
	for i := 0; i < numSamples; i++ {
		v = append(v, metric.SamplePair{
			Timestamp: clientmodel.Timestamp(rand.Int63()),
			Value:     clientmodel.SampleValue(rand.NormFloat64()),
		})
	}
	return v
}

func benchmarkMarshal(b *testing.B, n int) {
	v := randomValues(n)
	b.ResetTimer()

	// TODO: Reuse buffer to compare performance.
	// - Delta is -30 percent time overhead.
	for i := 0; i < b.N; i++ {
		marshalValues(v, nil)
	}
}

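// A rough sketch (not part of the original file) of the buffer reuse the TODO
// in benchmarkMarshal refers to: pass the previously returned slice back into
// marshalValues instead of nil so it can append into the existing allocation.
// This assumes the second argument is a reusable destination buffer and that
// marshalValues returns the encoded bytes, as the nil usage above suggests.
//
//	func benchmarkMarshalReuse(b *testing.B, n int) {
//		v := randomValues(n)
//		var buf []byte
//		b.ResetTimer()
//		for i := 0; i < b.N; i++ {
//			buf = marshalValues(v, buf[:0])
//		}
//	}
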
func BenchmarkMarshal1(b *testing.B) {
	benchmarkMarshal(b, 1)
}

func BenchmarkMarshal10(b *testing.B) {
	benchmarkMarshal(b, 10)
}

func BenchmarkMarshal100(b *testing.B) {
	benchmarkMarshal(b, 100)
}

func BenchmarkMarshal1000(b *testing.B) {
	benchmarkMarshal(b, 1000)
}

func BenchmarkMarshal10000(b *testing.B) {
	benchmarkMarshal(b, 10000)
}

func benchmarkUnmarshal(b *testing.B, n int) {
	v := randomValues(n)
	marshalled := marshalValues(v, nil)
	b.ResetTimer()

	// TODO: Reuse buffer to compare performance.
	// - Delta is -15 percent time overhead.
	for i := 0; i < b.N; i++ {
		unmarshalValues(marshalled, nil)
	}
}

func BenchmarkUnmarshal1(b *testing.B) {
	benchmarkUnmarshal(b, 1)
}

func BenchmarkUnmarshal10(b *testing.B) {
	benchmarkUnmarshal(b, 10)
}

func BenchmarkUnmarshal100(b *testing.B) {
	benchmarkUnmarshal(b, 100)
}

func BenchmarkUnmarshal1000(b *testing.B) {
	benchmarkUnmarshal(b, 1000)
}

func BenchmarkUnmarshal10000(b *testing.B) {
	benchmarkUnmarshal(b, 10000)
}