mirror of
https://github.com/prometheus/prometheus.git
synced 2025-02-02 08:31:11 -08:00
So far we've been using Go's native time.Time for anything related to sample timestamps. Since the range of time.Time is much bigger than what we need, this has created two problems: - there could be time.Time values which were out of the range/precision of the time type that we persist to disk, therefore causing incorrectly ordered keys. One bug caused by this was: https://github.com/prometheus/prometheus/issues/367 It would be good to use a timestamp type that's more closely aligned with what the underlying storage supports. - sizeof(time.Time) is 192, while Prometheus should be ok with a single 64-bit Unix timestamp (possibly even a 32-bit one). Since we store samples in large numbers, this seriously affects memory usage. Furthermore, copying/working with the data will be faster if it's smaller. *MEMORY USAGE RESULTS* Initial memory usage comparisons for a running Prometheus with 1 timeseries and 100,000 samples show roughly a 13% decrease in total (VIRT) memory usage. In my tests, this advantage for some reason decreased a bit the more samples the timeseries had (to 5-7% for millions of samples). This I can't fully explain, but perhaps garbage collection issues were involved. *WHEN TO USE THE NEW TIMESTAMP TYPE* The new clientmodel.Timestamp type should be used whenever time calculations are either directly or indirectly related to sample timestamps. For example: - the timestamp of a sample itself - all kinds of watermarks - anything that may become or is compared to a sample timestamp (like the timestamp passed into Target.Scrape()). When to still use time.Time: - for measuring durations/times not related to sample timestamps, like duration telemetry exporting, timers that indicate how frequently to execute some action, etc. *NOTE ON OPERATOR OPTIMIZATION TESTS* We don't use operator optimization code anymore, but it still lives in the code as dead code. It still has tests, but I couldn't get all of them to pass with the new timestamp format. I commented out the failing cases for now, but we should probably remove the dead code soon. I just didn't want to do that in the same change as this. Change-Id: I821787414b0debe85c9fffaeb57abd453727af0f
96 lines
2.5 KiB
Go
96 lines
2.5 KiB
Go
// Copyright 2013 Prometheus Team
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
// you may not use this file except in compliance with the License.
|
|
// You may obtain a copy of the License at
|
|
//
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
//
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
// See the License for the specific language governing permissions and
|
|
// limitations under the License.
|
|
|
|
package metric
|
|
|
|
import (
|
|
"fmt"
|
|
"runtime"
|
|
"testing"
|
|
"time"
|
|
|
|
clientmodel "github.com/prometheus/client_golang/model"
|
|
)
|
|
|
|
func BenchmarkStreamAdd(b *testing.B) {
|
|
b.StopTimer()
|
|
s := newArrayStream(clientmodel.Metric{})
|
|
samples := make(Values, b.N)
|
|
for i := 0; i < b.N; i++ {
|
|
samples = append(samples, &SamplePair{
|
|
Timestamp: clientmodel.TimestampFromTime(time.Date(i, 0, 0, 0, 0, 0, 0, time.UTC)),
|
|
Value: clientmodel.SampleValue(i),
|
|
})
|
|
}
|
|
|
|
b.StartTimer()
|
|
|
|
var pre runtime.MemStats
|
|
runtime.ReadMemStats(&pre)
|
|
|
|
s.add(samples...)
|
|
|
|
var post runtime.MemStats
|
|
runtime.ReadMemStats(&post)
|
|
|
|
b.Logf("%d cycles with %f bytes per cycle, totalling %d", b.N, float32(post.TotalAlloc-pre.TotalAlloc)/float32(b.N), post.TotalAlloc-pre.TotalAlloc)
|
|
}
|
|
|
|
func benchmarkAppendSamples(b *testing.B, labels int) {
|
|
b.StopTimer()
|
|
s := NewMemorySeriesStorage(MemorySeriesOptions{})
|
|
|
|
metric := clientmodel.Metric{}
|
|
|
|
for i := 0; i < labels; i++ {
|
|
metric[clientmodel.LabelName(fmt.Sprintf("label_%d", i))] = clientmodel.LabelValue(fmt.Sprintf("value_%d", i))
|
|
}
|
|
samples := make(clientmodel.Samples, 0, b.N)
|
|
for i := 0; i < b.N; i++ {
|
|
samples = append(samples, &clientmodel.Sample{
|
|
Metric: metric,
|
|
Value: clientmodel.SampleValue(i),
|
|
Timestamp: clientmodel.TimestampFromTime(time.Date(i, 0, 0, 0, 0, 0, 0, time.UTC)),
|
|
})
|
|
}
|
|
|
|
b.StartTimer()
|
|
var pre runtime.MemStats
|
|
runtime.ReadMemStats(&pre)
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
s.AppendSample(samples[i])
|
|
}
|
|
|
|
var post runtime.MemStats
|
|
runtime.ReadMemStats(&post)
|
|
|
|
b.Logf("%d cycles with %f bytes per cycle, totalling %d", b.N, float32(post.TotalAlloc-pre.TotalAlloc)/float32(b.N), post.TotalAlloc-pre.TotalAlloc)
|
|
}
|
|
|
|
func BenchmarkAppendSample1(b *testing.B) {
|
|
benchmarkAppendSamples(b, 1)
|
|
}
|
|
|
|
func BenchmarkAppendSample10(b *testing.B) {
|
|
benchmarkAppendSamples(b, 10)
|
|
}
|
|
|
|
func BenchmarkAppendSample100(b *testing.B) {
|
|
benchmarkAppendSamples(b, 100)
|
|
}
|
|
|
|
func BenchmarkAppendSample1000(b *testing.B) {
|
|
benchmarkAppendSamples(b, 1000)
|
|
}
|