// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package local contains the local time series storage used by Prometheus.
package local

import (
	"container/list"
	"sync"
	"sync/atomic"
	"time"

	"github.com/golang/glog"
	"github.com/prometheus/client_golang/prometheus"

	clientmodel "github.com/prometheus/client_golang/model"

	"github.com/prometheus/prometheus/storage/metric"
)

const (
	evictRequestsCap = 1024
	chunkLen         = 1024

	// See waitForNextFP.
	fpMaxWaitDuration = 10 * time.Second
	fpMinWaitDuration = 20 * time.Millisecond // A small multiple of disk seek time.
	fpMaxSweepTime    = 6 * time.Hour

	maxEvictInterval = time.Minute
	headChunkTimeout = time.Hour // Close head chunk if not touched for that long.

	appendWorkers  = 8 // Should be enough to not make appending a bottleneck.
	appendQueueCap = 2 * appendWorkers
)
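
// storageState denotes the lifecycle state of the storage: starting up,
// serving, or shutting down.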
type storageState uint

const (
	storageStarting storageState = iota
	storageServing
	storageStopping
)
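
// A persistRequest asks handlePersistQueue to persist the given chunk of the
// series identified by the fingerprint.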
type persistRequest struct {
	fingerprint clientmodel.Fingerprint
	chunkDesc   *chunkDesc
}
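
// An evictRequest is sent to handleEvictList to add a chunkDesc to the evict
// list (evict == true) or to remove it from the list again (evict == false).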
type evictRequest struct {
	cd    *chunkDesc
	evict bool
}
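
// memorySeriesStorage implements Storage. It keeps the most recent chunks of
// each time series in memory and hands completed chunks over to the embedded
// persistence layer on disk.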
type memorySeriesStorage struct {
	fpLocker   *fingerprintLocker
	fpToSeries *seriesMap

	loopStopping, loopStopped  chan struct{}
	maxMemoryChunks            int
	dropAfter                  time.Duration
	checkpointInterval         time.Duration
	checkpointDirtySeriesLimit int

	appendQueue         chan *clientmodel.Sample
	appendLastTimestamp clientmodel.Timestamp // The timestamp of the last sample sent to the append queue.
	appendWaitGroup     sync.WaitGroup        // To wait for all appended samples to be processed.

	persistQueue    chan persistRequest
	persistQueueCap int // Not actually the cap of above channel. See handlePersistQueue.
	persistStopped  chan struct{}

	persistence *persistence

	countPersistedHeadChunks chan struct{}

	evictList                   *list.List
	evictRequests               chan evictRequest
	evictStopping, evictStopped chan struct{}

	persistLatency              prometheus.Summary
	persistErrors               prometheus.Counter
	persistQueueCapacity        prometheus.Metric
	persistQueueLength          prometheus.Gauge
	numSeries                   prometheus.Gauge
	seriesOps                   *prometheus.CounterVec
	ingestedSamplesCount        prometheus.Counter
	invalidPreloadRequestsCount prometheus.Counter
}

// MemorySeriesStorageOptions contains options needed by
// NewMemorySeriesStorage. It is not safe to leave any of those at their zero
// values.
type MemorySeriesStorageOptions struct {
	MemoryChunks               int           // How many chunks to keep in memory.
	PersistenceStoragePath     string        // Location of persistence files.
	PersistenceRetentionPeriod time.Duration // Chunks at least that old are dropped.
	PersistenceQueueCapacity   int           // Capacity of queue for chunks to be persisted.
	CheckpointInterval         time.Duration // How often to checkpoint the series map and head chunks.
	CheckpointDirtySeriesLimit int           // How many dirty series will trigger an early checkpoint.
	Dirty                      bool          // Force the storage to consider itself dirty on startup.
}
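
// A minimal usage sketch; the option values below are purely illustrative,
// not recommendations:
//
//	storage, err := NewMemorySeriesStorage(&MemorySeriesStorageOptions{
//		MemoryChunks:               1024 * 1024,
//		PersistenceStoragePath:     "/var/lib/prometheus",
//		PersistenceRetentionPeriod: 15 * 24 * time.Hour,
//		PersistenceQueueCapacity:   32 * 1024,
//		CheckpointInterval:         5 * time.Minute,
//		CheckpointDirtySeriesLimit: 5000,
//	})
//	if err != nil {
//		// Handle the error.
//	}
//	storage.Start()
//	defer storage.Stop()
//	storage.AppendSamples(samples) // samples is a clientmodel.Samples slice.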

// NewMemorySeriesStorage returns a newly allocated Storage. Storage.Start
// still has to be called to start the storage.
func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) (Storage, error) {
	p, err := newPersistence(o.PersistenceStoragePath, chunkLen, o.Dirty)
	if err != nil {
		return nil, err
	}
	glog.Info("Loading series map and head chunks...")
	fpToSeries, err := p.loadSeriesMapAndHeads()
	if err != nil {
		return nil, err
	}
	glog.Infof("%d series loaded.", fpToSeries.length())
	numSeries := prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "memory_series",
		Help:      "The current number of series in memory.",
	})
	numSeries.Set(float64(fpToSeries.length()))

	s := &memorySeriesStorage{
		fpLocker:   newFingerprintLocker(1024),
		fpToSeries: fpToSeries,

		loopStopping:               make(chan struct{}),
		loopStopped:                make(chan struct{}),
		maxMemoryChunks:            o.MemoryChunks,
		dropAfter:                  o.PersistenceRetentionPeriod,
		checkpointInterval:         o.CheckpointInterval,
		checkpointDirtySeriesLimit: o.CheckpointDirtySeriesLimit,

		appendLastTimestamp: clientmodel.Earliest,
		appendQueue:         make(chan *clientmodel.Sample, appendQueueCap),

		// The actual buffering happens within handlePersistQueue, so
		// cap of persistQueue just has to be enough to not block while
		// handlePersistQueue is writing to disk (20ms or so).
		persistQueue:    make(chan persistRequest, 1024),
		persistQueueCap: o.PersistenceQueueCapacity,
		persistStopped:  make(chan struct{}),
		persistence:     p,

		countPersistedHeadChunks: make(chan struct{}, 100),

		evictList:     list.New(),
		evictRequests: make(chan evictRequest, evictRequestsCap),
		evictStopping: make(chan struct{}),
		evictStopped:  make(chan struct{}),

		persistLatency: prometheus.NewSummary(prometheus.SummaryOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "persist_latency_microseconds",
			Help:      "A summary of latencies for persisting each chunk.",
		}),
		persistErrors: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "persist_errors_total",
			Help:      "The total number of errors while persisting chunks.",
		}),
		persistQueueCapacity: prometheus.MustNewConstMetric(
			prometheus.NewDesc(
				prometheus.BuildFQName(namespace, subsystem, "persist_queue_capacity"),
				"The total capacity of the persist queue.",
				nil, nil,
			),
			prometheus.GaugeValue, float64(o.PersistenceQueueCapacity),
		),
		persistQueueLength: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "persist_queue_length",
			Help:      "The current number of chunks waiting in the persist queue.",
		}),
		numSeries: numSeries,
		seriesOps: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "series_ops_total",
				Help:      "The total number of series operations by their type.",
			},
			[]string{opTypeLabel},
		),
		ingestedSamplesCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "ingested_samples_total",
			Help:      "The total number of samples ingested.",
		}),
		invalidPreloadRequestsCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "invalid_preload_requests_total",
			Help:      "The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes.",
		}),
	}

	for i := 0; i < appendWorkers; i++ {
		go func() {
			for sample := range s.appendQueue {
				s.appendSample(sample)
				s.appendWaitGroup.Done()
			}
		}()
	}

	return s, nil
}

// Start implements Storage.
func (s *memorySeriesStorage) Start() {
	go s.handleEvictList()
	go s.handlePersistQueue()
	go s.loop()
}

// Stop implements Storage.
func (s *memorySeriesStorage) Stop() error {
	glog.Info("Stopping local storage...")

	glog.Info("Draining append queue...")
	close(s.appendQueue)
	s.appendWaitGroup.Wait()
	glog.Info("Append queue drained.")

	glog.Info("Stopping maintenance loop...")
	close(s.loopStopping)
	<-s.loopStopped

	glog.Info("Stopping persist queue...")
	close(s.persistQueue)
	<-s.persistStopped

	glog.Info("Stopping chunk eviction...")
	close(s.evictStopping)
	<-s.evictStopped

	// One final checkpoint of the series map and the head chunks.
	if err := s.persistence.checkpointSeriesMapAndHeads(s.fpToSeries, s.fpLocker); err != nil {
		return err
	}

	if err := s.persistence.close(); err != nil {
		return err
	}
	glog.Info("Local storage stopped.")
	return nil
}

// WaitForIndexing implements Storage.
func (s *memorySeriesStorage) WaitForIndexing() {
	// First let all goroutines appending samples stop.
	s.appendWaitGroup.Wait()
	// Only then wait for the persistence to index them.
	s.persistence.waitForIndexing()
}

// NewIterator implements Storage.
func (s *memorySeriesStorage) NewIterator(fp clientmodel.Fingerprint) SeriesIterator {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)

	series, ok := s.fpToSeries.get(fp)
	if !ok {
		// Oops, no series for fp found. That happens if, after
		// preloading is done, the whole series is identified as old
		// enough for purging and hence purged for good. As there is no
		// data left to iterate over, return an iterator that will never
		// return any values.
		return nopSeriesIterator{}
	}
	return series.newIterator(
		func() { s.fpLocker.Lock(fp) },
		func() { s.fpLocker.Unlock(fp) },
	)
}

// NewPreloader implements Storage.
func (s *memorySeriesStorage) NewPreloader() Preloader {
	return &memorySeriesPreloader{
		storage: s,
	}
}

// GetFingerprintsForLabelMatchers implements Storage.
func (s *memorySeriesStorage) GetFingerprintsForLabelMatchers(labelMatchers metric.LabelMatchers) clientmodel.Fingerprints {
	var result map[clientmodel.Fingerprint]struct{}
	for _, matcher := range labelMatchers {
		intersection := map[clientmodel.Fingerprint]struct{}{}
		switch matcher.Type {
		case metric.Equal:
			fps, err := s.persistence.getFingerprintsForLabelPair(
				metric.LabelPair{
					Name:  matcher.Name,
					Value: matcher.Value,
				},
			)
			if err != nil {
				glog.Error("Error getting fingerprints for label pair: ", err)
			}
			if len(fps) == 0 {
				return nil
			}
			for _, fp := range fps {
				if _, ok := result[fp]; ok || result == nil {
					intersection[fp] = struct{}{}
				}
			}
		default:
			values, err := s.persistence.getLabelValuesForLabelName(matcher.Name)
			if err != nil {
				glog.Errorf("Error getting label values for label name %q: %v", matcher.Name, err)
			}
			matches := matcher.Filter(values)
			if len(matches) == 0 {
				return nil
			}
			for _, v := range matches {
				fps, err := s.persistence.getFingerprintsForLabelPair(
					metric.LabelPair{
						Name:  matcher.Name,
						Value: v,
					},
				)
				if err != nil {
					glog.Error("Error getting fingerprints for label pair: ", err)
				}
				for _, fp := range fps {
					if _, ok := result[fp]; ok || result == nil {
						intersection[fp] = struct{}{}
					}
				}
			}
		}
		if len(intersection) == 0 {
			return nil
		}
		result = intersection
	}

	fps := make(clientmodel.Fingerprints, 0, len(result))
	for fp := range result {
		fps = append(fps, fp)
	}
	return fps
}

// GetLabelValuesForLabelName implements Storage.
func (s *memorySeriesStorage) GetLabelValuesForLabelName(labelName clientmodel.LabelName) clientmodel.LabelValues {
	lvs, err := s.persistence.getLabelValuesForLabelName(labelName)
	if err != nil {
		glog.Errorf("Error getting label values for label name %q: %v", labelName, err)
	}
	return lvs
}

// GetMetricForFingerprint implements Storage.
func (s *memorySeriesStorage) GetMetricForFingerprint(fp clientmodel.Fingerprint) clientmodel.COWMetric {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)

	series, ok := s.fpToSeries.get(fp)
	if ok {
		// Wrap the returned metric in a copy-on-write (COW) metric here
		// because the caller might mutate it.
		return clientmodel.COWMetric{
			Metric: series.metric,
		}
	}
	metric, err := s.persistence.getArchivedMetric(fp)
	if err != nil {
		glog.Errorf("Error retrieving archived metric for fingerprint %v: %v", fp, err)
	}
	return clientmodel.COWMetric{
		Metric: metric,
	}
}

// AppendSamples implements Storage.
func (s *memorySeriesStorage) AppendSamples(samples clientmodel.Samples) {
	for _, sample := range samples {
		if sample.Timestamp != s.appendLastTimestamp {
			// Timestamp has changed. We have to wait for processing
			// of all appended samples before proceeding. Otherwise,
			// we might violate the storage contract that each
			// sample appended to a given series has to have a
			// timestamp greater or equal to the previous sample
			// appended to that series.
			s.appendWaitGroup.Wait()
			s.appendLastTimestamp = sample.Timestamp
		}
		s.appendWaitGroup.Add(1)
		s.appendQueue <- sample
	}
}
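
// appendSample adds the sample to the in-memory series identified by its
// fingerprint, creating or unarchiving that series if necessary, and queues
// any chunks that are complete after the append for persistence.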
func (s *memorySeriesStorage) appendSample(sample *clientmodel.Sample) {
	fp := sample.Metric.Fingerprint()
	s.fpLocker.Lock(fp)
	series := s.getOrCreateSeries(fp, sample.Metric)
	chunkDescsToPersist := series.add(fp, &metric.SamplePair{
		Value:     sample.Value,
		Timestamp: sample.Timestamp,
	})
	s.fpLocker.Unlock(fp)
	s.ingestedSamplesCount.Inc()

	if len(chunkDescsToPersist) == 0 {
		return
	}
	// Queue only outside of the locked area, processing the persistQueue
	// requires the same lock!
	for _, cd := range chunkDescsToPersist {
		s.persistQueue <- persistRequest{fp, cd}
	}
	// Count that a head chunk was persisted, but only best effort, i.e. we
	// don't want to block here.
	select {
	case s.countPersistedHeadChunks <- struct{}{}: // Counted.
	default: // Meh...
	}
}
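
// getOrCreateSeries returns the memorySeries for the given fingerprint,
// unarchiving an archived series or creating (and indexing) a brand-new one if
// it is not in memory yet. The caller must have locked the fingerprint.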
func (s *memorySeriesStorage) getOrCreateSeries(fp clientmodel.Fingerprint, m clientmodel.Metric) *memorySeries {
	series, ok := s.fpToSeries.get(fp)
	if !ok {
		unarchived, firstTime, err := s.persistence.unarchiveMetric(fp)
		if err != nil {
			glog.Errorf("Error unarchiving fingerprint %v: %v", fp, err)
		}
		if unarchived {
			s.seriesOps.WithLabelValues(unarchive).Inc()
		} else {
			// This was a genuinely new series, so index the metric.
			s.persistence.indexMetric(fp, m)
			s.seriesOps.WithLabelValues(create).Inc()
		}
		series = newMemorySeries(m, !unarchived, firstTime)
		s.fpToSeries.put(fp, series)
		s.numSeries.Inc()
	}
	return series
}
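
// preloadChunksForRange loads the chunks of the series identified by fp that
// are needed to cover the time range from 'from' to 'through'. If the series
// is only archived and its archived range overlaps the requested range
// (extended by stalenessDelta), it is unarchived into memory first. A nil
// chunkDesc slice is returned if the series does not exist or has no data in
// the range.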
func (s *memorySeriesStorage) preloadChunksForRange(
	fp clientmodel.Fingerprint,
	from clientmodel.Timestamp, through clientmodel.Timestamp,
	stalenessDelta time.Duration,
) ([]*chunkDesc, error) {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)

	series, ok := s.fpToSeries.get(fp)
	if !ok {
		has, first, last, err := s.persistence.hasArchivedMetric(fp)
		if err != nil {
			return nil, err
		}
		if !has {
			s.invalidPreloadRequestsCount.Inc()
			return nil, nil
		}
		if from.Add(-stalenessDelta).Before(last) && through.Add(stalenessDelta).After(first) {
			metric, err := s.persistence.getArchivedMetric(fp)
			if err != nil {
				return nil, err
			}
			series = s.getOrCreateSeries(fp, metric)
		} else {
			return nil, nil
		}
	}
	return series.preloadChunksForRange(from, through, fp, s)
}
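
// handleEvictList maintains the list of chunks that are eligible for eviction
// and triggers evictions when enough evictable chunks have accumulated or the
// evict interval has passed. It runs in its own goroutine until
// s.evictStopping is closed.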
func (s *memorySeriesStorage) handleEvictList() {
	ticker := time.NewTicker(maxEvictInterval)
	count := 0

	for {
		// To batch up evictions a bit, this tries evictions at least
		// once per evict interval, but earlier if the number of evict
		// requests with evict==true that have happened since the last
		// evict run is more than maxMemoryChunks/1000.
		select {
		case req := <-s.evictRequests:
			if req.evict {
				req.cd.evictListElement = s.evictList.PushBack(req.cd)
				count++
				if count > s.maxMemoryChunks/1000 {
					s.maybeEvict()
					count = 0
				}
			} else {
				if req.cd.evictListElement != nil {
					s.evictList.Remove(req.cd.evictListElement)
					req.cd.evictListElement = nil
				}
			}
		case <-ticker.C:
			if s.evictList.Len() > 0 {
				s.maybeEvict()
			}
		case <-s.evictStopping:
			// Drain evictRequests forever in a goroutine to not let
			// requesters hang.
			go func() {
				for {
					<-s.evictRequests
				}
			}()
			ticker.Stop()
			glog.Info("Chunk eviction stopped.")
			close(s.evictStopped)
			return
		}
	}
}

// maybeEvict is a local helper method. Must only be called by handleEvictList.
func (s *memorySeriesStorage) maybeEvict() {
	numChunksToEvict := int(atomic.LoadInt64(&numMemChunks)) - s.maxMemoryChunks
	if numChunksToEvict <= 0 {
		return
	}
	chunkDescsToEvict := make([]*chunkDesc, numChunksToEvict)
	for i := range chunkDescsToEvict {
		e := s.evictList.Front()
		if e == nil {
			break
		}
		cd := e.Value.(*chunkDesc)
		cd.evictListElement = nil
		chunkDescsToEvict[i] = cd
		s.evictList.Remove(e)
	}
	// Do the actual eviction in a goroutine as we might otherwise deadlock,
	// in the following way: A chunk was unpinned completely and therefore
	// scheduled for eviction. At the time we actually try to evict it,
	// another goroutine is pinning the chunk. The pinning goroutine has
	// currently locked the chunk and tries to send the evict request (to
	// remove the chunk from the evict list) to the evictRequests
	// channel. The send blocks because evictRequests is full. However, the
	// goroutine that is supposed to empty the channel is waiting for the
	// chunkDesc lock to try to evict the chunk.
	go func() {
		for _, cd := range chunkDescsToEvict {
			if cd == nil {
				break
			}
			cd.maybeEvict()
			// We don't care if the eviction succeeds. If the chunk
			// was pinned in the meantime, it will be added to the
			// evict list once it gets unpinned again.
		}
	}()
}
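
// handlePersistQueue buckets incoming persist requests by fingerprint and
// always persists the chunks of the fingerprint with the most queued chunks in
// one go. That way, a backlog increases the number of chunks written per disk
// seek rather than the number of seeks. It runs in its own goroutine until
// s.persistQueue is closed and drained.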
func (s *memorySeriesStorage) handlePersistQueue() {
	chunkMaps := chunkMaps{}
	chunkCount := 0

	persistMostConsecutiveChunks := func() {
		fp, cds := chunkMaps.pop()
		if err := s.persistChunks(fp, cds); err != nil {
			// Need to put chunks back for retry.
			for _, cd := range cds {
				chunkMaps.add(fp, cd)
			}
			return
		}
		chunkCount -= len(cds)
	}

loop:
	for {
		if chunkCount >= s.persistQueueCap && chunkCount > 0 {
			glog.Warningf("%d chunks queued for persistence. Ingestion pipeline will backlog.", chunkCount)
			persistMostConsecutiveChunks()
		}
		select {
		case req, ok := <-s.persistQueue:
			if !ok {
				break loop
			}
			chunkMaps.add(req.fingerprint, req.chunkDesc)
			chunkCount++
		default:
			if chunkCount > 0 {
				persistMostConsecutiveChunks()
				continue loop
			}
			// If we are here, there is nothing to do right now. So
			// just wait for a persist request to come in.
			req, ok := <-s.persistQueue
			if !ok {
				break loop
			}
			chunkMaps.add(req.fingerprint, req.chunkDesc)
			chunkCount++
		}
		s.persistQueueLength.Set(float64(chunkCount))
	}

	// Drain all requests.
	for _, m := range chunkMaps {
		for fp, cds := range m {
			if s.persistChunks(fp, cds) == nil {
				chunkCount -= len(cds)
				if (chunkCount+len(cds))/1000 > chunkCount/1000 {
					glog.Infof(
						"Still draining persist queue, %d chunks left to persist...",
						chunkCount,
					)
				}
				s.persistQueueLength.Set(float64(chunkCount))
			}
		}
	}

	glog.Info("Persist queue drained and stopped.")
	close(s.persistStopped)
}
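
// persistChunks writes the given chunks of the series identified by fp to
// disk, records latency and error metrics, and unpins the chunk descriptors on
// success.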
func (s *memorySeriesStorage) persistChunks(fp clientmodel.Fingerprint, cds []*chunkDesc) error {
	start := time.Now()
	chunks := make([]chunk, len(cds))
	for i, cd := range cds {
		chunks[i] = cd.chunk
	}
	s.fpLocker.Lock(fp)
	offset, err := s.persistence.persistChunks(fp, chunks)
	if series, seriesInMemory := s.fpToSeries.get(fp); err == nil && seriesInMemory && series.chunkDescsOffset == -1 {
		// This is the first chunk persisted for a newly created
		// series that had prior chunks on disk. Finally, we can
		// set the chunkDescsOffset.
		series.chunkDescsOffset = offset
	}
	s.fpLocker.Unlock(fp)
	s.persistLatency.Observe(float64(time.Since(start)) / float64(time.Microsecond))
	if err != nil {
		s.persistErrors.Inc()
		glog.Error("Error persisting chunks: ", err)
		s.persistence.setDirty(true)
		return err
	}
	for _, cd := range cds {
		cd.unpin(s.evictRequests)
	}
	chunkOps.WithLabelValues(persistAndUnpin).Add(float64(len(cds)))
	return nil
}

// waitForNextFP waits an estimated duration, after which we want to process
// another fingerprint so that we will process all fingerprints in a tenth of
// s.dropAfter assuming that the system is doing nothing else, e.g. if we want
// to drop chunks after 40h, we want to cycle through all fingerprints within
// 4h. However, the maximum sweep time is capped at fpMaxSweepTime. Furthermore,
// this method will always wait for at least fpMinWaitDuration and never longer
// than fpMaxWaitDuration. If s.loopStopping is closed, it will return false
// immediately. The estimation is based on the total number of fingerprints as
// passed in.
func (s *memorySeriesStorage) waitForNextFP(numberOfFPs int) bool {
	d := fpMaxWaitDuration
	if numberOfFPs != 0 {
		sweepTime := s.dropAfter / 10
		if sweepTime > fpMaxSweepTime {
			sweepTime = fpMaxSweepTime
		}
		d = sweepTime / time.Duration(numberOfFPs)
		if d < fpMinWaitDuration {
			d = fpMinWaitDuration
		}
		if d > fpMaxWaitDuration {
			d = fpMaxWaitDuration
		}
	}
	t := time.NewTimer(d)
	select {
	case <-t.C:
		return true
	case <-s.loopStopping:
		return false
	}
}

// cycleThroughMemoryFingerprints returns a channel that emits fingerprints for
// series in memory in a throttled fashion. It continues to cycle through all
// fingerprints in memory until s.loopStopping is closed.
func (s *memorySeriesStorage) cycleThroughMemoryFingerprints() chan clientmodel.Fingerprint {
	memoryFingerprints := make(chan clientmodel.Fingerprint)
	go func() {
		var fpIter <-chan clientmodel.Fingerprint

		defer func() {
			if fpIter != nil {
				for range fpIter {
					// Consume the iterator.
				}
			}
			close(memoryFingerprints)
		}()

		for {
			// Initial wait, also important if there are no FPs yet.
			if !s.waitForNextFP(s.fpToSeries.length()) {
				return
			}
			begin := time.Now()
			fpIter = s.fpToSeries.fpIter()
			count := 0
			for fp := range fpIter {
				select {
				case memoryFingerprints <- fp:
				case <-s.loopStopping:
					return
				}
				s.waitForNextFP(s.fpToSeries.length())
				count++
			}
			if count > 0 {
				glog.Infof(
					"Completed maintenance sweep through %d in-memory fingerprints in %v.",
					count, time.Since(begin),
				)
			}
		}
	}()

	return memoryFingerprints
}

// cycleThroughArchivedFingerprints returns a channel that emits fingerprints
// for archived series in a throttled fashion. It continues to cycle through all
// archived fingerprints until s.loopStopping is closed.
func (s *memorySeriesStorage) cycleThroughArchivedFingerprints() chan clientmodel.Fingerprint {
	archivedFingerprints := make(chan clientmodel.Fingerprint)
	go func() {
		defer close(archivedFingerprints)

		for {
			archivedFPs, err := s.persistence.getFingerprintsModifiedBefore(
				clientmodel.TimestampFromTime(time.Now()).Add(-s.dropAfter),
			)
			if err != nil {
				glog.Error("Failed to lookup archived fingerprint ranges: ", err)
				s.waitForNextFP(0)
				continue
			}
			// Initial wait, also important if there are no FPs yet.
			if !s.waitForNextFP(len(archivedFPs)) {
				return
			}
			begin := time.Now()
			for _, fp := range archivedFPs {
				select {
				case archivedFingerprints <- fp:
				case <-s.loopStopping:
					return
				}
				s.waitForNextFP(len(archivedFPs))
			}
			if len(archivedFPs) > 0 {
				glog.Infof(
					"Completed maintenance sweep through %d archived fingerprints in %v.",
					len(archivedFPs), time.Since(begin),
				)
			}
		}
	}()
	return archivedFingerprints
}
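
// loop is the storage's maintenance loop. It checkpoints the series map and
// head chunks periodically (or early, once enough head chunks have been
// persisted since the last checkpoint) and maintains the in-memory and
// archived series handed to it by the two fingerprint cycling channels. It
// runs in its own goroutine until s.loopStopping is closed.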
func (s *memorySeriesStorage) loop() {
	checkpointTimer := time.NewTimer(s.checkpointInterval)

	// We take the number of head chunks persisted since the last checkpoint
	// as an approximation for the number of series that are "dirty",
	// i.e. whose head chunk is different from the one in the most recent
	// checkpoint or for which the fact that the head chunk has been
	// persisted is not reflected in the most recent checkpoint. This count
	// could overestimate the number of dirty series, but it's good enough
	// as a heuristic.
	headChunksPersistedSinceLastCheckpoint := 0

	defer func() {
		checkpointTimer.Stop()
		glog.Info("Maintenance loop stopped.")
		close(s.loopStopped)
	}()

	memoryFingerprints := s.cycleThroughMemoryFingerprints()
	archivedFingerprints := s.cycleThroughArchivedFingerprints()

loop:
	for {
		select {
		case <-s.loopStopping:
			break loop
		case <-checkpointTimer.C:
			s.persistence.checkpointSeriesMapAndHeads(s.fpToSeries, s.fpLocker)
			headChunksPersistedSinceLastCheckpoint = 0
			checkpointTimer.Reset(s.checkpointInterval)
		case fp := <-memoryFingerprints:
			s.maintainMemorySeries(fp, clientmodel.TimestampFromTime(time.Now()).Add(-s.dropAfter))
		case fp := <-archivedFingerprints:
			s.maintainArchivedSeries(fp, clientmodel.TimestampFromTime(time.Now()).Add(-s.dropAfter))
		case <-s.countPersistedHeadChunks:
			headChunksPersistedSinceLastCheckpoint++
			// Check if we have enough "dirty" series so that we need an early checkpoint.
			// As described above, we take the headChunksPersistedSinceLastCheckpoint as a
			// heuristic for "dirty" series. However, if we are already backlogging
			// chunks to be persisted, creating a checkpoint would be counterproductive,
			// as it would slow down chunk persisting even more, while in a situation like
			// that, the best we can do for crash recovery is to work through the persist
			// queue as quickly as possible. So only checkpoint if s.persistQueue is
			// at most 20% full.
			if headChunksPersistedSinceLastCheckpoint >= s.checkpointDirtySeriesLimit &&
				len(s.persistQueue) < cap(s.persistQueue)/5 {
				checkpointTimer.Reset(0)
			}
		}
	}
	// Wait until both channels are closed.
	for range memoryFingerprints {
	}
	for range archivedFingerprints {
	}
}

// maintainMemorySeries first purges the series from old chunks. If the series
// still exists after that, it proceeds with the following steps: It closes the
// head chunk if it was not touched in a while. It archives a series if all
// chunks are evicted. It evicts chunkDescs if there are too many.
func (s *memorySeriesStorage) maintainMemorySeries(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) {
	var headChunkToPersist *chunkDesc
	s.fpLocker.Lock(fp)
	defer func() {
		s.fpLocker.Unlock(fp)
		// Queue outside of lock!
		if headChunkToPersist != nil {
			s.persistQueue <- persistRequest{fp, headChunkToPersist}
			// Count that a head chunk was persisted, but only best effort, i.e. we
			// don't want to block here.
			select {
			case s.countPersistedHeadChunks <- struct{}{}: // Counted.
			default: // Meh...
			}
		}
	}()

	series, ok := s.fpToSeries.get(fp)
	if !ok {
		// Series is actually not in memory, perhaps archived or dropped in the meantime.
		return
	}

	defer s.seriesOps.WithLabelValues(memoryMaintenance).Inc()

	if s.purgeMemorySeries(fp, series, beforeTime) {
		// Series is gone now, we are done.
		return
	}

	iOldestNotEvicted := -1
	for i, cd := range series.chunkDescs {
		if !cd.isEvicted() {
			iOldestNotEvicted = i
			break
		}
	}

	// Archive if all chunks are evicted.
	if iOldestNotEvicted == -1 {
		s.fpToSeries.del(fp)
		s.numSeries.Dec()
		// Make sure we have a head chunk descriptor (a freshly
		// unarchived series has none).
		if len(series.chunkDescs) == 0 {
			cds, err := s.loadChunkDescs(fp, clientmodel.Latest)
			if err != nil {
				glog.Errorf(
					"Could not load chunk descriptors prior to archiving metric %v, metric will not be archived: %v",
					series.metric, err,
				)
				return
			}
			series.chunkDescs = cds
		}
		if err := s.persistence.archiveMetric(
			fp, series.metric, series.firstTime(), series.head().lastTime(),
		); err != nil {
			glog.Errorf("Error archiving metric %v: %v", series.metric, err)
			return
		}
		s.seriesOps.WithLabelValues(archive).Inc()
		return
	}
	// If we are here, the series is not archived, so check for chunkDesc
	// eviction next and then if the head chunk needs to be persisted.
	series.evictChunkDescs(iOldestNotEvicted)
	if !series.headChunkPersisted && time.Now().Sub(series.head().lastTime().Time()) > headChunkTimeout {
		series.headChunkPersisted = true
		// Since we cannot modify the head chunk from now on, we
		// don't need to bother with cloning anymore.
		series.headChunkUsedByIterator = false
		headChunkToPersist = series.head()
	}
}

// purgeMemorySeries drops chunks older than beforeTime from the provided memory
// series. The caller must have locked fp. If the series contains no chunks
// after dropping old chunks, it is purged entirely. In that case, the method
// returns true.
func (s *memorySeriesStorage) purgeMemorySeries(fp clientmodel.Fingerprint, series *memorySeries, beforeTime clientmodel.Timestamp) bool {
	if !series.firstTime().Before(beforeTime) {
		// Oldest sample not old enough.
		return false
	}
	newFirstTime, numDroppedFromPersistence, allDroppedFromPersistence, err := s.persistence.dropChunks(fp, beforeTime)
	if err != nil {
		glog.Error("Error dropping persisted chunks: ", err)
	}
	numDroppedFromMemory, allDroppedFromMemory := series.dropChunks(beforeTime)
	if allDroppedFromPersistence && allDroppedFromMemory {
		s.fpToSeries.del(fp)
		s.numSeries.Dec()
		s.seriesOps.WithLabelValues(memoryPurge).Inc()
		s.persistence.unindexMetric(fp, series.metric)
		return true
	}
	if series.chunkDescsOffset != -1 {
		series.savedFirstTime = newFirstTime
		series.chunkDescsOffset += numDroppedFromMemory - numDroppedFromPersistence
		if series.chunkDescsOffset < 0 {
			panic("dropped more chunks from persistence than from memory")
		}
	}
	return false
}

// maintainArchivedSeries drops chunks older than beforeTime from an archived
// series. If the series contains no chunks after that, it is purged entirely.
func (s *memorySeriesStorage) maintainArchivedSeries(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)

	has, firstTime, lastTime, err := s.persistence.hasArchivedMetric(fp)
	if err != nil {
		glog.Error("Error looking up archived time range: ", err)
		return
	}
	if !has || !firstTime.Before(beforeTime) {
		// Oldest sample not old enough, or metric purged or unarchived in the meantime.
		return
	}

	defer s.seriesOps.WithLabelValues(archiveMaintenance).Inc()

	newFirstTime, _, allDropped, err := s.persistence.dropChunks(fp, beforeTime)
	if err != nil {
		glog.Error("Error dropping persisted chunks: ", err)
	}
	if allDropped {
		if err := s.persistence.purgeArchivedMetric(fp); err != nil {
			glog.Errorf("Error purging archived metric for fingerprint %v: %v", fp, err)
			return
		}
		s.seriesOps.WithLabelValues(archivePurge).Inc()
		return
	}
	s.persistence.updateArchivedTimeRange(fp, newFirstTime, lastTime)
}

// See persistence.loadChunks for detailed explanation.
func (s *memorySeriesStorage) loadChunks(fp clientmodel.Fingerprint, indexes []int, indexOffset int) ([]chunk, error) {
	return s.persistence.loadChunks(fp, indexes, indexOffset)
}

// See persistence.loadChunkDescs for detailed explanation.
func (s *memorySeriesStorage) loadChunkDescs(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) ([]*chunkDesc, error) {
	return s.persistence.loadChunkDescs(fp, beforeTime)
}

// Describe implements prometheus.Collector.
func (s *memorySeriesStorage) Describe(ch chan<- *prometheus.Desc) {
	s.persistence.Describe(ch)

	ch <- s.persistLatency.Desc()
	ch <- s.persistErrors.Desc()
	ch <- s.persistQueueCapacity.Desc()
	ch <- s.persistQueueLength.Desc()
	ch <- s.numSeries.Desc()
	s.seriesOps.Describe(ch)
	ch <- s.ingestedSamplesCount.Desc()
	ch <- s.invalidPreloadRequestsCount.Desc()

	ch <- numMemChunksDesc
}

// Collect implements prometheus.Collector.
func (s *memorySeriesStorage) Collect(ch chan<- prometheus.Metric) {
	s.persistence.Collect(ch)

	ch <- s.persistLatency
	ch <- s.persistErrors
	ch <- s.persistQueueCapacity
	ch <- s.persistQueueLength
	ch <- s.numSeries
	s.seriesOps.Collect(ch)
	ch <- s.ingestedSamplesCount
	ch <- s.invalidPreloadRequestsCount

	ch <- prometheus.MustNewConstMetric(
		numMemChunksDesc,
		prometheus.GaugeValue,
		float64(atomic.LoadInt64(&numMemChunks)))
}

// chunkMaps is a slice of maps with chunkDescs to be persisted.
// Each chunk map contains n consecutive chunks to persist, where
// n is the index+1.
type chunkMaps []map[clientmodel.Fingerprint][]*chunkDesc

// add adds a chunk to chunkMaps.
func (cm *chunkMaps) add(fp clientmodel.Fingerprint, cd *chunkDesc) {
	// Runtime of this method is linear with the number of
	// chunkMaps. However, we expect only ever very few maps.
	numMaps := len(*cm)
	for i, m := range *cm {
		if cds, ok := m[fp]; ok {
			// Found our fp! Add cd and level up.
			cds = append(cds, cd)
			delete(m, fp)
			if i == numMaps-1 {
				*cm = append(*cm, map[clientmodel.Fingerprint][]*chunkDesc{})
			}
			(*cm)[i+1][fp] = cds
			return
		}
	}
	// Our fp isn't contained in cm yet. Add it to the first map (and add a
	// first map if there is none).
	if numMaps == 0 {
		*cm = chunkMaps{map[clientmodel.Fingerprint][]*chunkDesc{}}
	}
	(*cm)[0][fp] = []*chunkDesc{cd}
}

// pop retrieves and removes a fingerprint with all its chunks. It chooses one
// of the fingerprints with the most chunks. It panics if cm has no entries.
func (cm *chunkMaps) pop() (clientmodel.Fingerprint, []*chunkDesc) {
	m := (*cm)[len(*cm)-1]
	for fp, cds := range m {
		delete(m, fp)
		// Prune empty maps from top level.
		for len(m) == 0 {
			*cm = (*cm)[:len(*cm)-1]
			if len(*cm) == 0 {
				break
			}
			m = (*cm)[len(*cm)-1]
		}
		return fp, cds
	}
	panic("popped from empty chunkMaps")
}