2017-05-10 02:44:13 -07:00
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
2018-05-29 01:51:29 -07:00
"context"
2017-05-10 02:44:13 -07:00
"math"
2018-09-07 14:26:04 -07:00
"strconv"
2017-05-10 02:44:13 -07:00
"sync"
"time"
2021-06-11 09:17:59 -07:00
"github.com/go-kit/log"
"github.com/go-kit/log/level"
2018-09-07 14:26:04 -07:00
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
2020-06-01 08:21:13 -07:00
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
2020-07-30 00:45:42 -07:00
"go.uber.org/atomic"
2018-09-07 14:26:04 -07:00
2020-11-19 07:23:03 -08:00
"github.com/prometheus/client_golang/prometheus"
2021-02-10 14:25:37 -08:00
"github.com/prometheus/common/model"
2017-05-10 02:44:13 -07:00
"github.com/prometheus/prometheus/config"
2019-03-08 08:29:25 -08:00
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/relabel"
2017-10-23 13:28:17 -07:00
"github.com/prometheus/prometheus/prompb"
2020-11-19 07:23:03 -08:00
"github.com/prometheus/prometheus/scrape"
2019-09-19 02:15:41 -07:00
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/wal"
2017-05-10 02:44:13 -07:00
)
const (
	// We track samples in/out and how long pushes take using an Exponentially
	// Weighted Moving Average.
	ewmaWeight          = 0.2
	shardUpdateDuration = 10 * time.Second

	// Allow 30% too many shards before scaling down.
	shardToleranceFraction = 0.3
)
2020-02-03 13:47:03 -08:00
type queueManagerMetrics struct {
2020-04-24 20:39:46 -07:00
reg prometheus . Registerer
2021-05-06 13:53:52 -07:00
samplesTotal prometheus . Counter
exemplarsTotal prometheus . Counter
metadataTotal prometheus . Counter
failedSamplesTotal prometheus . Counter
failedExemplarsTotal prometheus . Counter
failedMetadataTotal prometheus . Counter
retriedSamplesTotal prometheus . Counter
retriedExemplarsTotal prometheus . Counter
retriedMetadataTotal prometheus . Counter
droppedSamplesTotal prometheus . Counter
droppedExemplarsTotal prometheus . Counter
enqueueRetriesTotal prometheus . Counter
sentBatchDuration prometheus . Histogram
highestSentTimestamp * maxTimestamp
pendingSamples prometheus . Gauge
pendingExemplars prometheus . Gauge
shardCapacity prometheus . Gauge
numShards prometheus . Gauge
maxNumShards prometheus . Gauge
minNumShards prometheus . Gauge
desiredNumShards prometheus . Gauge
sentBytesTotal prometheus . Counter
metadataBytesTotal prometheus . Counter
maxSamplesPerSend prometheus . Gauge
2020-02-03 13:47:03 -08:00
}
2020-04-24 20:39:46 -07:00
func newQueueManagerMetrics ( r prometheus . Registerer , rn , e string ) * queueManagerMetrics {
m := & queueManagerMetrics {
reg : r ,
}
constLabels := prometheus . Labels {
remoteName : rn ,
endpoint : e ,
}
2020-11-19 07:23:03 -08:00
m . samplesTotal = prometheus . NewCounter ( prometheus . CounterOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "samples_total" ,
Help : "Total number of samples sent to remote storage." ,
ConstLabels : constLabels ,
} )
2021-05-06 13:53:52 -07:00
m . exemplarsTotal = prometheus . NewCounter ( prometheus . CounterOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "exemplars_total" ,
Help : "Total number of exemplars sent to remote storage." ,
ConstLabels : constLabels ,
} )
2020-11-19 07:23:03 -08:00
m . metadataTotal = prometheus . NewCounter ( prometheus . CounterOpts {
2020-04-24 20:39:46 -07:00
Namespace : namespace ,
Subsystem : subsystem ,
2020-11-19 07:23:03 -08:00
Name : "metadata_total" ,
Help : "Total number of metadata entries sent to remote storage." ,
2020-04-24 20:39:46 -07:00
ConstLabels : constLabels ,
} )
m . failedSamplesTotal = prometheus . NewCounter ( prometheus . CounterOpts {
Namespace : namespace ,
Subsystem : subsystem ,
2020-11-19 07:23:03 -08:00
Name : "samples_failed_total" ,
2020-04-24 20:39:46 -07:00
Help : "Total number of samples which failed on send to remote storage, non-recoverable errors." ,
ConstLabels : constLabels ,
} )
2021-05-06 13:53:52 -07:00
m . failedExemplarsTotal = prometheus . NewCounter ( prometheus . CounterOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "exemplars_failed_total" ,
Help : "Total number of exemplars which failed on send to remote storage, non-recoverable errors." ,
ConstLabels : constLabels ,
} )
2020-11-19 07:23:03 -08:00
m . failedMetadataTotal = prometheus . NewCounter ( prometheus . CounterOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "metadata_failed_total" ,
Help : "Total number of metadata entries which failed on send to remote storage, non-recoverable errors." ,
ConstLabels : constLabels ,
} )
2020-04-24 20:39:46 -07:00
m . retriedSamplesTotal = prometheus . NewCounter ( prometheus . CounterOpts {
Namespace : namespace ,
Subsystem : subsystem ,
2020-11-19 07:23:03 -08:00
Name : "samples_retried_total" ,
2020-04-24 20:39:46 -07:00
Help : "Total number of samples which failed on send to remote storage but were retried because the send error was recoverable." ,
ConstLabels : constLabels ,
} )
2021-05-06 13:53:52 -07:00
m . retriedExemplarsTotal = prometheus . NewCounter ( prometheus . CounterOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "exemplars_retried_total" ,
Help : "Total number of exemplars which failed on send to remote storage but were retried because the send error was recoverable." ,
ConstLabels : constLabels ,
} )
2020-11-19 07:23:03 -08:00
m . retriedMetadataTotal = prometheus . NewCounter ( prometheus . CounterOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "metadata_retried_total" ,
Help : "Total number of metadata entries which failed on send to remote storage but were retried because the send error was recoverable." ,
ConstLabels : constLabels ,
} )
2020-04-24 20:39:46 -07:00
m . droppedSamplesTotal = prometheus . NewCounter ( prometheus . CounterOpts {
Namespace : namespace ,
Subsystem : subsystem ,
2020-11-19 07:23:03 -08:00
Name : "samples_dropped_total" ,
2021-05-06 13:53:52 -07:00
Help : "Total number of samples which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID." ,
ConstLabels : constLabels ,
} )
m . droppedExemplarsTotal = prometheus . NewCounter ( prometheus . CounterOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "exemplars_dropped_total" ,
Help : "Total number of exemplars which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID." ,
2020-04-24 20:39:46 -07:00
ConstLabels : constLabels ,
} )
m . enqueueRetriesTotal = prometheus . NewCounter ( prometheus . CounterOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "enqueue_retries_total" ,
Help : "Total number of times enqueue has failed because a shards queue was full." ,
ConstLabels : constLabels ,
} )
m . sentBatchDuration = prometheus . NewHistogram ( prometheus . HistogramOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "sent_batch_duration_seconds" ,
2020-11-19 07:23:03 -08:00
Help : "Duration of send calls to the remote storage." ,
increase the remote write bucket range (#7323)
* increase the remote write bucket range
Increase the range of remote write buckets to capture times above 10s for laggy scenarios
Buckets had been: {.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
Buckets are now: {0.03125, 0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512}
Signed-off-by: Bert Hartmann <berthartm@gmail.com>
* revert back to DefBuckets with addons to be backwards compatible
Signed-off-by: Bert Hartmann <berthartm@gmail.com>
* shuffle the buckets to maintain 2-2.5x increases
Signed-off-by: Bert Hartmann <berthartm@gmail.com>
2020-06-04 12:54:47 -07:00
Buckets : append ( prometheus . DefBuckets , 25 , 60 , 120 , 300 ) ,
2020-04-24 20:39:46 -07:00
ConstLabels : constLabels ,
} )
2020-10-15 14:53:59 -07:00
m . highestSentTimestamp = & maxTimestamp {
2020-04-24 20:39:46 -07:00
Gauge : prometheus . NewGauge ( prometheus . GaugeOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "queue_highest_sent_timestamp_seconds" ,
Help : "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch." ,
ConstLabels : constLabels ,
} ) ,
}
m . pendingSamples = prometheus . NewGauge ( prometheus . GaugeOpts {
Namespace : namespace ,
Subsystem : subsystem ,
2020-11-19 07:23:03 -08:00
Name : "samples_pending" ,
2020-04-24 20:39:46 -07:00
Help : "The number of samples pending in the queues shards to be sent to the remote storage." ,
ConstLabels : constLabels ,
} )
2021-05-06 13:53:52 -07:00
m . pendingExemplars = prometheus . NewGauge ( prometheus . GaugeOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "exemplars_pending" ,
Help : "The number of exemplars pending in the queues shards to be sent to the remote storage." ,
ConstLabels : constLabels ,
} )
2020-04-24 20:39:46 -07:00
m . shardCapacity = prometheus . NewGauge ( prometheus . GaugeOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "shard_capacity" ,
Help : "The capacity of each shard of the queue used for parallel sending to the remote storage." ,
ConstLabels : constLabels ,
} )
m . numShards = prometheus . NewGauge ( prometheus . GaugeOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "shards" ,
Help : "The number of shards used for parallel sending to the remote storage." ,
ConstLabels : constLabels ,
} )
m . maxNumShards = prometheus . NewGauge ( prometheus . GaugeOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "shards_max" ,
Help : "The maximum number of shards that the queue is allowed to run." ,
ConstLabels : constLabels ,
} )
m . minNumShards = prometheus . NewGauge ( prometheus . GaugeOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "shards_min" ,
Help : "The minimum number of shards that the queue is allowed to run." ,
ConstLabels : constLabels ,
} )
m . desiredNumShards = prometheus . NewGauge ( prometheus . GaugeOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "shards_desired" ,
Help : "The number of shards that the queues shard calculation wants to run based on the rate of samples in vs. samples out." ,
ConstLabels : constLabels ,
} )
2021-05-06 13:53:52 -07:00
m . sentBytesTotal = prometheus . NewCounter ( prometheus . CounterOpts {
2020-11-19 07:23:03 -08:00
Namespace : namespace ,
Subsystem : subsystem ,
2021-05-06 13:53:52 -07:00
Name : "bytes_total" ,
Help : "The total number of bytes of data (not metadata) sent by the queue after compression. Note that when exemplars over remote write is enabled the exemplars included in a remote write request count towards this metric." ,
2020-11-19 07:23:03 -08:00
ConstLabels : constLabels ,
} )
m . metadataBytesTotal = prometheus . NewCounter ( prometheus . CounterOpts {
2020-04-24 20:39:46 -07:00
Namespace : namespace ,
Subsystem : subsystem ,
2020-11-19 07:23:03 -08:00
Name : "metadata_bytes_total" ,
Help : "The total number of bytes of metadata sent by the queue after compression." ,
2020-04-24 20:39:46 -07:00
ConstLabels : constLabels ,
} )
2020-10-28 04:39:36 -07:00
m . maxSamplesPerSend = prometheus . NewGauge ( prometheus . GaugeOpts {
Namespace : namespace ,
Subsystem : subsystem ,
Name : "max_samples_per_send" ,
2021-05-06 13:53:52 -07:00
Help : "The maximum number of samples to be sent, in a single request, to the remote storage. Note that, when sending of exemplars over remote write is enabled, exemplars count towards this limt." ,
2020-10-28 04:39:36 -07:00
ConstLabels : constLabels ,
} )
2020-02-03 13:47:03 -08:00
2020-06-25 23:33:52 -07:00
return m
}
func ( m * queueManagerMetrics ) register ( ) {
if m . reg != nil {
m . reg . MustRegister (
2020-11-19 07:23:03 -08:00
m . samplesTotal ,
2021-05-06 13:53:52 -07:00
m . exemplarsTotal ,
2020-11-19 07:23:03 -08:00
m . metadataTotal ,
2020-02-03 13:47:03 -08:00
m . failedSamplesTotal ,
2021-05-06 13:53:52 -07:00
m . failedExemplarsTotal ,
2020-11-19 07:23:03 -08:00
m . failedMetadataTotal ,
2020-02-03 13:47:03 -08:00
m . retriedSamplesTotal ,
2021-05-06 13:53:52 -07:00
m . retriedExemplarsTotal ,
2020-11-19 07:23:03 -08:00
m . retriedMetadataTotal ,
2020-02-03 13:47:03 -08:00
m . droppedSamplesTotal ,
2021-05-06 13:53:52 -07:00
m . droppedExemplarsTotal ,
2020-02-03 13:47:03 -08:00
m . enqueueRetriesTotal ,
m . sentBatchDuration ,
2020-04-24 20:39:46 -07:00
m . highestSentTimestamp ,
m . pendingSamples ,
2021-05-06 13:53:52 -07:00
m . pendingExemplars ,
2020-02-03 13:47:03 -08:00
m . shardCapacity ,
m . numShards ,
m . maxNumShards ,
m . minNumShards ,
m . desiredNumShards ,
2021-05-06 13:53:52 -07:00
m . sentBytesTotal ,
2020-11-19 07:23:03 -08:00
m . metadataBytesTotal ,
2020-10-28 04:39:36 -07:00
m . maxSamplesPerSend ,
2020-02-03 13:47:03 -08:00
)
}
}
2017-05-10 02:44:13 -07:00
2020-04-24 20:39:46 -07:00
func ( m * queueManagerMetrics ) unregister ( ) {
if m . reg != nil {
2020-11-19 07:23:03 -08:00
m . reg . Unregister ( m . samplesTotal )
2021-05-06 13:53:52 -07:00
m . reg . Unregister ( m . exemplarsTotal )
2020-11-19 07:23:03 -08:00
m . reg . Unregister ( m . metadataTotal )
2020-04-24 20:39:46 -07:00
m . reg . Unregister ( m . failedSamplesTotal )
2021-05-06 13:53:52 -07:00
m . reg . Unregister ( m . failedExemplarsTotal )
2020-11-19 07:23:03 -08:00
m . reg . Unregister ( m . failedMetadataTotal )
2020-04-24 20:39:46 -07:00
m . reg . Unregister ( m . retriedSamplesTotal )
2021-05-06 13:53:52 -07:00
m . reg . Unregister ( m . retriedExemplarsTotal )
2020-11-19 07:23:03 -08:00
m . reg . Unregister ( m . retriedMetadataTotal )
2020-04-24 20:39:46 -07:00
m . reg . Unregister ( m . droppedSamplesTotal )
2021-05-06 13:53:52 -07:00
m . reg . Unregister ( m . droppedExemplarsTotal )
2020-04-24 20:39:46 -07:00
m . reg . Unregister ( m . enqueueRetriesTotal )
m . reg . Unregister ( m . sentBatchDuration )
m . reg . Unregister ( m . highestSentTimestamp )
m . reg . Unregister ( m . pendingSamples )
2021-05-06 13:53:52 -07:00
m . reg . Unregister ( m . pendingExemplars )
2020-04-24 20:39:46 -07:00
m . reg . Unregister ( m . shardCapacity )
m . reg . Unregister ( m . numShards )
m . reg . Unregister ( m . maxNumShards )
m . reg . Unregister ( m . minNumShards )
m . reg . Unregister ( m . desiredNumShards )
2021-05-06 13:53:52 -07:00
m . reg . Unregister ( m . sentBytesTotal )
2020-11-19 07:23:03 -08:00
m . reg . Unregister ( m . metadataBytesTotal )
2020-10-28 04:39:36 -07:00
m . reg . Unregister ( m . maxSamplesPerSend )
2020-04-24 20:39:46 -07:00
}
}
2020-06-24 06:41:52 -07:00
// WriteClient defines an interface for sending a batch of samples to an
// external timeseries database.
type WriteClient interface {
	// Store stores the given samples in the remote storage.
	Store(context.Context, []byte) error
	// Name uniquely identifies the remote storage.
	Name() string
	// Endpoint is the remote read or write endpoint for the storage client.
	Endpoint() string
}
// QueueManager manages a queue of samples to be sent to the Storage
2020-06-24 06:41:52 -07:00
// indicated by the provided WriteClient. Implements writeTo interface
2018-09-07 14:26:04 -07:00
// used by WAL Watcher.
2017-05-10 02:44:13 -07:00
type QueueManager struct {
2020-07-30 00:45:42 -07:00
lastSendTimestamp atomic . Int64
2019-10-21 14:54:25 -07:00
2020-11-19 07:23:03 -08:00
logger log . Logger
flushDeadline time . Duration
cfg config . QueueConfig
mcfg config . MetadataConfig
externalLabels labels . Labels
relabelConfigs [ ] * relabel . Config
2021-05-06 13:53:52 -07:00
sendExemplars bool
2020-11-19 07:23:03 -08:00
watcher * wal . Watcher
metadataWatcher * MetadataWatcher
2018-09-07 14:26:04 -07:00
2020-03-30 20:39:29 -07:00
clientMtx sync . RWMutex
2020-06-24 06:41:52 -07:00
storeClient WriteClient
2020-03-30 20:39:29 -07:00
2019-09-13 10:23:58 -07:00
seriesMtx sync . Mutex
2019-08-07 12:39:07 -07:00
seriesLabels map [ uint64 ] labels . Labels
2018-09-07 14:26:04 -07:00
seriesSegmentIndexes map [ uint64 ] int
droppedSeries map [ uint64 ] struct { }
2017-05-10 02:44:13 -07:00
shards * shards
numShards int
reshardChan chan int
2019-01-18 04:48:16 -08:00
quit chan struct { }
wg sync . WaitGroup
2017-05-10 02:44:13 -07:00
2021-05-06 13:53:52 -07:00
dataIn , dataDropped , dataOut , dataOutDuration * ewmaRate
2019-03-05 04:21:11 -08:00
2020-09-24 11:44:18 -07:00
metrics * queueManagerMetrics
interner * pool
2020-10-15 14:53:59 -07:00
highestRecvTimestamp * maxTimestamp
2017-05-10 02:44:13 -07:00
}
// NewQueueManager builds a new QueueManager.
2020-04-24 20:39:46 -07:00
func NewQueueManager (
metrics * queueManagerMetrics ,
watcherMetrics * wal . WatcherMetrics ,
readerMetrics * wal . LiveReaderMetrics ,
logger log . Logger ,
walDir string ,
samplesIn * ewmaRate ,
cfg config . QueueConfig ,
2020-11-19 07:23:03 -08:00
mCfg config . MetadataConfig ,
2020-04-24 20:39:46 -07:00
externalLabels labels . Labels ,
relabelConfigs [ ] * relabel . Config ,
2020-06-24 06:41:52 -07:00
client WriteClient ,
2020-04-24 20:39:46 -07:00
flushDeadline time . Duration ,
2020-09-24 11:44:18 -07:00
interner * pool ,
2020-10-15 14:53:59 -07:00
highestRecvTimestamp * maxTimestamp ,
2020-11-19 07:23:03 -08:00
sm ReadyScrapeManager ,
2021-05-06 13:53:52 -07:00
enableExemplarRemoteWrite bool ,
2020-04-24 20:39:46 -07:00
) * QueueManager {
2017-08-11 11:45:52 -07:00
if logger == nil {
logger = log . NewNopLogger ( )
}
2019-03-05 04:21:11 -08:00
2019-12-12 12:47:23 -08:00
logger = log . With ( logger , remoteName , client . Name ( ) , endpoint , client . Endpoint ( ) )
2017-05-10 02:44:13 -07:00
t := & QueueManager {
2019-03-05 04:21:11 -08:00
logger : logger ,
2018-05-23 07:03:54 -07:00
flushDeadline : flushDeadline ,
2017-05-10 02:44:13 -07:00
cfg : cfg ,
2020-11-19 07:23:03 -08:00
mcfg : mCfg ,
2017-05-10 02:44:13 -07:00
externalLabels : externalLabels ,
relabelConfigs : relabelConfigs ,
2020-03-30 20:39:29 -07:00
storeClient : client ,
2021-05-06 13:53:52 -07:00
sendExemplars : enableExemplarRemoteWrite ,
2017-05-10 02:44:13 -07:00
2019-08-07 12:39:07 -07:00
seriesLabels : make ( map [ uint64 ] labels . Labels ) ,
2018-09-07 14:26:04 -07:00
seriesSegmentIndexes : make ( map [ uint64 ] int ) ,
droppedSeries : make ( map [ uint64 ] struct { } ) ,
2018-12-04 09:32:14 -08:00
numShards : cfg . MinShards ,
2017-05-10 02:44:13 -07:00
reshardChan : make ( chan int ) ,
quit : make ( chan struct { } ) ,
2021-05-06 13:53:52 -07:00
dataIn : samplesIn ,
dataDropped : newEWMARate ( ewmaWeight , shardUpdateDuration ) ,
dataOut : newEWMARate ( ewmaWeight , shardUpdateDuration ) ,
dataOutDuration : newEWMARate ( ewmaWeight , shardUpdateDuration ) ,
2020-02-03 13:47:03 -08:00
2020-09-24 11:44:18 -07:00
metrics : metrics ,
interner : interner ,
highestRecvTimestamp : highestRecvTimestamp ,
2019-03-01 11:04:26 -08:00
}
2018-09-07 14:26:04 -07:00
2021-05-06 13:53:52 -07:00
t . watcher = wal . NewWatcher ( watcherMetrics , readerMetrics , logger , client . Name ( ) , t , walDir , enableExemplarRemoteWrite )
2020-11-19 07:23:03 -08:00
if t . mcfg . Send {
t . metadataWatcher = NewMetadataWatcher ( logger , sm , client . Name ( ) , t , t . mcfg . SendInterval , flushDeadline )
}
2019-03-05 04:21:11 -08:00
t . shards = t . newShards ( )
2017-05-10 02:44:13 -07:00
return t
}
2020-11-19 07:23:03 -08:00
// AppendMetadata sends metadata the remote storage. Metadata is sent all at once and is not parallelized.
func ( t * QueueManager ) AppendMetadata ( ctx context . Context , metadata [ ] scrape . MetricMetadata ) {
mm := make ( [ ] prompb . MetricMetadata , 0 , len ( metadata ) )
for _ , entry := range metadata {
mm = append ( mm , prompb . MetricMetadata {
MetricFamilyName : entry . Metric ,
Help : entry . Help ,
Type : metricTypeToMetricTypeProto ( entry . Type ) ,
Unit : entry . Unit ,
} )
}
2021-06-24 15:39:50 -07:00
numSends := int ( math . Ceil ( float64 ( len ( metadata ) ) / float64 ( t . mcfg . MaxSamplesPerSend ) ) )
for i := 0 ; i < numSends ; i ++ {
last := ( i + 1 ) * t . mcfg . MaxSamplesPerSend
if last > len ( metadata ) {
last = len ( metadata )
}
err := t . sendMetadataWithBackoff ( ctx , mm [ i * t . mcfg . MaxSamplesPerSend : last ] )
if err != nil {
t . metrics . failedMetadataTotal . Add ( float64 ( last - ( i * t . mcfg . MaxSamplesPerSend ) ) )
level . Error ( t . logger ) . Log ( "msg" , "non-recoverable error while sending metadata" , "count" , last - ( i * t . mcfg . MaxSamplesPerSend ) , "err" , err )
}
2020-11-19 07:23:03 -08:00
}
}
func ( t * QueueManager ) sendMetadataWithBackoff ( ctx context . Context , metadata [ ] prompb . MetricMetadata ) error {
// Build the WriteRequest with no samples.
req , _ , err := buildWriteRequest ( nil , metadata , nil )
if err != nil {
return err
}
metadataCount := len ( metadata )
attemptStore := func ( try int ) error {
span , ctx := opentracing . StartSpanFromContext ( ctx , "Remote Metadata Send Batch" )
defer span . Finish ( )
span . SetTag ( "metadata" , metadataCount )
span . SetTag ( "try" , try )
span . SetTag ( "remote_name" , t . storeClient . Name ( ) )
span . SetTag ( "remote_url" , t . storeClient . Endpoint ( ) )
begin := time . Now ( )
err := t . storeClient . Store ( ctx , req )
t . metrics . sentBatchDuration . Observe ( time . Since ( begin ) . Seconds ( ) )
if err != nil {
span . LogKV ( "error" , err )
ext . Error . Set ( span , true )
return err
}
return nil
}
retry := func ( ) {
t . metrics . retriedMetadataTotal . Add ( float64 ( len ( metadata ) ) )
}
2021-02-04 05:38:32 -08:00
err = sendWriteRequestWithBackoff ( ctx , t . cfg , t . logger , attemptStore , retry )
2020-11-19 07:23:03 -08:00
if err != nil {
return err
}
t . metrics . metadataTotal . Add ( float64 ( len ( metadata ) ) )
t . metrics . metadataBytesTotal . Add ( float64 ( len ( req ) ) )
return nil
}
2018-09-07 14:26:04 -07:00
// Append queues a sample to be sent to the remote storage. Blocks until all samples are
// enqueued on their shards or a shutdown signal is received.
2019-09-19 02:15:41 -07:00
func ( t * QueueManager ) Append ( samples [ ] record . RefSample ) bool {
2021-05-06 13:53:52 -07:00
var appendSample prompb . Sample
2019-06-27 11:48:21 -07:00
outer :
2019-08-12 09:22:02 -07:00
for _ , s := range samples {
2019-09-13 10:23:58 -07:00
t . seriesMtx . Lock ( )
2019-08-12 09:22:02 -07:00
lbls , ok := t . seriesLabels [ s . Ref ]
2019-06-27 11:48:21 -07:00
if ! ok {
2020-04-24 20:39:46 -07:00
t . metrics . droppedSamplesTotal . Inc ( )
2021-05-06 13:53:52 -07:00
t . dataDropped . incr ( 1 )
2019-08-12 09:22:02 -07:00
if _ , ok := t . droppedSeries [ s . Ref ] ; ! ok {
2020-04-11 01:22:18 -07:00
level . Info ( t . logger ) . Log ( "msg" , "Dropped sample for series that was not explicitly dropped via relabelling" , "ref" , s . Ref )
2018-09-07 14:26:04 -07:00
}
2019-09-13 10:23:58 -07:00
t . seriesMtx . Unlock ( )
2018-09-07 14:26:04 -07:00
continue
}
2019-09-13 10:23:58 -07:00
t . seriesMtx . Unlock ( )
2019-01-18 04:48:16 -08:00
// This will only loop if the queues are being resharded.
backoff := t . cfg . MinBackoff
2018-09-07 14:26:04 -07:00
for {
select {
case <- t . quit :
return false
default :
}
2021-05-06 13:53:52 -07:00
appendSample . Value = s . V
appendSample . Timestamp = s . T
if t . shards . enqueue ( s . Ref , writeSample { lbls , appendSample } ) {
continue outer
}
t . metrics . enqueueRetriesTotal . Inc ( )
time . Sleep ( time . Duration ( backoff ) )
backoff = backoff * 2
if backoff > t . cfg . MaxBackoff {
backoff = t . cfg . MaxBackoff
}
}
}
return true
}
2017-05-10 02:44:13 -07:00
2021-05-06 13:53:52 -07:00
func ( t * QueueManager ) AppendExemplars ( exemplars [ ] record . RefExemplar ) bool {
if ! t . sendExemplars {
return true
}
var appendExemplar prompb . Exemplar
outer :
for _ , e := range exemplars {
t . seriesMtx . Lock ( )
lbls , ok := t . seriesLabels [ e . Ref ]
if ! ok {
t . metrics . droppedExemplarsTotal . Inc ( )
// Track dropped exemplars in the same EWMA for sharding calc.
t . dataDropped . incr ( 1 )
if _ , ok := t . droppedSeries [ e . Ref ] ; ! ok {
level . Info ( t . logger ) . Log ( "msg" , "Dropped exemplar for series that was not explicitly dropped via relabelling" , "ref" , e . Ref )
}
t . seriesMtx . Unlock ( )
continue
}
t . seriesMtx . Unlock ( )
// This will only loop if the queues are being resharded.
backoff := t . cfg . MinBackoff
for {
select {
case <- t . quit :
return false
default :
}
appendExemplar . Labels = labelsToLabelsProto ( e . Labels , nil )
appendExemplar . Timestamp = e . T
appendExemplar . Value = e . V
if t . shards . enqueue ( e . Ref , writeExemplar { lbls , appendExemplar } ) {
2018-09-07 14:26:04 -07:00
continue outer
}
2019-01-18 04:48:16 -08:00
2020-04-24 20:39:46 -07:00
t . metrics . enqueueRetriesTotal . Inc ( )
2018-09-07 14:26:04 -07:00
time . Sleep ( time . Duration ( backoff ) )
backoff = backoff * 2
if backoff > t . cfg . MaxBackoff {
backoff = t . cfg . MaxBackoff
}
2017-05-10 02:44:13 -07:00
}
}
2018-09-07 14:26:04 -07:00
return true
2017-05-10 02:44:13 -07:00
}
// Start the queue manager sending samples to the remote storage.
// Does not block.
func ( t * QueueManager ) Start ( ) {
2020-06-25 23:33:52 -07:00
// Register and initialise some metrics.
t . metrics . register ( )
2020-04-24 20:39:46 -07:00
t . metrics . shardCapacity . Set ( float64 ( t . cfg . Capacity ) )
t . metrics . maxNumShards . Set ( float64 ( t . cfg . MaxShards ) )
t . metrics . minNumShards . Set ( float64 ( t . cfg . MinShards ) )
t . metrics . desiredNumShards . Set ( float64 ( t . cfg . MinShards ) )
2020-10-28 04:39:36 -07:00
t . metrics . maxSamplesPerSend . Set ( float64 ( t . cfg . MaxSamplesPerSend ) )
2019-04-23 01:49:17 -07:00
2018-09-07 14:26:04 -07:00
t . shards . start ( t . numShards )
t . watcher . Start ( )
2020-11-19 07:23:03 -08:00
if t . mcfg . Send {
t . metadataWatcher . Start ( )
}
2018-09-07 14:26:04 -07:00
2017-05-10 02:44:13 -07:00
t . wg . Add ( 2 )
go t . updateShardsLoop ( )
go t . reshardLoop ( )
}
// Stop stops sending samples to the remote storage and waits for pending
// sends to complete.
func ( t * QueueManager ) Stop ( ) {
2017-08-11 11:45:52 -07:00
level . Info ( t . logger ) . Log ( "msg" , "Stopping remote storage..." )
2018-09-07 14:26:04 -07:00
defer level . Info ( t . logger ) . Log ( "msg" , "Remote storage stopped." )
2017-05-10 02:44:13 -07:00
close ( t . quit )
2019-04-16 03:25:19 -07:00
t . wg . Wait ( )
2020-11-19 07:23:03 -08:00
// Wait for all QueueManager routines to end before stopping shards, metadata watcher, and WAL watcher. This
2019-04-16 03:25:19 -07:00
// is to ensure we don't end up executing a reshard and shards.stop() at the same time, which
// causes a closed channel panic.
2018-09-07 14:26:04 -07:00
t . shards . stop ( )
t . watcher . Stop ( )
2020-11-19 07:23:03 -08:00
if t . mcfg . Send {
t . metadataWatcher . Stop ( )
}
2019-03-13 03:02:36 -07:00
// On shutdown, release the strings in the labels from the intern pool.
2019-09-13 10:23:58 -07:00
t . seriesMtx . Lock ( )
2019-03-13 03:02:36 -07:00
for _ , labels := range t . seriesLabels {
2020-09-24 11:44:18 -07:00
t . releaseLabels ( labels )
2019-03-13 03:02:36 -07:00
}
2019-09-13 10:23:58 -07:00
t . seriesMtx . Unlock ( )
2020-04-24 20:39:46 -07:00
t . metrics . unregister ( )
2018-09-07 14:26:04 -07:00
}
// StoreSeries keeps track of which series we know about for lookups when sending samples to remote.
2019-09-19 02:15:41 -07:00
func ( t * QueueManager ) StoreSeries ( series [ ] record . RefSeries , index int ) {
2019-09-13 10:23:58 -07:00
t . seriesMtx . Lock ( )
defer t . seriesMtx . Unlock ( )
2018-09-07 14:26:04 -07:00
for _ , s := range series {
2021-01-22 07:03:10 -08:00
// Just make sure all the Refs of Series will insert into seriesSegmentIndexes map for tracking.
t . seriesSegmentIndexes [ s . Ref ] = index
2019-03-08 08:29:25 -08:00
ls := processExternalLabels ( s . Labels , t . externalLabels )
2019-08-07 12:39:07 -07:00
lbls := relabel . Process ( ls , t . relabelConfigs ... )
if len ( lbls ) == 0 {
2018-09-07 14:26:04 -07:00
t . droppedSeries [ s . Ref ] = struct { } { }
continue
}
2020-09-24 11:44:18 -07:00
t . internLabels ( lbls )
2019-03-11 16:44:23 -07:00
2019-03-13 03:02:36 -07:00
// We should not ever be replacing a series labels in the map, but just
// in case we do we need to ensure we do not leak the replaced interned
// strings.
2019-06-27 11:48:21 -07:00
if orig , ok := t . seriesLabels [ s . Ref ] ; ok {
2020-09-24 11:44:18 -07:00
t . releaseLabels ( orig )
2019-03-11 16:44:23 -07:00
}
2019-08-07 12:39:07 -07:00
t . seriesLabels [ s . Ref ] = lbls
2018-09-07 14:26:04 -07:00
}
}
2017-05-10 02:44:13 -07:00
2018-09-07 14:26:04 -07:00
// SeriesReset is used when reading a checkpoint. WAL Watcher should have
// stored series records with the checkpoints index number, so we can now
// delete any ref ID's lower than that # from the two maps.
func ( t * QueueManager ) SeriesReset ( index int ) {
2019-09-13 10:23:58 -07:00
t . seriesMtx . Lock ( )
defer t . seriesMtx . Unlock ( )
2018-09-07 14:26:04 -07:00
// Check for series that are in segments older than the checkpoint
// that were not also present in the checkpoint.
for k , v := range t . seriesSegmentIndexes {
if v < index {
delete ( t . seriesSegmentIndexes , k )
2020-09-24 11:44:18 -07:00
t . releaseLabels ( t . seriesLabels [ k ] )
2019-03-11 16:44:23 -07:00
delete ( t . seriesLabels , k )
2019-09-13 10:23:58 -07:00
delete ( t . droppedSeries , k )
2018-09-07 14:26:04 -07:00
}
}
}
2017-08-11 11:45:52 -07:00
2020-03-30 20:39:29 -07:00
// SetClient updates the client used by a queue. Used when only client specific
// fields are updated to avoid restarting the queue.
2020-06-24 06:41:52 -07:00
func ( t * QueueManager ) SetClient ( c WriteClient ) {
2020-03-30 20:39:29 -07:00
t . clientMtx . Lock ( )
t . storeClient = c
t . clientMtx . Unlock ( )
}
2020-06-24 06:41:52 -07:00
func ( t * QueueManager ) client ( ) WriteClient {
2020-03-30 20:39:29 -07:00
t . clientMtx . RLock ( )
defer t . clientMtx . RUnlock ( )
return t . storeClient
}
2020-09-24 11:44:18 -07:00
func ( t * QueueManager ) internLabels ( lbls labels . Labels ) {
2019-08-07 12:39:07 -07:00
for i , l := range lbls {
2020-09-24 11:44:18 -07:00
lbls [ i ] . Name = t . interner . intern ( l . Name )
lbls [ i ] . Value = t . interner . intern ( l . Value )
2019-08-07 12:39:07 -07:00
}
}
2020-09-24 11:44:18 -07:00
func ( t * QueueManager ) releaseLabels ( ls labels . Labels ) {
2019-03-11 16:44:23 -07:00
for _ , l := range ls {
2020-09-24 11:44:18 -07:00
t . interner . release ( l . Name )
t . interner . release ( l . Value )
2019-03-11 16:44:23 -07:00
}
}
2019-03-13 03:02:36 -07:00
// processExternalLabels merges externalLabels into ls. If ls contains
2019-03-08 08:29:25 -08:00
// a label in externalLabels, the value in ls wins.
2019-11-18 11:53:33 -08:00
func processExternalLabels ( ls labels . Labels , externalLabels labels . Labels ) labels . Labels {
2019-03-08 08:29:25 -08:00
i , j , result := 0 , 0 , make ( labels . Labels , 0 , len ( ls ) + len ( externalLabels ) )
for i < len ( ls ) && j < len ( externalLabels ) {
if ls [ i ] . Name < externalLabels [ j ] . Name {
result = append ( result , labels . Label {
Name : ls [ i ] . Name ,
Value : ls [ i ] . Value ,
} )
i ++
} else if ls [ i ] . Name > externalLabels [ j ] . Name {
result = append ( result , externalLabels [ j ] )
j ++
} else {
result = append ( result , labels . Label {
Name : ls [ i ] . Name ,
Value : ls [ i ] . Value ,
} )
i ++
j ++
2018-09-07 14:26:04 -07:00
}
}
2021-04-30 09:37:07 -07:00
return append ( append ( result , ls [ i : ] ... ) , externalLabels [ j : ] ... )
2017-05-10 02:44:13 -07:00
}
func ( t * QueueManager ) updateShardsLoop ( ) {
defer t . wg . Done ( )
2017-10-09 09:36:20 -07:00
ticker := time . NewTicker ( shardUpdateDuration )
defer ticker . Stop ( )
2017-05-10 02:44:13 -07:00
for {
select {
2017-10-09 09:36:20 -07:00
case <- ticker . C :
2019-10-21 14:54:25 -07:00
desiredShards := t . calculateDesiredShards ( )
2020-04-20 15:20:39 -07:00
if ! t . shouldReshard ( desiredShards ) {
2019-10-21 14:54:25 -07:00
continue
}
// Resharding can take some time, and we want this loop
// to stay close to shardUpdateDuration.
select {
case t . reshardChan <- desiredShards :
2019-11-26 05:22:56 -08:00
level . Info ( t . logger ) . Log ( "msg" , "Remote storage resharding" , "from" , t . numShards , "to" , desiredShards )
2019-10-21 14:54:25 -07:00
t . numShards = desiredShards
default :
level . Info ( t . logger ) . Log ( "msg" , "Currently resharding, skipping." )
}
2017-05-10 02:44:13 -07:00
case <- t . quit :
return
}
}
}
2020-04-20 15:20:39 -07:00
// shouldReshard returns if resharding should occur
func ( t * QueueManager ) shouldReshard ( desiredShards int ) bool {
if desiredShards == t . numShards {
return false
}
// We shouldn't reshard if Prometheus hasn't been able to send to the
// remote endpoint successfully within some period of time.
minSendTimestamp := time . Now ( ) . Add ( - 2 * time . Duration ( t . cfg . BatchSendDeadline ) ) . Unix ( )
2020-07-30 00:45:42 -07:00
lsts := t . lastSendTimestamp . Load ( )
2020-04-20 15:20:39 -07:00
if lsts < minSendTimestamp {
level . Warn ( t . logger ) . Log ( "msg" , "Skipping resharding, last successful send was beyond threshold" , "lastSendTimestamp" , lsts , "minSendTimestamp" , minSendTimestamp )
return false
}
return true
}
// calculateDesiredShards returns the number of desired shards, which will be
// the current QueueManager.numShards if resharding should not occur for reasons
// outlined in this functions implementation. It is up to the caller to reshard, or not,
// based on the return value.
func (t *QueueManager) calculateDesiredShards() int {
	// Advance the rate-tracking windows before reading them below.
	t.dataOut.tick()
	t.dataDropped.tick()
	t.dataOutDuration.tick()

	// We use the number of incoming samples as a prediction of how much work we
	// will need to do next iteration. We add to this any pending samples
	// (received - send) so we can catch up with any backlog. We use the average
	// outgoing batch latency to work out how many shards we need.
	var (
		dataInRate      = t.dataIn.rate()
		dataOutRate     = t.dataOut.rate()
		dataKeptRatio   = dataOutRate / (t.dataDropped.rate() + dataOutRate)
		dataOutDuration = t.dataOutDuration.rate() / float64(time.Second)
		dataPendingRate = dataInRate*dataKeptRatio - dataOutRate
		highestSent     = t.metrics.highestSentTimestamp.Get()
		highestRecv     = t.highestRecvTimestamp.Get()
		delay           = highestRecv - highestSent
		dataPending     = delay * dataInRate * dataKeptRatio
	)

	// Nothing sent yet (or sends are failing entirely): keep the current
	// shard count rather than dividing by zero below.
	if dataOutRate <= 0 {
		return t.numShards
	}

	// When behind we will try to catch up on a proportion of samples per tick.
	// This works similarly to an integral accumulator in that pending samples
	// is the result of the error integral.
	const integralGain = 0.1 / float64(shardUpdateDuration/time.Second)

	var (
		// Average wall-clock seconds spent sending a single sample.
		timePerSample = dataOutDuration / dataOutRate
		desiredShards = timePerSample * (dataInRate*dataKeptRatio + integralGain*dataPending)
	)
	t.metrics.desiredNumShards.Set(desiredShards)
	level.Debug(t.logger).Log("msg", "QueueManager.calculateDesiredShards",
		"dataInRate", dataInRate,
		"dataOutRate", dataOutRate,
		"dataKeptRatio", dataKeptRatio,
		"dataPendingRate", dataPendingRate,
		"dataPending", dataPending,
		"dataOutDuration", dataOutDuration,
		"timePerSample", timePerSample,
		"desiredShards", desiredShards,
		"highestSent", highestSent,
		"highestRecv", highestRecv,
	)

	// Changes in the number of shards must be greater than shardToleranceFraction.
	var (
		lowerBound = float64(t.numShards) * (1. - shardToleranceFraction)
		upperBound = float64(t.numShards) * (1. + shardToleranceFraction)
	)
	level.Debug(t.logger).Log("msg", "QueueManager.updateShardsLoop",
		"lowerBound", lowerBound, "desiredShards", desiredShards, "upperBound", upperBound)
	if lowerBound <= desiredShards && desiredShards <= upperBound {
		return t.numShards
	}

	numShards := int(math.Ceil(desiredShards))
	// Do not downshard if we are more than ten seconds back.
	if numShards < t.numShards && delay > 10.0 {
		level.Debug(t.logger).Log("msg", "Not downsharding due to being too far behind")
		return t.numShards
	}
	// Clamp to the configured [MinShards, MaxShards] range.
	if numShards > t.cfg.MaxShards {
		numShards = t.cfg.MaxShards
	} else if numShards < t.cfg.MinShards {
		numShards = t.cfg.MinShards
	}
	return numShards
}
func ( t * QueueManager ) reshardLoop ( ) {
defer t . wg . Done ( )
for {
select {
case numShards := <- t . reshardChan :
2018-09-07 14:26:04 -07:00
// We start the newShards after we have stopped (the therefore completely
// flushed) the oldShards, to guarantee we only every deliver samples in
// order.
t . shards . stop ( )
t . shards . start ( numShards )
2017-05-10 02:44:13 -07:00
case <- t . quit :
return
}
}
}
2018-09-07 14:26:04 -07:00
func ( t * QueueManager ) newShards ( ) * shards {
s := & shards {
qm : t ,
done : make ( chan struct { } ) ,
}
return s
}
2017-05-10 02:44:13 -07:00
2021-05-06 13:53:52 -07:00
// writeSample is a sample queued for remote write, paired with the labels of
// the series it belongs to.
type writeSample struct {
	seriesLabels labels.Labels
	sample       prompb.Sample
}

// writeExemplar is an exemplar queued for remote write, paired with the
// labels of the series it belongs to.
type writeExemplar struct {
	seriesLabels labels.Labels
	exemplar     prompb.Exemplar
}
2017-05-10 02:44:13 -07:00
// shards fans incoming data out over a set of per-shard queues, each drained
// by its own goroutine (runShard) that batches and sends to remote storage.
type shards struct {
	mtx sync.RWMutex // With the WAL, this is never actually contended.

	qm *QueueManager
	// One queue per shard; entries are writeSample or writeExemplar values.
	queues []chan interface{}
	// So we can accurately track how many of each are lost during shard shutdowns.
	enqueuedSamples   atomic.Int64
	enqueuedExemplars atomic.Int64

	// Emulate a wait group with a channel and an atomic int, as you
	// cannot select on a wait group.
	done    chan struct{}
	running atomic.Int32

	// Soft shutdown context will prevent new enqueues and deadlocks.
	softShutdown chan struct{}

	// Hard shutdown context is used to terminate outgoing HTTP connections
	// after giving them a chance to terminate.
	hardShutdown                   context.CancelFunc
	samplesDroppedOnHardShutdown   atomic.Uint32
	exemplarsDroppedOnHardShutdown atomic.Uint32
}
2018-09-07 14:26:04 -07:00
// start the shards; must be called before any call to enqueue.
func ( s * shards ) start ( n int ) {
s . mtx . Lock ( )
defer s . mtx . Unlock ( )
2020-06-25 13:48:30 -07:00
s . qm . metrics . pendingSamples . Set ( 0 )
s . qm . metrics . numShards . Set ( float64 ( n ) )
2021-05-06 13:53:52 -07:00
newQueues := make ( [ ] chan interface { } , n )
2018-09-07 14:26:04 -07:00
for i := 0 ; i < n ; i ++ {
2021-05-06 13:53:52 -07:00
newQueues [ i ] = make ( chan interface { } , s . qm . cfg . Capacity )
2017-05-10 02:44:13 -07:00
}
2018-09-07 14:26:04 -07:00
s . queues = newQueues
var hardShutdownCtx context . Context
hardShutdownCtx , s . hardShutdown = context . WithCancel ( context . Background ( ) )
s . softShutdown = make ( chan struct { } )
2020-07-30 00:45:42 -07:00
s . running . Store ( int32 ( n ) )
2018-09-07 14:26:04 -07:00
s . done = make ( chan struct { } )
2021-05-06 13:53:52 -07:00
s . samplesDroppedOnHardShutdown . Store ( 0 )
s . exemplarsDroppedOnHardShutdown . Store ( 0 )
2018-09-07 14:26:04 -07:00
for i := 0 ; i < n ; i ++ {
go s . runShard ( hardShutdownCtx , i , newQueues [ i ] )
2017-05-10 02:44:13 -07:00
}
}
2018-09-07 14:26:04 -07:00
// stop the shards; subsequent call to enqueue will return false.
func (s *shards) stop() {
	// Attempt a clean shutdown, but only wait flushDeadline for all the shards
	// to cleanly exit. As we're doing RPCs, enqueue can block indefinitely.
	// We must be able to call stop concurrently, hence we can only take the
	// RLock here.
	s.mtx.RLock()
	close(s.softShutdown)
	s.mtx.RUnlock()

	// Enqueue should now be unblocked, so we can take the write lock. This
	// also ensures we don't race with writes to the queues, and get a panic:
	// send on closed channel.
	s.mtx.Lock()
	defer s.mtx.Unlock()
	// Closing the queues makes each runShard flush its pending batch and exit.
	for _, queue := range s.queues {
		close(queue)
	}
	select {
	case <-s.done:
		// All shards drained and exited cleanly within the deadline.
		return
	case <-time.After(s.qm.flushDeadline):
	}

	// Force an unclean shutdown.
	s.hardShutdown()
	<-s.done
	if dropped := s.samplesDroppedOnHardShutdown.Load(); dropped > 0 {
		level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown", "count", dropped)
	}
	if dropped := s.exemplarsDroppedOnHardShutdown.Load(); dropped > 0 {
		level.Error(s.qm.logger).Log("msg", "Failed to flush all exemplars on shutdown", "count", dropped)
	}
}
2021-05-06 13:53:52 -07:00
// enqueue data (sample or exemplar). If we are currently in the process of shutting down or resharding,
2018-09-07 14:26:04 -07:00
// will return false; in this case, you should back off and retry.
2021-05-06 13:53:52 -07:00
func ( s * shards ) enqueue ( ref uint64 , data interface { } ) bool {
2018-09-07 14:26:04 -07:00
s . mtx . RLock ( )
defer s . mtx . RUnlock ( )
2017-05-10 02:44:13 -07:00
2018-09-07 14:26:04 -07:00
select {
case <- s . softShutdown :
return false
default :
}
2017-05-10 02:44:13 -07:00
2018-09-07 14:26:04 -07:00
shard := uint64 ( ref ) % uint64 ( len ( s . queues ) )
2017-05-10 02:44:13 -07:00
select {
2018-09-07 14:26:04 -07:00
case <- s . softShutdown :
return false
2021-05-06 13:53:52 -07:00
case s . queues [ shard ] <- data :
switch data . ( type ) {
case writeSample :
s . qm . metrics . pendingSamples . Inc ( )
s . enqueuedSamples . Inc ( )
case writeExemplar :
s . qm . metrics . pendingExemplars . Inc ( )
s . enqueuedExemplars . Inc ( )
default :
level . Warn ( s . qm . logger ) . Log ( "msg" , "Invalid object type in shards enqueue" )
}
2017-05-10 02:44:13 -07:00
return true
}
}
2021-05-06 13:53:52 -07:00
// runShard is the per-shard worker goroutine: it drains queue, batching
// samples/exemplars into pendingData, and flushes a batch when it is full,
// when the BatchSendDeadline timer fires, or when the queue is closed.
// A hard-shutdown ctx cancellation drops everything still buffered/queued.
func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface{}) {
	defer func() {
		// Last shard out closes done so stop() can return.
		if s.running.Dec() == 0 {
			close(s.done)
		}
	}()

	shardNum := strconv.Itoa(shardID)

	// Send batches of at most MaxSamplesPerSend samples to the remote storage.
	// If we have fewer samples than that, flush them out after a deadline anyways.
	var (
		max = s.qm.cfg.MaxSamplesPerSend
		// Rough estimate, 1% of active series will contain an exemplar on each scrape.
		// TODO(cstyan): Casting this many times smells, also we could get index out of bounds issues here.
		maxExemplars                                 = int(math.Max(1, float64(max/10)))
		nPending, nPendingSamples, nPendingExemplars = 0, 0, 0
		sampleBuffer                                 = allocateSampleBuffer(max)

		buf            []byte
		pendingData    []prompb.TimeSeries
		exemplarBuffer [][]prompb.Exemplar
	)
	totalPending := max
	if s.qm.sendExemplars {
		exemplarBuffer = allocateExemplarBuffer(maxExemplars)
		totalPending += maxExemplars
	}
	pendingData = make([]prompb.TimeSeries, totalPending)

	timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
	// stop drains the timer channel if Stop reports the timer already fired,
	// so a later Reset behaves correctly.
	stop := func() {
		if !timer.Stop() {
			select {
			case <-timer.C:
			default:
			}
		}
	}
	defer stop()

	for {
		select {
		case <-ctx.Done():
			// In this case we drop all samples in the buffer and the queue.
			// Remove them from pending and mark them as failed.
			droppedSamples := nPendingSamples + int(s.enqueuedSamples.Load())
			droppedExemplars := nPendingExemplars + int(s.enqueuedExemplars.Load())
			s.qm.metrics.pendingSamples.Sub(float64(droppedSamples))
			s.qm.metrics.pendingExemplars.Sub(float64(droppedExemplars))
			s.qm.metrics.failedSamplesTotal.Add(float64(droppedSamples))
			s.qm.metrics.failedExemplarsTotal.Add(float64(droppedExemplars))
			s.samplesDroppedOnHardShutdown.Add(uint32(droppedSamples))
			s.exemplarsDroppedOnHardShutdown.Add(uint32(droppedExemplars))
			return

		case sample, ok := <-queue:
			if !ok {
				// Queue closed (stop/reshard): flush whatever is pending, then exit.
				if nPendingSamples > 0 || nPendingExemplars > 0 {
					level.Debug(s.qm.logger).Log("msg", "Flushing data to remote storage...", "samples", nPendingSamples, "exemplars", nPendingExemplars)
					s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf)
					s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples))
					s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars))
					level.Debug(s.qm.logger).Log("msg", "Done flushing.")
				}
				return
			}

			// Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
			// retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
			// stop reading from the queue. This makes it safe to reference pendingSamples by index.
			switch d := sample.(type) {
			case writeSample:
				sampleBuffer[nPendingSamples][0] = d.sample
				pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
				pendingData[nPending].Samples = sampleBuffer[nPendingSamples]
				pendingData[nPending].Exemplars = nil
				nPendingSamples++
				nPending++
			case writeExemplar:
				exemplarBuffer[nPendingExemplars][0] = d.exemplar
				pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
				pendingData[nPending].Samples = nil
				pendingData[nPending].Exemplars = exemplarBuffer[nPendingExemplars]
				nPendingExemplars++
				nPending++
			}

			// Batch full: send and restart the deadline timer.
			if nPendingSamples >= max || nPendingExemplars >= maxExemplars {
				s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf)
				s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples))
				s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars))
				nPendingSamples = 0
				nPendingExemplars = 0
				nPending = 0

				stop()
				timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
			}

		case <-timer.C:
			// Deadline reached: send a partial batch if anything is buffered.
			if nPendingSamples > 0 || nPendingExemplars > 0 {
				level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum)
				s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf)
				s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples))
				s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars))
				nPendingSamples = 0
				nPendingExemplars = 0
				nPending = 0
			}
			timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
		}
	}
}
2021-05-06 13:53:52 -07:00
func ( s * shards ) sendSamples ( ctx context . Context , samples [ ] prompb . TimeSeries , sampleCount int , exemplarCount int , buf * [ ] byte ) {
2017-05-10 02:44:13 -07:00
begin := time . Now ( )
2021-05-06 13:53:52 -07:00
err := s . sendSamplesWithBackoff ( ctx , samples , sampleCount , exemplarCount , buf )
2019-02-12 06:58:25 -08:00
if err != nil {
2021-05-06 13:53:52 -07:00
level . Error ( s . qm . logger ) . Log ( "msg" , "non-recoverable error" , "count" , sampleCount , "exemplarCount" , exemplarCount , "err" , err )
s . qm . metrics . failedSamplesTotal . Add ( float64 ( sampleCount ) )
s . qm . metrics . failedExemplarsTotal . Add ( float64 ( exemplarCount ) )
2018-09-07 14:26:04 -07:00
}
2017-05-10 02:44:13 -07:00
2018-04-08 02:51:54 -07:00
// These counters are used to calculate the dynamic sharding, and as such
2017-05-10 02:44:13 -07:00
// should be maintained irrespective of success or failure.
2021-05-06 13:53:52 -07:00
s . qm . dataOut . incr ( int64 ( len ( samples ) ) )
s . qm . dataOutDuration . incr ( int64 ( time . Since ( begin ) ) )
2020-07-30 00:45:42 -07:00
s . qm . lastSendTimestamp . Store ( time . Now ( ) . Unix ( ) )
2017-05-10 02:44:13 -07:00
}
// sendSamples to the remote storage with backoff for recoverable errors.
2021-05-06 13:53:52 -07:00
func ( s * shards ) sendSamplesWithBackoff ( ctx context . Context , samples [ ] prompb . TimeSeries , sampleCount int , exemplarCount int , buf * [ ] byte ) error {
2020-11-19 07:23:03 -08:00
// Build the WriteRequest with no metadata.
req , highest , err := buildWriteRequest ( samples , nil , * buf )
2018-09-07 14:26:04 -07:00
if err != nil {
2019-03-01 11:04:26 -08:00
// Failing to build the write request is non-recoverable, since it will
// only error if marshaling the proto to bytes fails.
2018-09-07 14:26:04 -07:00
return err
}
2019-03-01 11:04:26 -08:00
2020-06-01 08:21:13 -07:00
reqSize := len ( * buf )
* buf = req
// An anonymous function allows us to defer the completion of our per-try spans
// without causing a memory leak, and it has the nice effect of not propagating any
// parameters for sendSamplesWithBackoff/3.
2020-11-19 07:23:03 -08:00
attemptStore := func ( try int ) error {
2020-06-01 08:21:13 -07:00
span , ctx := opentracing . StartSpanFromContext ( ctx , "Remote Send Batch" )
defer span . Finish ( )
span . SetTag ( "samples" , sampleCount )
2021-05-06 13:53:52 -07:00
if exemplarCount > 0 {
span . SetTag ( "exemplars" , exemplarCount )
}
2020-06-01 08:21:13 -07:00
span . SetTag ( "request_size" , reqSize )
span . SetTag ( "try" , try )
span . SetTag ( "remote_name" , s . qm . storeClient . Name ( ) )
span . SetTag ( "remote_url" , s . qm . storeClient . Endpoint ( ) )
begin := time . Now ( )
2020-11-19 07:23:03 -08:00
s . qm . metrics . samplesTotal . Add ( float64 ( sampleCount ) )
2021-05-06 13:53:52 -07:00
s . qm . metrics . exemplarsTotal . Add ( float64 ( exemplarCount ) )
2020-06-01 08:21:13 -07:00
err := s . qm . client ( ) . Store ( ctx , * buf )
s . qm . metrics . sentBatchDuration . Observe ( time . Since ( begin ) . Seconds ( ) )
if err != nil {
span . LogKV ( "error" , err )
ext . Error . Set ( span , true )
return err
}
return nil
}
2020-11-19 07:23:03 -08:00
onRetry := func ( ) {
s . qm . metrics . retriedSamplesTotal . Add ( float64 ( sampleCount ) )
2021-05-06 13:53:52 -07:00
s . qm . metrics . retriedExemplarsTotal . Add ( float64 ( exemplarCount ) )
2020-11-19 07:23:03 -08:00
}
2021-02-04 05:38:32 -08:00
err = sendWriteRequestWithBackoff ( ctx , s . qm . cfg , s . qm . logger , attemptStore , onRetry )
2020-11-19 07:23:03 -08:00
if err != nil {
return err
}
2021-05-06 13:53:52 -07:00
s . qm . metrics . sentBytesTotal . Add ( float64 ( reqSize ) )
2020-11-19 07:23:03 -08:00
s . qm . metrics . highestSentTimestamp . Set ( float64 ( highest / 1000 ) )
return nil
}
2021-02-04 05:38:32 -08:00
func sendWriteRequestWithBackoff ( ctx context . Context , cfg config . QueueConfig , l log . Logger , attempt func ( int ) error , onRetry func ( ) ) error {
2020-11-19 07:23:03 -08:00
backoff := cfg . MinBackoff
2021-02-10 14:25:37 -08:00
sleepDuration := model . Duration ( 0 )
2020-11-19 07:23:03 -08:00
try := 0
2018-09-07 14:26:04 -07:00
for {
select {
case <- ctx . Done ( ) :
return ctx . Err ( )
default :
}
2017-05-10 02:44:13 -07:00
2020-11-19 07:23:03 -08:00
err := attempt ( try )
2018-09-07 14:26:04 -07:00
2020-11-19 07:23:03 -08:00
if err == nil {
return nil
}
2017-05-10 02:44:13 -07:00
2020-11-19 07:23:03 -08:00
// If the error is unrecoverable, we should not retry.
2021-02-10 14:25:37 -08:00
backoffErr , ok := err . ( RecoverableError )
if ! ok {
2020-11-19 07:23:03 -08:00
return err
}
2020-06-01 08:21:13 -07:00
2021-02-10 14:25:37 -08:00
sleepDuration = backoff
if backoffErr . retryAfter > 0 {
sleepDuration = backoffErr . retryAfter
level . Info ( l ) . Log ( "msg" , "Retrying after duration specified by Retry-After header" , "duration" , sleepDuration )
} else if backoffErr . retryAfter < 0 {
level . Debug ( l ) . Log ( "msg" , "retry-after cannot be in past, retrying using default backoff mechanism" )
}
select {
case <- ctx . Done ( ) :
case <- time . After ( time . Duration ( sleepDuration ) ) :
}
2020-11-19 07:23:03 -08:00
// If we make it this far, we've encountered a recoverable error and will retry.
onRetry ( )
2021-01-27 08:38:34 -08:00
level . Warn ( l ) . Log ( "msg" , "Failed to send batch, retrying" , "err" , err )
2018-09-07 14:26:04 -07:00
2021-02-10 14:25:37 -08:00
backoff = sleepDuration * 2
2020-11-19 07:23:03 -08:00
if backoff > cfg . MaxBackoff {
backoff = cfg . MaxBackoff
2017-05-10 02:44:13 -07:00
}
2020-06-01 08:21:13 -07:00
2020-11-19 07:23:03 -08:00
try ++
continue
2017-05-10 02:44:13 -07:00
}
2018-09-07 14:26:04 -07:00
}
2020-11-19 07:23:03 -08:00
func buildWriteRequest ( samples [ ] prompb . TimeSeries , metadata [ ] prompb . MetricMetadata , buf [ ] byte ) ( [ ] byte , int64 , error ) {
2018-09-07 14:26:04 -07:00
var highest int64
for _ , ts := range samples {
2021-05-06 13:53:52 -07:00
// At the moment we only ever append a TimeSeries with a single sample or exemplar in it.
if len ( ts . Samples ) > 0 && ts . Samples [ 0 ] . Timestamp > highest {
2018-09-07 14:26:04 -07:00
highest = ts . Samples [ 0 ] . Timestamp
}
2021-05-06 13:53:52 -07:00
if len ( ts . Exemplars ) > 0 && ts . Exemplars [ 0 ] . Timestamp > highest {
highest = ts . Exemplars [ 0 ] . Timestamp
}
2018-09-07 14:26:04 -07:00
}
2020-11-19 07:23:03 -08:00
2018-09-07 14:26:04 -07:00
req := & prompb . WriteRequest {
Timeseries : samples ,
2020-11-19 07:23:03 -08:00
Metadata : metadata ,
2018-09-07 14:26:04 -07:00
}
data , err := proto . Marshal ( req )
if err != nil {
return nil , highest , err
}
2017-05-10 02:44:13 -07:00
2019-06-27 11:48:21 -07:00
// snappy uses len() to see if it needs to allocate a new slice. Make the
// buffer as long as possible.
if buf != nil {
buf = buf [ 0 : cap ( buf ) ]
}
compressed := snappy . Encode ( buf , data )
2018-09-07 14:26:04 -07:00
return compressed , highest , nil
2017-05-10 02:44:13 -07:00
}
2019-08-12 09:22:02 -07:00
2021-05-06 13:53:52 -07:00
// allocateSampleBuffer returns capacity single-element sample slices, reused
// across batches by runShard.
func allocateSampleBuffer(capacity int) [][]prompb.Sample {
	out := make([][]prompb.Sample, capacity)
	for i := 0; i < capacity; i++ {
		out[i] = make([]prompb.Sample, 1)
	}
	return out
}
func allocateExemplarBuffer ( capacity int ) [ ] [ ] prompb . Exemplar {
buf := make ( [ ] [ ] prompb . Exemplar , capacity )
for i := range buf {
buf [ i ] = [ ] prompb . Exemplar { { } }
2019-08-12 09:22:02 -07:00
}
2021-05-06 13:53:52 -07:00
return buf
2019-08-12 09:22:02 -07:00
}