// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"context"
	"fmt"
	"math"
	"sync"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/wal"
)

var (
	samplesIn = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "samples_in_total",
		Help:      "Samples in to remote storage, compare to samples out for queue managers.",
	})
	exemplarsIn = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "exemplars_in_total",
		Help:      "Exemplars in to remote storage, compare to exemplars out for queue managers.",
	})
)

// WriteStorage represents all the remote write storage.
type WriteStorage struct {
	logger log.Logger
	reg    prometheus.Registerer
	mtx    sync.Mutex

	watcherMetrics    *wal.WatcherMetrics
	liveReaderMetrics *wal.LiveReaderMetrics
	externalLabels    labels.Labels
	dir               string
	queues            map[string]*QueueManager
	samplesIn         *ewmaRate
	flushDeadline     time.Duration
	interner          *pool
	scraper           ReadyScrapeManager
	quit              chan struct{}

	// For timestampTracker.
	highestTimestamp *maxTimestamp
}

// NewWriteStorage creates and runs a WriteStorage.
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager) *WriteStorage {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	rws := &WriteStorage{
		queues:            make(map[string]*QueueManager),
		watcherMetrics:    wal.NewWatcherMetrics(reg),
		liveReaderMetrics: wal.NewLiveReaderMetrics(reg),
		logger:            logger,
		reg:               reg,
		flushDeadline:     flushDeadline,
		samplesIn:         newEWMARate(ewmaWeight, shardUpdateDuration),
		dir:               dir,
		interner:          newPool(),
		scraper:           sm,
		quit:              make(chan struct{}),
		highestTimestamp: &maxTimestamp{
			Gauge: prometheus.NewGauge(prometheus.GaugeOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "highest_timestamp_in_seconds",
				Help:      "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.",
			}),
		},
	}
	if reg != nil {
		reg.MustRegister(rws.highestTimestamp)
	}
	go rws.run()
	return rws
}

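// run ticks the samples-in rate at shardUpdateDuration intervals until the
// WriteStorage is closed.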
func (rws *WriteStorage) run() {
	ticker := time.NewTicker(shardUpdateDuration)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			rws.samplesIn.tick()
		case <-rws.quit:
			return
		}
	}
}

// ApplyConfig updates the state as the new config requires.
// Only stop & create queues which have changes.
func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
	rws.mtx.Lock()
	defer rws.mtx.Unlock()

	// Remote write queues only need to change if the remote write config or
	// external labels change.
	externalLabelUnchanged := labels.Equal(conf.GlobalConfig.ExternalLabels, rws.externalLabels)
	rws.externalLabels = conf.GlobalConfig.ExternalLabels

	newQueues := make(map[string]*QueueManager)
	newHashes := []string{}
	for _, rwConf := range conf.RemoteWriteConfigs {
		hash, err := toHash(rwConf)
		if err != nil {
			return err
		}

		// Don't allow duplicate remote write configs.
		if _, ok := newQueues[hash]; ok {
			return fmt.Errorf("duplicate remote write configs are not allowed, found duplicate for URL: %s", rwConf.URL)
		}

		// Set the queue name to the config hash if the user has not set
		// a name in their remote write config so we can still differentiate
		// between queues that have the same remote write endpoint.
		name := hash[:6]
		if rwConf.Name != "" {
			name = rwConf.Name
		}

		c, err := NewWriteClient(name, &ClientConfig{
			URL:              rwConf.URL,
			Timeout:          rwConf.RemoteTimeout,
			HTTPClientConfig: rwConf.HTTPClientConfig,
			SigV4Config:      rwConf.SigV4Config,
			Headers:          rwConf.Headers,
			RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
		})
		if err != nil {
			return err
		}

		queue, ok := rws.queues[hash]
		if externalLabelUnchanged && ok {
			// Update the client in case any secret configuration has changed.
			queue.SetClient(c)
			newQueues[hash] = queue
			delete(rws.queues, hash)
			continue
		}

		// Redacted to remove any passwords in the URL (that are
		// technically accepted but not recommended) since this is
		// only used for metric labels.
		endpoint := rwConf.URL.Redacted()
		newQueues[hash] = NewQueueManager(
			newQueueManagerMetrics(rws.reg, name, endpoint),
			rws.watcherMetrics,
			rws.liveReaderMetrics,
			rws.logger,
			rws.dir,
			rws.samplesIn,
			rwConf.QueueConfig,
			rwConf.MetadataConfig,
			conf.GlobalConfig.ExternalLabels,
			rwConf.WriteRelabelConfigs,
			c,
			rws.flushDeadline,
			rws.interner,
			rws.highestTimestamp,
			rws.scraper,
			rwConf.SendExemplars,
		)
		// Keep track of which queues are new so we know which to start.
		newHashes = append(newHashes, hash)
	}

	// Anything remaining in rws.queues is a queue whose config has
	// changed or was removed from the overall remote write config.
	for _, q := range rws.queues {
		q.Stop()
	}

	for _, hash := range newHashes {
		newQueues[hash].Start()
	}

	rws.queues = newQueues

	return nil
}

// Appender implements storage.Storage.
func (rws *WriteStorage) Appender(_ context.Context) storage.Appender {
	return &timestampTracker{
		writeStorage:         rws,
		highestRecvTimestamp: rws.highestTimestamp,
	}
}

// LowestSentTimestamp returns the lowest sent timestamp across all queues.
func (rws *WriteStorage) LowestSentTimestamp() int64 {
	rws.mtx.Lock()
	defer rws.mtx.Unlock()

	var lowestTs int64 = math.MaxInt64

	for _, q := range rws.queues {
		ts := int64(q.metrics.highestSentTimestamp.Get() * 1000)
		if ts < lowestTs {
			lowestTs = ts
		}
	}
	if len(rws.queues) == 0 {
		lowestTs = 0
	}

	return lowestTs
}

// Close closes the WriteStorage.
func (rws *WriteStorage) Close() error {
	rws.mtx.Lock()
	defer rws.mtx.Unlock()
	for _, q := range rws.queues {
		q.Stop()
	}
	close(rws.quit)
	return nil
}

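// timestampTracker counts appended samples and exemplars and records the
// highest timestamp seen, reporting them to the WriteStorage on Commit.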
type timestampTracker struct {
|
2020-09-24 11:44:18 -07:00
|
|
|
writeStorage *WriteStorage
|
|
|
|
samples int64
|
2021-05-06 13:53:52 -07:00
|
|
|
exemplars int64
|
2020-09-24 11:44:18 -07:00
|
|
|
highestTimestamp int64
|
2020-10-15 14:53:59 -07:00
|
|
|
highestRecvTimestamp *maxTimestamp
|
2017-05-10 02:44:13 -07:00
|
|
|
}
|
|
|
|
|
// Append implements storage.Appender.
func (t *timestampTracker) Append(_ storage.SeriesRef, _ labels.Labels, ts int64, _ float64) (storage.SeriesRef, error) {
	t.samples++
	if ts > t.highestTimestamp {
		t.highestTimestamp = ts
	}
	return 0, nil
}

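// AppendExemplar counts the exemplar; timestampTracker does not retain the
// exemplar data itself.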
func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) {
	t.exemplars++
	return 0, nil
}

func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
	// TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write.
	// UpdateMetadata is a no-op for remote write (where timestampTracker is being used) for now.
	return 0, nil
}

// Commit implements storage.Appender.
func (t *timestampTracker) Commit() error {
	t.writeStorage.samplesIn.incr(t.samples + t.exemplars)

	samplesIn.Add(float64(t.samples))
	exemplarsIn.Add(float64(t.exemplars))
	t.highestRecvTimestamp.Set(float64(t.highestTimestamp / 1000))
	return nil
}

// Rollback implements storage.Appender.
func (*timestampTracker) Rollback() error {
	return nil
}