// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"context"
	"fmt"
	"math"
	"sync"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/wlog"
)
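
// These counters track how much data has entered remote write storage via the
// Appender interface; they are incremented by timestampTracker.Commit.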
var (
	samplesIn = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "samples_in_total",
		Help:      "Samples in to remote storage, compare to samples out for queue managers.",
	})
	exemplarsIn = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "exemplars_in_total",
		Help:      "Exemplars in to remote storage, compare to exemplars out for queue managers.",
	})
	histogramsIn = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "histograms_in_total",
		Help:      "HistogramSamples in to remote storage, compare to histograms out for queue managers.",
	})
)

// WriteStorage represents all the remote write storage.
type WriteStorage struct {
	logger log.Logger
	reg    prometheus.Registerer
	mtx    sync.Mutex

	watcherMetrics    *wlog.WatcherMetrics
	liveReaderMetrics *wlog.LiveReaderMetrics
	externalLabels    labels.Labels
	dir               string
	queues            map[string]*QueueManager
	samplesIn         *ewmaRate
	flushDeadline     time.Duration
	interner          *pool
	scraper           ReadyScrapeManager
	quit              chan struct{}

	// For timestampTracker.
	highestTimestamp *maxTimestamp
}

// NewWriteStorage creates and runs a WriteStorage.
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager) *WriteStorage {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	rws := &WriteStorage{
		queues:            make(map[string]*QueueManager),
		watcherMetrics:    wlog.NewWatcherMetrics(reg),
		liveReaderMetrics: wlog.NewLiveReaderMetrics(reg),
		logger:            logger,
		reg:               reg,
		flushDeadline:     flushDeadline,
		samplesIn:         newEWMARate(ewmaWeight, shardUpdateDuration),
		dir:               dir,
		interner:          newPool(),
		scraper:           sm,
		quit:              make(chan struct{}),
		highestTimestamp: &maxTimestamp{
			Gauge: prometheus.NewGauge(prometheus.GaugeOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "highest_timestamp_in_seconds",
				Help:      "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.",
			}),
		},
	}
	if reg != nil {
		reg.MustRegister(rws.highestTimestamp)
	}
	go rws.run()
	return rws
}
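
// run ticks the samplesIn rate tracker on every shard update interval until
// the WriteStorage is closed.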
func (rws *WriteStorage) run() {
	ticker := time.NewTicker(shardUpdateDuration)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			rws.samplesIn.tick()
		case <-rws.quit:
			return
		}
	}
}
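
// Notify wakes the WAL watcher of every queue so that newly written data is
// read promptly.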
func (rws *WriteStorage) Notify() {
	rws.mtx.Lock()
	defer rws.mtx.Unlock()

	for _, q := range rws.queues {
		// These should all be non-blocking.
		q.watcher.Notify()
	}
}

// ApplyConfig updates the state as the new config requires.
// Only stop & create queues which have changes.
func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
	rws.mtx.Lock()
	defer rws.mtx.Unlock()

	// Remote write queues only need to change if the remote write config or
	// external labels change.
	externalLabelUnchanged := labels.Equal(conf.GlobalConfig.ExternalLabels, rws.externalLabels)
	rws.externalLabels = conf.GlobalConfig.ExternalLabels

	newQueues := make(map[string]*QueueManager)
	newHashes := []string{}
	for _, rwConf := range conf.RemoteWriteConfigs {
		hash, err := toHash(rwConf)
		if err != nil {
			return err
		}

		// Don't allow duplicate remote write configs.
		if _, ok := newQueues[hash]; ok {
			return fmt.Errorf("duplicate remote write configs are not allowed, found duplicate for URL: %s", rwConf.URL)
		}

		// Set the queue name to the config hash if the user has not set
		// a name in their remote write config so we can still differentiate
		// between queues that have the same remote write endpoint.
		name := hash[:6]
		if rwConf.Name != "" {
			name = rwConf.Name
		}

		c, err := NewWriteClient(name, &ClientConfig{
			URL:              rwConf.URL,
			Timeout:          rwConf.RemoteTimeout,
			HTTPClientConfig: rwConf.HTTPClientConfig,
			SigV4Config:      rwConf.SigV4Config,
			AzureADConfig:    rwConf.AzureADConfig,
			Headers:          rwConf.Headers,
			RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
		})
		if err != nil {
			return err
		}

		queue, ok := rws.queues[hash]
		if externalLabelUnchanged && ok {
			// Update the client in case any secret configuration has changed.
			queue.SetClient(c)
			newQueues[hash] = queue
			delete(rws.queues, hash)
			continue
		}

		// Redacted to remove any passwords in the URL (that are
		// technically accepted but not recommended) since this is
		// only used for metric labels.
		endpoint := rwConf.URL.Redacted()
		newQueues[hash] = NewQueueManager(
			newQueueManagerMetrics(rws.reg, name, endpoint),
			rws.watcherMetrics,
			rws.liveReaderMetrics,
			rws.logger,
			rws.dir,
			rws.samplesIn,
			rwConf.QueueConfig,
			rwConf.MetadataConfig,
			conf.GlobalConfig.ExternalLabels,
			rwConf.WriteRelabelConfigs,
			c,
			rws.flushDeadline,
			rws.interner,
			rws.highestTimestamp,
			rws.scraper,
			rwConf.SendExemplars,
			rwConf.SendNativeHistograms,
		)
		// Keep track of which queues are new so we know which to start.
		newHashes = append(newHashes, hash)
	}

	// Anything remaining in rws.queues is a queue whose config has
	// changed or was removed from the overall remote write config.
	for _, q := range rws.queues {
		q.Stop()
	}

	for _, hash := range newHashes {
		newQueues[hash].Start()
	}

	rws.queues = newQueues

	return nil
}

// Appender implements storage.Storage.
func (rws *WriteStorage) Appender(_ context.Context) storage.Appender {
	return &timestampTracker{
		writeStorage:         rws,
		highestRecvTimestamp: rws.highestTimestamp,
	}
}

// LowestSentTimestamp returns the lowest sent timestamp across all queues.
func (rws *WriteStorage) LowestSentTimestamp() int64 {
	rws.mtx.Lock()
	defer rws.mtx.Unlock()

	var lowestTs int64 = math.MaxInt64

	for _, q := range rws.queues {
		ts := int64(q.metrics.highestSentTimestamp.Get() * 1000)
		if ts < lowestTs {
			lowestTs = ts
		}
	}
	if len(rws.queues) == 0 {
		lowestTs = 0
	}

	return lowestTs
}

// Close closes the WriteStorage.
func (rws *WriteStorage) Close() error {
	rws.mtx.Lock()
	defer rws.mtx.Unlock()
	for _, q := range rws.queues {
		q.Stop()
	}
	close(rws.quit)
	return nil
}
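
// timestampTracker is the storage.Appender handed out by WriteStorage. It only
// counts appended samples, exemplars, and histograms and tracks the highest
// appended timestamp; Commit feeds those numbers into the shared rate tracker
// and metrics.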
type timestampTracker struct {
	writeStorage         *WriteStorage
	samples              int64
	exemplars            int64
	histograms           int64
	highestTimestamp     int64
	highestRecvTimestamp *maxTimestamp
}

// Append implements storage.Appender.
func (t *timestampTracker) Append(_ storage.SeriesRef, _ labels.Labels, ts int64, _ float64) (storage.SeriesRef, error) {
	t.samples++
	if ts > t.highestTimestamp {
		t.highestTimestamp = ts
	}
	return 0, nil
}
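
// AppendExemplar implements storage.Appender.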
func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) {
	t.exemplars++
	return 0, nil
}
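
// AppendHistogram implements storage.Appender.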
func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, ts int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
	t.histograms++
	if ts > t.highestTimestamp {
		t.highestTimestamp = ts
	}
	return 0, nil
}

func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
	// TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write.
	// UpdateMetadata is a no-op for remote write (where timestampTracker is being used) for now.
	return 0, nil
}

func (t *timestampTracker) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) {
	// AppendCTZeroSample is a no-op for remote write for now.
	return 0, nil
}

// Commit implements storage.Appender.
func (t *timestampTracker) Commit() error {
	t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms)

	samplesIn.Add(float64(t.samples))
	exemplarsIn.Add(float64(t.exemplars))
	histogramsIn.Add(float64(t.histograms))
	t.highestRecvTimestamp.Set(float64(t.highestTimestamp / 1000))
	return nil
}

// Rollback implements storage.Appender.
func (*timestampTracker) Rollback() error {
	return nil
}