// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package agent

import (
	"context"
	"fmt"
	"math"
	"path/filepath"
	"sync"
	"time"
	"unicode/utf8"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	"go.uber.org/atomic"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/storage/remote"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunks"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
	"github.com/prometheus/prometheus/tsdb/record"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
	"github.com/prometheus/prometheus/tsdb/wlog"
)

const (
	sampleMetricTypeFloat     = "float"
	sampleMetricTypeHistogram = "histogram"
)

var ErrUnsupported = errors.New("unsupported operation with WAL-only storage")

// Default values for options.
var (
	DefaultTruncateFrequency = 2 * time.Hour
	DefaultMinWALTime        = int64(5 * time.Minute / time.Millisecond)
	DefaultMaxWALTime        = int64(4 * time.Hour / time.Millisecond)
)

// Options of the WAL storage.
type Options struct {
	// Segments (wal files) max size.
	// WALSegmentSize <= 0, segment size is default size.
	// WALSegmentSize > 0, segment size is WALSegmentSize.
	WALSegmentSize int

	// WALCompression configures the compression type to use on records in the WAL.
	WALCompression wlog.CompressionType

	// StripeSize is the size (power of 2) in entries of the series hash map. Reducing the size will save memory but impact performance.
	StripeSize int

	// TruncateFrequency determines how frequently to truncate data from the WAL.
	TruncateFrequency time.Duration

	// Shortest and longest amount of time data can exist in the WAL before being
	// deleted.
	MinWALTime, MaxWALTime int64

	// NoLockfile disables creation and consideration of a lock file.
	NoLockfile bool
}

// DefaultOptions used for the WAL storage. They are reasonable for setups using
// millisecond-precision timestamps.
func DefaultOptions() *Options {
	return &Options{
		WALSegmentSize:    wlog.DefaultSegmentSize,
		WALCompression:    wlog.CompressionNone,
		StripeSize:        tsdb.DefaultStripeSize,
		TruncateFrequency: DefaultTruncateFrequency,
		MinWALTime:        DefaultMinWALTime,
		MaxWALTime:        DefaultMaxWALTime,
		NoLockfile:        false,
	}
}

type dbMetrics struct {
	r prometheus.Registerer

	numActiveSeries             prometheus.Gauge
	numWALSeriesPendingDeletion prometheus.Gauge
	totalAppendedSamples        *prometheus.CounterVec
	totalAppendedExemplars      prometheus.Counter
	totalOutOfOrderSamples      prometheus.Counter
	walTruncateDuration         prometheus.Summary
	walCorruptionsTotal         prometheus.Counter
	walTotalReplayDuration      prometheus.Gauge
	checkpointDeleteFail        prometheus.Counter
	checkpointDeleteTotal       prometheus.Counter
	checkpointCreationFail      prometheus.Counter
	checkpointCreationTotal     prometheus.Counter
}

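// newDBMetrics creates the WAL storage metrics and registers them with r when
// a non-nil registerer is provided.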
func newDBMetrics(r prometheus.Registerer) *dbMetrics {
	m := dbMetrics{r: r}
	m.numActiveSeries = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_agent_active_series",
		Help: "Number of active series being tracked by the WAL storage",
	})

	m.numWALSeriesPendingDeletion = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_agent_deleted_series",
		Help: "Number of series pending deletion from the WAL",
	})

	m.totalAppendedSamples = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "prometheus_agent_samples_appended_total",
		Help: "Total number of samples appended to the storage",
	}, []string{"type"})

	m.totalAppendedExemplars = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_agent_exemplars_appended_total",
		Help: "Total number of exemplars appended to the storage",
	})

	m.totalOutOfOrderSamples = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_agent_out_of_order_samples_total",
		Help: "Total number of out of order samples ingestion failed attempts.",
	})

	m.walTruncateDuration = prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "prometheus_agent_truncate_duration_seconds",
		Help: "Duration of WAL truncation.",
	})

	m.walCorruptionsTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_agent_corruptions_total",
		Help: "Total number of WAL corruptions.",
	})

	m.walTotalReplayDuration = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_agent_data_replay_duration_seconds",
		Help: "Time taken to replay the data on disk.",
	})

	m.checkpointDeleteFail = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_agent_checkpoint_deletions_failed_total",
		Help: "Total number of checkpoint deletions that failed.",
	})

	m.checkpointDeleteTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_agent_checkpoint_deletions_total",
		Help: "Total number of checkpoint deletions attempted.",
	})

	m.checkpointCreationFail = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_agent_checkpoint_creations_failed_total",
		Help: "Total number of checkpoint creations that failed.",
	})

	m.checkpointCreationTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_agent_checkpoint_creations_total",
		Help: "Total number of checkpoint creations attempted.",
	})

	if r != nil {
		r.MustRegister(
			m.numActiveSeries,
			m.numWALSeriesPendingDeletion,
			m.totalAppendedSamples,
			m.totalAppendedExemplars,
			m.totalOutOfOrderSamples,
			m.walTruncateDuration,
			m.walCorruptionsTotal,
			m.walTotalReplayDuration,
			m.checkpointDeleteFail,
			m.checkpointDeleteTotal,
			m.checkpointCreationFail,
			m.checkpointCreationTotal,
		)
	}

	return &m
}

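// Unregister removes all of the WAL storage metrics from the registerer they
// were registered with, if any.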
func (m *dbMetrics) Unregister() {
	if m.r == nil {
		return
	}
	cs := []prometheus.Collector{
		m.numActiveSeries,
		m.numWALSeriesPendingDeletion,
		m.totalAppendedSamples,
		m.totalAppendedExemplars,
		m.totalOutOfOrderSamples,
		m.walTruncateDuration,
		m.walCorruptionsTotal,
		m.walTotalReplayDuration,
		m.checkpointDeleteFail,
		m.checkpointDeleteTotal,
		m.checkpointCreationFail,
		m.checkpointCreationTotal,
	}
	for _, c := range cs {
		m.r.Unregister(c)
	}
}

// DB represents a WAL-only storage. It implements storage.DB.
type DB struct {
	mtx    sync.RWMutex
	logger log.Logger
	opts   *Options
	rs     *remote.Storage

	wal    *wlog.WL
	locker *tsdbutil.DirLocker

	appenderPool sync.Pool
	bufPool      sync.Pool

	nextRef *atomic.Uint64
	series  *stripeSeries
	// deleted is a map of (ref IDs that should be deleted from WAL) to (the WAL segment they
	// must be kept around to).
	deleted map[chunks.HeadSeriesRef]int

	donec chan struct{}
	stopc chan struct{}

	metrics *dbMetrics
}

// Open returns a new agent.DB in the given directory.
func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir string, opts *Options) (*DB, error) {
	opts = validateOptions(opts)

	locker, err := tsdbutil.NewDirLocker(dir, "agent", l, reg)
	if err != nil {
		return nil, err
	}
	if !opts.NoLockfile {
		if err := locker.Lock(); err != nil {
			return nil, err
		}
	}

	// remote_write expects WAL to be stored in a "wal" subdirectory of the main storage.
	dir = filepath.Join(dir, "wal")

	w, err := wlog.NewSize(l, reg, dir, opts.WALSegmentSize, opts.WALCompression)
	if err != nil {
		return nil, errors.Wrap(err, "creating WAL")
	}

	db := &DB{
		logger: l,
		opts:   opts,
		rs:     rs,

		wal:    w,
		locker: locker,

		nextRef: atomic.NewUint64(0),
		series:  newStripeSeries(opts.StripeSize),
		deleted: make(map[chunks.HeadSeriesRef]int),

		donec: make(chan struct{}),
		stopc: make(chan struct{}),

		metrics: newDBMetrics(reg),
	}

	db.bufPool.New = func() interface{} {
		return make([]byte, 0, 1024)
	}

	db.appenderPool.New = func() interface{} {
		return &appender{
			DB:                     db,
			pendingSeries:          make([]record.RefSeries, 0, 100),
			pendingSamples:         make([]record.RefSample, 0, 100),
			pendingHistograms:      make([]record.RefHistogramSample, 0, 100),
			pendingFloatHistograms: make([]record.RefFloatHistogramSample, 0, 100),
			pendingExamplars:       make([]record.RefExemplar, 0, 10),
		}
	}

	if err := db.replayWAL(); err != nil {
		level.Warn(db.logger).Log("msg", "encountered WAL read error, attempting repair", "err", err)
		if err := w.Repair(err); err != nil {
			return nil, errors.Wrap(err, "repair corrupted WAL")
		}
		level.Info(db.logger).Log("msg", "successfully repaired WAL")
	}

	go db.run()
	return db, nil
}

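// validateOptions returns opts with any unset or invalid fields replaced by
// their defaults, and ensures MaxWALTime is at least MinWALTime and at least
// the truncate frequency.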
func validateOptions(opts *Options) *Options {
	if opts == nil {
		opts = DefaultOptions()
	}
	if opts.WALSegmentSize <= 0 {
		opts.WALSegmentSize = wlog.DefaultSegmentSize
	}

	if opts.WALCompression == "" {
		opts.WALCompression = wlog.CompressionNone
	}

	// Revert StripeSize to DefaultStripeSize if StripeSize is either 0 or not a power of 2.
	if opts.StripeSize <= 0 || ((opts.StripeSize & (opts.StripeSize - 1)) != 0) {
		opts.StripeSize = tsdb.DefaultStripeSize
	}
	if opts.TruncateFrequency <= 0 {
		opts.TruncateFrequency = DefaultTruncateFrequency
	}
	if opts.MinWALTime <= 0 {
		opts.MinWALTime = DefaultMinWALTime
	}
	if opts.MaxWALTime <= 0 {
		opts.MaxWALTime = DefaultMaxWALTime
	}
	if opts.MinWALTime > opts.MaxWALTime {
		opts.MaxWALTime = opts.MinWALTime
	}

	if t := int64(opts.TruncateFrequency / time.Millisecond); opts.MaxWALTime < t {
		opts.MaxWALTime = t
	}
	return opts
}

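// replayWAL loads the last checkpoint (if any) and every WAL segment after it,
// rebuilding the in-memory series state before the DB starts accepting appends.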
func (db *DB) replayWAL() error {
	level.Info(db.logger).Log("msg", "replaying WAL, this may take a while", "dir", db.wal.Dir())
	start := time.Now()

	dir, startFrom, err := wlog.LastCheckpoint(db.wal.Dir())
	if err != nil && err != record.ErrNotFound {
		return errors.Wrap(err, "find last checkpoint")
	}

	multiRef := map[chunks.HeadSeriesRef]chunks.HeadSeriesRef{}

	if err == nil {
		sr, err := wlog.NewSegmentsReader(dir)
		if err != nil {
			return errors.Wrap(err, "open checkpoint")
		}
		defer func() {
			if err := sr.Close(); err != nil {
				level.Warn(db.logger).Log("msg", "error while closing the wal segments reader", "err", err)
			}
		}()

		// A corrupted checkpoint is a hard error for now and requires user
		// intervention. There's likely little data that can be recovered anyway.
		if err := db.loadWAL(wlog.NewReader(sr), multiRef); err != nil {
			return errors.Wrap(err, "backfill checkpoint")
		}
		startFrom++
		level.Info(db.logger).Log("msg", "WAL checkpoint loaded")
	}

	// Find the last segment.
	_, last, err := wlog.Segments(db.wal.Dir())
	if err != nil {
		return errors.Wrap(err, "finding WAL segments")
	}

	// Backfill segments from the most recent checkpoint onwards.
	for i := startFrom; i <= last; i++ {
		seg, err := wlog.OpenReadSegment(wlog.SegmentName(db.wal.Dir(), i))
		if err != nil {
			return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i))
		}

		sr := wlog.NewSegmentBufReader(seg)
		err = db.loadWAL(wlog.NewReader(sr), multiRef)
		if err := sr.Close(); err != nil {
			level.Warn(db.logger).Log("msg", "error while closing the wal segments reader", "err", err)
		}
		if err != nil {
			return err
		}
		level.Info(db.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", last)
	}

	walReplayDuration := time.Since(start)
	db.metrics.walTotalReplayDuration.Set(walReplayDuration.Seconds())

	return nil
}

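// loadWAL decodes the records read from r and applies them to the in-memory
// series state: series records create memSeries entries, while sample records
// only advance each series' last-seen timestamp.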
func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef) (err error) {
	var (
		dec     record.Decoder
		lastRef = chunks.HeadSeriesRef(db.nextRef.Load())

		decoded    = make(chan interface{}, 10)
		errCh      = make(chan error, 1)
		seriesPool = sync.Pool{
			New: func() interface{} {
				return []record.RefSeries{}
			},
		}
		samplesPool = sync.Pool{
			New: func() interface{} {
				return []record.RefSample{}
			},
		}
		histogramsPool = sync.Pool{
			New: func() interface{} {
				return []record.RefHistogramSample{}
			},
		}
		floatHistogramsPool = sync.Pool{
			New: func() interface{} {
				return []record.RefFloatHistogramSample{}
			},
		}
	)

	go func() {
		defer close(decoded)
		var err error
		for r.Next() {
			rec := r.Record()
			switch dec.Type(rec) {
			case record.Series:
				series := seriesPool.Get().([]record.RefSeries)[:0]
				series, err = dec.Series(rec, series)
				if err != nil {
					errCh <- &wlog.CorruptionErr{
						Err:     errors.Wrap(err, "decode series"),
						Segment: r.Segment(),
						Offset:  r.Offset(),
					}
					return
				}
				decoded <- series
			case record.Samples:
				samples := samplesPool.Get().([]record.RefSample)[:0]
				samples, err = dec.Samples(rec, samples)
				if err != nil {
					errCh <- &wlog.CorruptionErr{
						Err:     errors.Wrap(err, "decode samples"),
						Segment: r.Segment(),
						Offset:  r.Offset(),
					}
					return
				}
				decoded <- samples
			case record.HistogramSamples:
				histograms := histogramsPool.Get().([]record.RefHistogramSample)[:0]
				histograms, err = dec.HistogramSamples(rec, histograms)
				if err != nil {
					errCh <- &wlog.CorruptionErr{
						Err:     errors.Wrap(err, "decode histogram samples"),
						Segment: r.Segment(),
						Offset:  r.Offset(),
					}
					return
				}
				decoded <- histograms
			case record.FloatHistogramSamples:
				floatHistograms := floatHistogramsPool.Get().([]record.RefFloatHistogramSample)[:0]
				floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
				if err != nil {
					errCh <- &wlog.CorruptionErr{
						Err:     errors.Wrap(err, "decode float histogram samples"),
						Segment: r.Segment(),
						Offset:  r.Offset(),
					}
					return
				}
				decoded <- floatHistograms
			case record.Tombstones, record.Exemplars:
				// We don't care about tombstones or exemplars during replay.
				// TODO: If we decide to decode exemplars, we should make sure to prepopulate
				// stripeSeries.exemplars in the next block by using setLatestExemplar.
				continue
			default:
				errCh <- &wlog.CorruptionErr{
					Err:     errors.Errorf("invalid record type %v", dec.Type(rec)),
					Segment: r.Segment(),
					Offset:  r.Offset(),
				}
			}
		}
	}()

	var nonExistentSeriesRefs atomic.Uint64

	for d := range decoded {
		switch v := d.(type) {
		case []record.RefSeries:
			for _, entry := range v {
				// If this is a new series, create it in memory. If we never read in a
				// sample for this series, its timestamp will remain at 0 and it will
				// be deleted at the next GC.
				if db.series.GetByID(entry.Ref) == nil {
					series := &memSeries{ref: entry.Ref, lset: entry.Labels, lastTs: 0}
					db.series.Set(entry.Labels.Hash(), series)
					multiRef[entry.Ref] = series.ref
					db.metrics.numActiveSeries.Inc()
					if entry.Ref > lastRef {
						lastRef = entry.Ref
					}
				}
			}

			//nolint:staticcheck
			seriesPool.Put(v)
		case []record.RefSample:
			for _, entry := range v {
				// Update the lastTs for the series based on the sample's timestamp.
				ref, ok := multiRef[entry.Ref]
				if !ok {
					nonExistentSeriesRefs.Inc()
					continue
				}
				series := db.series.GetByID(ref)
				if entry.T > series.lastTs {
					series.lastTs = entry.T
				}
			}

			//nolint:staticcheck
			samplesPool.Put(v)
		case []record.RefHistogramSample:
			for _, entry := range v {
				// Update the lastTs for the series based on the sample's timestamp.
				ref, ok := multiRef[entry.Ref]
				if !ok {
					nonExistentSeriesRefs.Inc()
					continue
				}
				series := db.series.GetByID(ref)
				if entry.T > series.lastTs {
					series.lastTs = entry.T
				}
			}
			//nolint:staticcheck
			histogramsPool.Put(v)
		case []record.RefFloatHistogramSample:
			for _, entry := range v {
				// Update the lastTs for the series based on the sample's timestamp.
				ref, ok := multiRef[entry.Ref]
				if !ok {
					nonExistentSeriesRefs.Inc()
					continue
				}
				series := db.series.GetByID(ref)
				if entry.T > series.lastTs {
					series.lastTs = entry.T
				}
			}
			//nolint:staticcheck
			floatHistogramsPool.Put(v)
		default:
			panic(fmt.Errorf("unexpected decoded type: %T", d))
		}
	}

	if v := nonExistentSeriesRefs.Load(); v > 0 {
		level.Warn(db.logger).Log("msg", "found sample referencing non-existing series", "skipped_series", v)
	}

	db.nextRef.Store(uint64(lastRef))

	select {
	case err := <-errCh:
		return err
	default:
		if r.Err() != nil {
			return errors.Wrap(r.Err(), "read records")
		}
		return nil
	}
}

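// run periodically truncates the WAL until the DB is closed. The truncation
// timestamp is derived from the lowest timestamp successfully sent by remote
// write, bounded by MinWALTime and MaxWALTime.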
func (db *DB) run() {
	defer close(db.donec)

Loop:
	for {
		select {
		case <-db.stopc:
			break Loop
		case <-time.After(db.opts.TruncateFrequency):
			// The timestamp ts is used to determine which series are not receiving
			// samples and may be deleted from the WAL. Their most recent append
			// timestamp is compared to ts, and if that timestamp is older than ts,
			// they are considered inactive and may be deleted.
			//
			// Subtracting a duration from ts will add a buffer for when series are
			// considered inactive and safe for deletion.
			ts := db.rs.LowestSentTimestamp() - db.opts.MinWALTime
			if ts < 0 {
				ts = 0
			}

			// Network issues can prevent the result of getRemoteWriteTimestamp from
			// changing. We don't want data in the WAL to grow forever, so we set a cap
			// on the maximum age data can be. If our ts is older than this cutoff point,
			// we'll shift it forward to start deleting very stale data.
			if maxTS := timestamp.FromTime(time.Now()) - db.opts.MaxWALTime; ts < maxTS {
				ts = maxTS
			}

			level.Debug(db.logger).Log("msg", "truncating the WAL", "ts", ts)
			if err := db.truncate(ts); err != nil {
				level.Warn(db.logger).Log("msg", "failed to truncate WAL", "err", err)
			}
		}
	}
}

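// truncate removes inactive series, writes a checkpoint covering the lower
// two-thirds of completed segments, and deletes the WAL segments and old
// checkpoints that the new checkpoint supersedes.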
func (db *DB) truncate(mint int64) error {
	db.mtx.RLock()
	defer db.mtx.RUnlock()

	start := time.Now()

	db.gc(mint)
	level.Info(db.logger).Log("msg", "series GC completed", "duration", time.Since(start))

	first, last, err := wlog.Segments(db.wal.Dir())
	if err != nil {
		return errors.Wrap(err, "get segment range")
	}

	// Start a new segment so low ingestion volume instances don't have more WAL
	// than needed.
	if _, err := db.wal.NextSegment(); err != nil {
		return errors.Wrap(err, "next segment")
	}

	last-- // Never consider the most recent segment for checkpoint.
	if last < 0 {
		return nil // no segments yet
	}

	// The lower two-thirds of segments should contain mostly obsolete samples.
	// If we have less than two segments, it's not worth checkpointing yet.
	last = first + (last-first)*2/3
	if last <= first {
		return nil
	}

	keep := func(id chunks.HeadSeriesRef) bool {
		if db.series.GetByID(id) != nil {
			return true
		}

		seg, ok := db.deleted[id]
		return ok && seg > last
	}

	db.metrics.checkpointCreationTotal.Inc()

	if _, err = wlog.Checkpoint(db.logger, db.wal, first, last, keep, mint); err != nil {
		db.metrics.checkpointCreationFail.Inc()
		if _, ok := errors.Cause(err).(*wlog.CorruptionErr); ok {
			db.metrics.walCorruptionsTotal.Inc()
		}
		return errors.Wrap(err, "create checkpoint")
	}
	if err := db.wal.Truncate(last + 1); err != nil {
		// If truncating fails, we'll just try it again at the next checkpoint.
		// Leftover segments will still just be ignored in the future if there's a
		// checkpoint that supersedes them.
		level.Error(db.logger).Log("msg", "truncating segments failed", "err", err)
	}

	// The checkpoint is written and segments before it are truncated, so we
	// no longer need to track deleted series that were being kept around.
	for ref, segment := range db.deleted {
		if segment <= last {
			delete(db.deleted, ref)
		}
	}
	db.metrics.checkpointDeleteTotal.Inc()
	db.metrics.numWALSeriesPendingDeletion.Set(float64(len(db.deleted)))

	if err := wlog.DeleteCheckpoints(db.wal.Dir(), last); err != nil {
		// Leftover old checkpoints do not cause problems down the line beyond
		// occupying disk space. They will just be ignored since a newer checkpoint
		// exists.
		level.Error(db.logger).Log("msg", "delete old checkpoints", "err", err)
		db.metrics.checkpointDeleteFail.Inc()
	}

	db.metrics.walTruncateDuration.Observe(time.Since(start).Seconds())

	level.Info(db.logger).Log("msg", "WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start))
	return nil
}

// gc marks ref IDs that have not received a sample since mint as deleted in
// db.deleted, along with the segment where they originally got deleted.
func (db *DB) gc(mint int64) {
	deleted := db.series.GC(mint)
	db.metrics.numActiveSeries.Sub(float64(len(deleted)))

	_, last, _ := wlog.Segments(db.wal.Dir())

	// We want to keep series records for any newly deleted series
	// until we've passed the last recorded segment. This prevents
	// the WAL having samples for series records that no longer exist.
	for ref := range deleted {
		db.deleted[ref] = last
	}

	db.metrics.numWALSeriesPendingDeletion.Set(float64(len(db.deleted)))
}

// StartTime implements the Storage interface.
func (db *DB) StartTime() (int64, error) {
	return int64(model.Latest), nil
}

// Querier implements the Storage interface.
func (db *DB) Querier(context.Context, int64, int64) (storage.Querier, error) {
	return nil, ErrUnsupported
}

// ChunkQuerier implements the Storage interface.
func (db *DB) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) {
	return nil, ErrUnsupported
}

// ExemplarQuerier implements the Storage interface.
func (db *DB) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) {
	return nil, ErrUnsupported
}

// Appender implements storage.Storage.
func (db *DB) Appender(context.Context) storage.Appender {
	return db.appenderPool.Get().(storage.Appender)
}

// Close implements the Storage interface.
func (db *DB) Close() error {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	close(db.stopc)
	<-db.donec

	db.metrics.Unregister()

	return tsdb_errors.NewMulti(db.locker.Release(), db.wal.Close()).Err()
}

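// appender buffers series, samples, histograms, and exemplars until Commit
// writes them to the WAL as a batch. Instances are pooled via DB.appenderPool.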
type appender struct {
	*DB

	pendingSeries          []record.RefSeries
	pendingSamples         []record.RefSample
	pendingHistograms      []record.RefHistogramSample
	pendingFloatHistograms []record.RefFloatHistogramSample
	pendingExamplars       []record.RefExemplar

	// Pointers to the series referenced by each element of pendingSamples.
	// Series lock is not held on elements.
	sampleSeries []*memSeries

	// Pointers to the series referenced by each element of pendingHistograms.
	// Series lock is not held on elements.
	histogramSeries []*memSeries

	// Pointers to the series referenced by each element of pendingFloatHistograms.
	// Series lock is not held on elements.
	floatHistogramSeries []*memSeries
}

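// Append queues a float sample for the WAL, creating the series in memory (and
// queuing a series record) if it has not been seen before. Samples older than
// the series' last timestamp are rejected as out of order.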
func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
	// series references and chunk references are identical for agent mode.
	headRef := chunks.HeadSeriesRef(ref)

	series := a.series.GetByID(headRef)
	if series == nil {
		// Ensure no empty or duplicate labels have gotten through. This mirrors the
		// equivalent validation code in the TSDB's headAppender.
		l = l.WithoutEmpty()
		if l.IsEmpty() {
			return 0, errors.Wrap(tsdb.ErrInvalidSample, "empty labelset")
		}

		if lbl, dup := l.HasDuplicateLabelNames(); dup {
			return 0, errors.Wrap(tsdb.ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, lbl))
		}

		var created bool
		series, created = a.getOrCreate(l)
		if created {
			a.pendingSeries = append(a.pendingSeries, record.RefSeries{
				Ref:    series.ref,
				Labels: l,
			})

			a.metrics.numActiveSeries.Inc()
		}
	}

	series.Lock()
	defer series.Unlock()

	if t < series.lastTs {
		a.metrics.totalOutOfOrderSamples.Inc()
		return 0, storage.ErrOutOfOrderSample
	}

	// NOTE: always modify pendingSamples and sampleSeries together.
	a.pendingSamples = append(a.pendingSamples, record.RefSample{
		Ref: series.ref,
		T:   t,
		V:   v,
	})
	a.sampleSeries = append(a.sampleSeries, series)

	a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
	return storage.SeriesRef(series.ref), nil
}

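// getOrCreate returns the memSeries for the given label set, creating it with
// the next available reference if it does not exist yet. The second return
// value reports whether a new series was created.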
func (a *appender) getOrCreate(l labels.Labels) (series *memSeries, created bool) {
	hash := l.Hash()

	series = a.series.GetByHash(hash, l)
	if series != nil {
		return series, false
	}

	ref := chunks.HeadSeriesRef(a.nextRef.Inc())
	series = &memSeries{ref: ref, lset: l, lastTs: math.MinInt64}
	a.series.Set(hash, series)
	return series, true
}

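// AppendExemplar queues an exemplar for the WAL. The referenced series must
// already exist; an exemplar identical to the last one stored for the series
// is silently dropped.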
func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
	// Series references and chunk references are identical for agent mode.
	headRef := chunks.HeadSeriesRef(ref)

	s := a.series.GetByID(headRef)
	if s == nil {
		return 0, fmt.Errorf("unknown series ref when trying to add exemplar: %d", ref)
	}

	// Ensure no empty labels have gotten through.
	e.Labels = e.Labels.WithoutEmpty()

	if lbl, dup := e.Labels.HasDuplicateLabelNames(); dup {
		return 0, errors.Wrap(tsdb.ErrInvalidExemplar, fmt.Sprintf(`label name "%s" is not unique`, lbl))
	}

	// Exemplar label length does not include chars involved in text rendering such as quotes,
	// equals signs, or commas. See definition of const ExemplarMaxLabelSetLength.
	labelSetLen := 0
	err := e.Labels.Validate(func(l labels.Label) error {
		labelSetLen += utf8.RuneCountInString(l.Name)
		labelSetLen += utf8.RuneCountInString(l.Value)

		if labelSetLen > exemplar.ExemplarMaxLabelSetLength {
			return storage.ErrExemplarLabelLength
		}
		return nil
	})
	if err != nil {
		return 0, err
	}

	// Check for duplicate vs last stored exemplar for this series, and discard those.
	// Otherwise, record the current exemplar as the latest.
	// Prometheus' TSDB returns 0 when encountering duplicates, so we do the same here.
	prevExemplar := a.series.GetLatestExemplar(s.ref)
	if prevExemplar != nil && prevExemplar.Equals(e) {
		// Duplicate, don't return an error but don't accept the exemplar.
		return 0, nil
	}
	a.series.SetLatestExemplar(s.ref, &e)

	a.pendingExamplars = append(a.pendingExamplars, record.RefExemplar{
		Ref:    s.ref,
		T:      e.Ts,
		V:      e.Value,
		Labels: e.Labels,
	})

	a.metrics.totalAppendedExemplars.Inc()
	return storage.SeriesRef(s.ref), nil
}

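// AppendHistogram queues a native histogram sample (integer or float) for the
// WAL after validating it, creating the series in memory if needed. Samples
// older than the series' last timestamp are rejected as out of order.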
func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	if h != nil {
		if err := tsdb.ValidateHistogram(h); err != nil {
			return 0, err
		}
	}

	if fh != nil {
		if err := tsdb.ValidateFloatHistogram(fh); err != nil {
			return 0, err
		}
	}

	// series references and chunk references are identical for agent mode.
	headRef := chunks.HeadSeriesRef(ref)

	series := a.series.GetByID(headRef)
	if series == nil {
		// Ensure no empty or duplicate labels have gotten through. This mirrors the
		// equivalent validation code in the TSDB's headAppender.
		l = l.WithoutEmpty()
		if l.IsEmpty() {
			return 0, errors.Wrap(tsdb.ErrInvalidSample, "empty labelset")
		}

		if lbl, dup := l.HasDuplicateLabelNames(); dup {
			return 0, errors.Wrap(tsdb.ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, lbl))
		}

		var created bool
		series, created = a.getOrCreate(l)
		if created {
			a.pendingSeries = append(a.pendingSeries, record.RefSeries{
				Ref:    series.ref,
				Labels: l,
			})

			a.metrics.numActiveSeries.Inc()
		}
	}

	series.Lock()
	defer series.Unlock()

	if t < series.lastTs {
		a.metrics.totalOutOfOrderSamples.Inc()
		return 0, storage.ErrOutOfOrderSample
	}

	switch {
	case h != nil:
		// NOTE: always modify pendingHistograms and histogramSeries together
		a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{
			Ref: series.ref,
			T:   t,
			H:   h,
		})
		a.histogramSeries = append(a.histogramSeries, series)
	case fh != nil:
		// NOTE: always modify pendingFloatHistograms and floatHistogramSeries together
		a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{
			Ref: series.ref,
			T:   t,
			FH:  fh,
		})
		a.floatHistogramSeries = append(a.floatHistogramSeries, series)
	}

	a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
	return storage.SeriesRef(series.ref), nil
}

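// UpdateMetadata is currently a no-op for the agent; metadata is not yet wired
// into the WAL-only appender.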
func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
	// TODO: Wire metadata in the Agent's appender.
	return 0, nil
}

// Commit submits the collected samples and purges the batch.
func (a *appender) Commit() error {
	a.mtx.RLock()
	defer a.mtx.RUnlock()

	var encoder record.Encoder
	buf := a.bufPool.Get().([]byte)

	if len(a.pendingSeries) > 0 {
		buf = encoder.Series(a.pendingSeries, buf)
		if err := a.wal.Log(buf); err != nil {
			return err
		}
		buf = buf[:0]
	}

	if len(a.pendingSamples) > 0 {
		buf = encoder.Samples(a.pendingSamples, buf)
		if err := a.wal.Log(buf); err != nil {
			return err
		}
		buf = buf[:0]
	}

	if len(a.pendingHistograms) > 0 {
		buf = encoder.HistogramSamples(a.pendingHistograms, buf)
		if err := a.wal.Log(buf); err != nil {
			return err
		}
		buf = buf[:0]
	}

	if len(a.pendingFloatHistograms) > 0 {
		buf = encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf)
		if err := a.wal.Log(buf); err != nil {
			return err
		}
		buf = buf[:0]
	}

	if len(a.pendingExamplars) > 0 {
		buf = encoder.Exemplars(a.pendingExamplars, buf)
		if err := a.wal.Log(buf); err != nil {
			return err
		}
		buf = buf[:0]
	}

	var series *memSeries
	for i, s := range a.pendingSamples {
		series = a.sampleSeries[i]
		if !series.updateTimestamp(s.T) {
			a.metrics.totalOutOfOrderSamples.Inc()
		}
	}
	for i, s := range a.pendingHistograms {
		series = a.histogramSeries[i]
		if !series.updateTimestamp(s.T) {
			a.metrics.totalOutOfOrderSamples.Inc()
		}
	}
	for i, s := range a.pendingFloatHistograms {
		series = a.floatHistogramSeries[i]
		if !series.updateTimestamp(s.T) {
			a.metrics.totalOutOfOrderSamples.Inc()
		}
	}

	//nolint:staticcheck
	a.bufPool.Put(buf)
	return a.Rollback()
}

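// Rollback drops any pending records, resets the per-batch buffers, and
// returns the appender to the pool for reuse.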
func (a *appender) Rollback() error {
	a.pendingSeries = a.pendingSeries[:0]
	a.pendingSamples = a.pendingSamples[:0]
	a.pendingHistograms = a.pendingHistograms[:0]
	a.pendingFloatHistograms = a.pendingFloatHistograms[:0]
	a.pendingExamplars = a.pendingExamplars[:0]
	a.sampleSeries = a.sampleSeries[:0]
	a.histogramSeries = a.histogramSeries[:0]
	a.floatHistogramSeries = a.floatHistogramSeries[:0]
	a.appenderPool.Put(a)
	return nil
}