2017-04-10 11:59:45 -07:00
|
|
|
|
// Copyright 2017 The Prometheus Authors
|
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
|
//
|
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
//
|
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
2016-12-04 04:16:11 -08:00
|
|
|
|
package tsdb
|
|
|
|
|
|
|
|
|
|
import (
|
2020-07-30 04:11:13 -07:00
|
|
|
|
"context"
|
2019-05-24 11:33:28 -07:00
|
|
|
|
"fmt"
|
2021-06-28 08:00:55 -07:00
|
|
|
|
"github.com/prometheus/prometheus/pkg/histogram"
|
2017-01-04 05:06:40 -08:00
|
|
|
|
"math"
|
2020-05-06 08:30:00 -07:00
|
|
|
|
"path/filepath"
|
2017-10-07 06:55:11 -07:00
|
|
|
|
"runtime"
|
2016-12-14 09:38:46 -08:00
|
|
|
|
"sort"
|
2016-12-04 04:16:11 -08:00
|
|
|
|
"sync"
|
2017-08-30 09:34:54 -07:00
|
|
|
|
"time"
|
2017-05-17 07:43:01 -07:00
|
|
|
|
|
2021-06-11 09:17:59 -07:00
|
|
|
|
"github.com/go-kit/log"
|
|
|
|
|
"github.com/go-kit/log/level"
|
2019-07-23 01:04:48 -07:00
|
|
|
|
"github.com/oklog/ulid"
|
2017-01-19 02:22:47 -08:00
|
|
|
|
"github.com/pkg/errors"
|
2017-08-30 09:34:54 -07:00
|
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
2020-10-22 02:00:08 -07:00
|
|
|
|
"go.uber.org/atomic"
|
|
|
|
|
|
2021-03-16 02:47:45 -07:00
|
|
|
|
"github.com/prometheus/prometheus/pkg/exemplar"
|
2019-11-18 11:53:33 -08:00
|
|
|
|
"github.com/prometheus/prometheus/pkg/labels"
|
2020-02-06 07:58:38 -08:00
|
|
|
|
"github.com/prometheus/prometheus/storage"
|
2019-08-13 01:34:14 -07:00
|
|
|
|
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
|
|
|
|
"github.com/prometheus/prometheus/tsdb/chunks"
|
2020-05-06 08:30:00 -07:00
|
|
|
|
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
|
2019-08-13 01:34:14 -07:00
|
|
|
|
"github.com/prometheus/prometheus/tsdb/index"
|
2019-09-19 02:15:41 -07:00
|
|
|
|
"github.com/prometheus/prometheus/tsdb/record"
|
|
|
|
|
"github.com/prometheus/prometheus/tsdb/tombstones"
|
2020-07-31 08:03:02 -07:00
|
|
|
|
"github.com/prometheus/prometheus/tsdb/tsdbutil"
|
2019-08-13 01:34:14 -07:00
|
|
|
|
"github.com/prometheus/prometheus/tsdb/wal"
|
2016-12-04 04:16:11 -08:00
|
|
|
|
)
|
|
|
|
|
|
2017-01-17 07:33:58 -08:00
|
|
|
|
var (
	// ErrInvalidSample is returned if an appended sample is not valid and can't
	// be ingested.
	ErrInvalidSample = errors.New("invalid sample")
	// ErrInvalidExemplar is returned if an appended exemplar is not valid and can't
	// be ingested.
	ErrInvalidExemplar = errors.New("invalid exemplar")
	// ErrAppenderClosed is returned if an appender has already been successfully
	// rolled back or committed.
	ErrAppenderClosed = errors.New("appender closed")
)
|
|
|
|
|
|
2021-03-16 02:47:45 -07:00
|
|
|
|
// ExemplarStorage is the interface the Head uses to store and query exemplars.
// It embeds storage.ExemplarQueryable for read access and adds write-side
// ingestion and validation.
type ExemplarStorage interface {
	storage.ExemplarQueryable
	// AddExemplar stores the exemplar for the series identified by the labels.
	AddExemplar(labels.Labels, exemplar.Exemplar) error
	// ValidateExemplar checks whether the exemplar could be appended for the
	// given series without actually storing it.
	ValidateExemplar(labels.Labels, exemplar.Exemplar) error
}
|
|
|
|
|
|
2017-08-28 15:39:17 -07:00
|
|
|
|
// Head handles reads and writes of time series data within a time window.
type Head struct {
	chunkRange            atomic.Int64
	numSeries             atomic.Uint64
	minTime, maxTime      atomic.Int64 // Current min and max of the samples included in the head.
	minValidTime          atomic.Int64 // Mint allowed to be added to the head. It shouldn't be lower than the maxt of the last persisted block.
	lastWALTruncationTime atomic.Int64
	lastSeriesID          atomic.Uint64
	// hasHistograms this is used to m-map all chunks in case there are histograms.
	// A hack to avoid updating all the failing tests.
	hasHistograms atomic.Bool

	metrics   *headMetrics
	opts      *HeadOptions
	wal       *wal.WAL
	exemplars ExemplarStorage
	logger    log.Logger
	// Pools to reduce allocations of frequently reused objects on the
	// append path.
	appendPool    sync.Pool
	exemplarsPool sync.Pool
	seriesPool    sync.Pool
	bytesPool     sync.Pool
	memChunkPool  sync.Pool

	// All series addressable by their ID or hash.
	series *stripeSeries

	// symMtx guards the symbols map below.
	symMtx  sync.RWMutex
	symbols map[string]struct{}

	// deletedMtx guards the deleted map below.
	deletedMtx sync.Mutex
	deleted    map[uint64]int // Deleted series, and what WAL segment they must be kept until.

	postings *index.MemPostings // Postings lists for terms.

	tombstones *tombstones.MemTombstones

	// iso tracks appender isolation (read/write watermarks).
	iso *isolation

	// cardinalityMutex guards the two cardinality cache fields below.
	cardinalityMutex      sync.Mutex
	cardinalityCache      *index.PostingsStats // Posting stats cache which will expire after 30sec.
	lastPostingsStatsCall time.Duration        // Last posting stats call (PostingsCardinalityStats()) time for caching.

	// chunkDiskMapper is used to write and read Head chunks to/from disk.
	chunkDiskMapper *chunks.ChunkDiskMapper

	// closedMtx guards closed.
	closedMtx sync.Mutex
	closed    bool

	stats *HeadStats
}
|
|
|
|
|
|
2021-02-09 06:12:48 -08:00
|
|
|
|
// HeadOptions are parameters for the Head block.
type HeadOptions struct {
	// ChunkRange is the time range (in the same unit as sample timestamps,
	// typically milliseconds) covered by a head chunk before a new one is cut.
	ChunkRange int64
	// ChunkDirRoot is the parent directory of the chunks directory.
	ChunkDirRoot string
	// ChunkPool is the pool used for chunk allocations; a default pool is
	// created by NewHead when nil.
	ChunkPool chunkenc.Pool
	// ChunkWriteBufferSize is the write buffer size used by the chunk disk mapper.
	ChunkWriteBufferSize int
	// StripeSize sets the number of entries in the hash map, it must be a power of 2.
	// A larger StripeSize will allocate more memory up-front, but will increase performance when handling a large number of series.
	// A smaller StripeSize reduces the memory allocated, but can decrease performance with large number of series.
	StripeSize int
	// SeriesCallback receives series lifecycle notifications; a no-op
	// implementation is substituted by NewHead when nil.
	SeriesCallback SeriesLifecycleCallback
	// NumExemplars is the capacity of the circular exemplar storage.
	NumExemplars int
}
|
|
|
|
|
|
|
|
|
|
func DefaultHeadOptions() *HeadOptions {
|
|
|
|
|
return &HeadOptions{
|
|
|
|
|
ChunkRange: DefaultBlockDuration,
|
|
|
|
|
ChunkDirRoot: "",
|
|
|
|
|
ChunkPool: chunkenc.NewPool(),
|
|
|
|
|
ChunkWriteBufferSize: chunks.DefaultWriteBufferSize,
|
|
|
|
|
StripeSize: DefaultStripeSize,
|
|
|
|
|
SeriesCallback: &noopSeriesLifecycleCallback{},
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-30 09:34:54 -07:00
|
|
|
|
// headMetrics holds the Prometheus instrumentation for the Head block.
// All metrics are created and (optionally) registered by newHeadMetrics.
type headMetrics struct {
	activeAppenders          prometheus.Gauge
	series                   prometheus.GaugeFunc // Reads the live series count from the Head.
	seriesCreated            prometheus.Counter
	seriesRemoved            prometheus.Counter
	seriesNotFound           prometheus.Counter
	chunks                   prometheus.Gauge
	chunksCreated            prometheus.Counter
	chunksRemoved            prometheus.Counter
	gcDuration               prometheus.Summary
	samplesAppended          prometheus.Counter
	outOfBoundSamples        prometheus.Counter
	outOfOrderSamples        prometheus.Counter
	walTruncateDuration      prometheus.Summary
	walCorruptionsTotal      prometheus.Counter
	walTotalReplayDuration   prometheus.Gauge
	headTruncateFail         prometheus.Counter
	headTruncateTotal        prometheus.Counter
	checkpointDeleteFail     prometheus.Counter
	checkpointDeleteTotal    prometheus.Counter
	checkpointCreationFail   prometheus.Counter
	checkpointCreationTotal  prometheus.Counter
	mmapChunkCorruptionTotal prometheus.Counter
}
|
|
|
|
|
|
|
|
|
|
// newHeadMetrics creates all head metrics and, if r is non-nil, registers
// them with r. Metrics whose values are read from the Head (series count,
// min/max time, isolation watermarks) are bound to h via closures.
func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
	m := &headMetrics{
		activeAppenders: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "prometheus_tsdb_head_active_appenders",
			Help: "Number of currently active appender transactions",
		}),
		series: prometheus.NewGaugeFunc(prometheus.GaugeOpts{
			Name: "prometheus_tsdb_head_series",
			Help: "Total number of series in the head block.",
		}, func() float64 {
			return float64(h.NumSeries())
		}),
		seriesCreated: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_head_series_created_total",
			Help: "Total number of series created in the head",
		}),
		seriesRemoved: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_head_series_removed_total",
			Help: "Total number of series removed in the head",
		}),
		seriesNotFound: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_head_series_not_found_total",
			Help: "Total number of requests for series that were not found.",
		}),
		chunks: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "prometheus_tsdb_head_chunks",
			Help: "Total number of chunks in the head block.",
		}),
		chunksCreated: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_head_chunks_created_total",
			Help: "Total number of chunks created in the head",
		}),
		chunksRemoved: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_head_chunks_removed_total",
			Help: "Total number of chunks removed in the head",
		}),
		gcDuration: prometheus.NewSummary(prometheus.SummaryOpts{
			Name: "prometheus_tsdb_head_gc_duration_seconds",
			Help: "Runtime of garbage collection in the head block.",
		}),
		walTruncateDuration: prometheus.NewSummary(prometheus.SummaryOpts{
			Name: "prometheus_tsdb_wal_truncate_duration_seconds",
			Help: "Duration of WAL truncation.",
		}),
		walCorruptionsTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_wal_corruptions_total",
			Help: "Total number of WAL corruptions.",
		}),
		walTotalReplayDuration: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "prometheus_tsdb_data_replay_duration_seconds",
			Help: "Time taken to replay the data on disk.",
		}),
		samplesAppended: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_head_samples_appended_total",
			Help: "Total number of appended samples.",
		}),
		outOfBoundSamples: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_out_of_bound_samples_total",
			Help: "Total number of out of bound samples ingestion failed attempts.",
		}),
		outOfOrderSamples: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_out_of_order_samples_total",
			Help: "Total number of out of order samples ingestion failed attempts.",
		}),
		headTruncateFail: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_head_truncations_failed_total",
			Help: "Total number of head truncations that failed.",
		}),
		headTruncateTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_head_truncations_total",
			Help: "Total number of head truncations attempted.",
		}),
		checkpointDeleteFail: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_checkpoint_deletions_failed_total",
			Help: "Total number of checkpoint deletions that failed.",
		}),
		checkpointDeleteTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_checkpoint_deletions_total",
			Help: "Total number of checkpoint deletions attempted.",
		}),
		checkpointCreationFail: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_checkpoint_creations_failed_total",
			Help: "Total number of checkpoint creations that failed.",
		}),
		checkpointCreationTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_checkpoint_creations_total",
			Help: "Total number of checkpoint creations attempted.",
		}),
		mmapChunkCorruptionTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_mmap_chunk_corruptions_total",
			Help: "Total number of memory-mapped chunk corruptions.",
		}),
	}

	if r != nil {
		r.MustRegister(
			m.activeAppenders,
			m.series,
			m.chunks,
			m.chunksCreated,
			m.chunksRemoved,
			m.seriesCreated,
			m.seriesRemoved,
			m.seriesNotFound,
			m.gcDuration,
			m.walTruncateDuration,
			m.walCorruptionsTotal,
			m.walTotalReplayDuration,
			m.samplesAppended,
			m.outOfBoundSamples,
			m.outOfOrderSamples,
			m.headTruncateFail,
			m.headTruncateTotal,
			m.checkpointDeleteFail,
			m.checkpointDeleteTotal,
			m.checkpointCreationFail,
			m.checkpointCreationTotal,
			m.mmapChunkCorruptionTotal,
			// Metrics bound to functions and not needed in tests
			// can be created and registered on the spot.
			prometheus.NewGaugeFunc(prometheus.GaugeOpts{
				Name: "prometheus_tsdb_head_max_time",
				Help: "Maximum timestamp of the head block. The unit is decided by the library consumer.",
			}, func() float64 {
				return float64(h.MaxTime())
			}),
			prometheus.NewGaugeFunc(prometheus.GaugeOpts{
				Name: "prometheus_tsdb_head_min_time",
				Help: "Minimum time bound of the head block. The unit is decided by the library consumer.",
			}, func() float64 {
				return float64(h.MinTime())
			}),
			prometheus.NewGaugeFunc(prometheus.GaugeOpts{
				Name: "prometheus_tsdb_isolation_low_watermark",
				Help: "The lowest TSDB append ID that is still referenced.",
			}, func() float64 {
				return float64(h.iso.lowWatermark())
			}),
			prometheus.NewGaugeFunc(prometheus.GaugeOpts{
				Name: "prometheus_tsdb_isolation_high_watermark",
				Help: "The highest TSDB append ID that has been given out.",
			}, func() float64 {
				return float64(h.iso.lastAppendID())
			}),
		)
	}
	return m
}
|
|
|
|
|
|
2021-06-05 07:29:32 -07:00
|
|
|
|
// HeadStats are the statistics for the head component of the DB.
type HeadStats struct {
	// WALReplayStatus tracks the progress of WAL replay on startup.
	WALReplayStatus *WALReplayStatus
}
|
|
|
|
|
|
|
|
|
|
// NewHeadStats returns a new HeadStats object.
|
|
|
|
|
func NewHeadStats() *HeadStats {
|
|
|
|
|
return &HeadStats{
|
|
|
|
|
WALReplayStatus: &WALReplayStatus{},
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// WALReplayStatus contains status information about the WAL replay.
// The embedded RWMutex guards the fields; readers should use
// GetWALReplayStatus to obtain a consistent snapshot.
type WALReplayStatus struct {
	sync.RWMutex
	// Min is the first WAL segment to replay.
	Min int
	// Max is the last WAL segment to replay.
	Max int
	// Current is the segment currently being replayed.
	Current int
}
|
|
|
|
|
|
|
|
|
|
// GetWALReplayStatus returns the WAL replay status information.
|
|
|
|
|
func (s *WALReplayStatus) GetWALReplayStatus() WALReplayStatus {
|
|
|
|
|
s.RLock()
|
|
|
|
|
defer s.RUnlock()
|
|
|
|
|
|
|
|
|
|
return WALReplayStatus{
|
|
|
|
|
Min: s.Min,
|
|
|
|
|
Max: s.Max,
|
|
|
|
|
Current: s.Current,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-04 18:06:13 -08:00
|
|
|
|
// cardinalityCacheExpirationTime is how long the result of
// PostingsCardinalityStats is cached before being recomputed.
const cardinalityCacheExpirationTime = time.Duration(30) * time.Second
|
|
|
|
|
|
|
|
|
|
// PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names.
|
|
|
|
|
func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats {
|
|
|
|
|
h.cardinalityMutex.Lock()
|
|
|
|
|
defer h.cardinalityMutex.Unlock()
|
|
|
|
|
currentTime := time.Duration(time.Now().Unix()) * time.Second
|
|
|
|
|
seconds := currentTime - h.lastPostingsStatsCall
|
|
|
|
|
if seconds > cardinalityCacheExpirationTime {
|
|
|
|
|
h.cardinalityCache = nil
|
|
|
|
|
}
|
|
|
|
|
if h.cardinalityCache != nil {
|
|
|
|
|
return h.cardinalityCache
|
|
|
|
|
}
|
|
|
|
|
h.cardinalityCache = h.postings.Stats(statsByLabelName)
|
|
|
|
|
h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second
|
|
|
|
|
|
|
|
|
|
return h.cardinalityCache
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-28 15:39:17 -07:00
|
|
|
|
// NewHead opens the head block in dir.
// A nil logger is replaced by a no-op logger and nil stats by a fresh
// HeadStats. opts.ChunkRange must be at least 1; a nil SeriesCallback and a
// nil ChunkPool are replaced by defaults.
func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOptions, stats *HeadStats) (*Head, error) {
	if l == nil {
		l = log.NewNopLogger()
	}
	if opts.ChunkRange < 1 {
		return nil, errors.Errorf("invalid chunk range %d", opts.ChunkRange)
	}
	if opts.SeriesCallback == nil {
		opts.SeriesCallback = &noopSeriesLifecycleCallback{}
	}

	es, err := NewCircularExemplarStorage(opts.NumExemplars, r)
	if err != nil {
		return nil, err
	}

	if stats == nil {
		stats = NewHeadStats()
	}

	h := &Head{
		wal:        wal,
		logger:     l,
		opts:       opts,
		exemplars:  es,
		series:     newStripeSeries(opts.StripeSize, opts.SeriesCallback),
		symbols:    map[string]struct{}{},
		postings:   index.NewUnorderedMemPostings(),
		tombstones: tombstones.NewMemTombstones(),
		iso:        newIsolation(),
		deleted:    map[uint64]int{},
		memChunkPool: sync.Pool{
			New: func() interface{} {
				return &memChunk{}
			},
		},
		stats: stats,
	}
	// Initialize the atomic time bounds before metrics are created, since the
	// metric closures read them via MinTime/MaxTime.
	h.chunkRange.Store(opts.ChunkRange)
	h.minTime.Store(math.MaxInt64)
	h.maxTime.Store(math.MinInt64)
	h.lastWALTruncationTime.Store(math.MinInt64)
	h.metrics = newHeadMetrics(h, r)

	if opts.ChunkPool == nil {
		opts.ChunkPool = chunkenc.NewPool()
	}

	// The chunk disk mapper persists head chunks under <ChunkDirRoot>/chunks_head.
	h.chunkDiskMapper, err = chunks.NewChunkDiskMapper(
		mmappedChunksDir(opts.ChunkDirRoot),
		opts.ChunkPool,
		opts.ChunkWriteBufferSize,
	)
	if err != nil {
		return nil, err
	}

	return h, nil
}
|
|
|
|
|
|
2020-05-06 08:30:00 -07:00
|
|
|
|
func mmappedChunksDir(dir string) string { return filepath.Join(dir, "chunks_head") }
|
|
|
|
|
|
2021-03-16 02:47:45 -07:00
|
|
|
|
// ExemplarQuerier returns an exemplar querier by delegating to the head's
// configured ExemplarStorage.
func (h *Head) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
	return h.exemplars.ExemplarQuerier(ctx)
}
|
|
|
|
|
|
2017-10-07 06:55:11 -07:00
|
|
|
|
// processWALSamples adds a partition of samples it receives to the head and passes
// them on to other workers.
// Samples before the mint timestamp are discarded.
func (h *Head) processWALSamples(
	minValidTime int64,
	input <-chan []record.RefSample, output chan<- []record.RefSample,
) (unknownRefs uint64) {
	// Closing output signals downstream that this worker is done.
	defer close(output)

	// Mitigate lock contention in getByID.
	refSeries := map[uint64]*memSeries{}

	// Track the observed time range so the head bounds can be updated once
	// at the end instead of per sample.
	mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)

	for samples := range input {
		for _, s := range samples {
			// Drop samples older than the minimum valid time.
			if s.T < minValidTime {
				continue
			}
			ms := refSeries[s.Ref]
			if ms == nil {
				ms = h.series.getByID(s.Ref)
				if ms == nil {
					// Sample references a series we don't know about;
					// count it for error reporting by the caller.
					unknownRefs++
					continue
				}
				refSeries[s.Ref] = ms
			}
			if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper); chunkCreated {
				h.metrics.chunksCreated.Inc()
				h.metrics.chunks.Inc()
			}
			if s.T > maxt {
				maxt = s.T
			}
			if s.T < mint {
				mint = s.T
			}
		}
		// Pass the (possibly reusable) batch back to the producer.
		output <- samples
	}
	h.updateMinMaxTime(mint, maxt)

	return unknownRefs
}
|
2018-05-17 06:04:32 -07:00
|
|
|
|
|
2018-05-25 14:19:32 -07:00
|
|
|
|
// updateMinMaxTime widens the head's [minTime, maxTime] bounds to include
// the given range. Each bound is updated with a CAS retry loop so concurrent
// callers cannot lose updates.
func (h *Head) updateMinMaxTime(mint, maxt int64) {
	for {
		lt := h.MinTime()
		if mint >= lt {
			// Current minimum is already low enough.
			break
		}
		if h.minTime.CAS(lt, mint) {
			break
		}
		// CAS lost to a concurrent update; re-read and retry.
	}
	for {
		ht := h.MaxTime()
		if maxt <= ht {
			// Current maximum is already high enough.
			break
		}
		if h.maxTime.CAS(ht, maxt) {
			break
		}
		// CAS lost to a concurrent update; re-read and retry.
	}
}
|
|
|
|
|
|
2020-05-06 08:30:00 -07:00
|
|
|
|
func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks map[uint64][]*mmappedChunk) (err error) {
|
2017-09-21 02:02:30 -07:00
|
|
|
|
// Track number of samples that referenced a series we don't know about
|
|
|
|
|
// for error reporting.
|
2020-07-27 21:42:42 -07:00
|
|
|
|
var unknownRefs atomic.Uint64
|
2021-05-06 13:53:52 -07:00
|
|
|
|
var unknownExemplarRefs atomic.Uint64
|
2017-10-07 06:55:11 -07:00
|
|
|
|
|
|
|
|
|
// Start workers that each process samples for a partition of the series ID space.
|
|
|
|
|
// They are connected through a ring of channels which ensures that all sample batches
|
|
|
|
|
// read from the WAL are processed in order.
|
|
|
|
|
var (
|
2021-05-06 13:53:52 -07:00
|
|
|
|
wg sync.WaitGroup
|
|
|
|
|
n = runtime.GOMAXPROCS(0)
|
|
|
|
|
inputs = make([]chan []record.RefSample, n)
|
|
|
|
|
outputs = make([]chan []record.RefSample, n)
|
|
|
|
|
exemplarsInput chan record.RefExemplar
|
2020-05-20 06:22:08 -07:00
|
|
|
|
|
|
|
|
|
dec record.Decoder
|
|
|
|
|
shards = make([][]record.RefSample, n)
|
|
|
|
|
|
|
|
|
|
decoded = make(chan interface{}, 10)
|
|
|
|
|
decodeErr, seriesCreationErr error
|
|
|
|
|
seriesPool = sync.Pool{
|
|
|
|
|
New: func() interface{} {
|
|
|
|
|
return []record.RefSeries{}
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
samplesPool = sync.Pool{
|
|
|
|
|
New: func() interface{} {
|
|
|
|
|
return []record.RefSample{}
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
tstonesPool = sync.Pool{
|
|
|
|
|
New: func() interface{} {
|
|
|
|
|
return []tombstones.Stone{}
|
|
|
|
|
},
|
|
|
|
|
}
|
2021-05-06 13:53:52 -07:00
|
|
|
|
exemplarsPool = sync.Pool{
|
|
|
|
|
New: func() interface{} {
|
|
|
|
|
return []record.RefExemplar{}
|
|
|
|
|
},
|
|
|
|
|
}
|
2017-10-07 06:55:11 -07:00
|
|
|
|
)
|
2017-10-11 01:12:29 -07:00
|
|
|
|
|
2019-06-14 08:39:22 -07:00
|
|
|
|
defer func() {
|
|
|
|
|
// For CorruptionErr ensure to terminate all workers before exiting.
|
2020-05-20 06:22:08 -07:00
|
|
|
|
_, ok := err.(*wal.CorruptionErr)
|
|
|
|
|
if ok || seriesCreationErr != nil {
|
2019-06-14 08:39:22 -07:00
|
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
|
close(inputs[i])
|
|
|
|
|
for range outputs[i] {
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-05-06 13:53:52 -07:00
|
|
|
|
close(exemplarsInput)
|
2019-06-14 08:39:22 -07:00
|
|
|
|
wg.Wait()
|
|
|
|
|
}
|
|
|
|
|
}()
|
|
|
|
|
|
2020-05-20 06:22:08 -07:00
|
|
|
|
wg.Add(n)
|
2017-10-07 06:55:11 -07:00
|
|
|
|
for i := 0; i < n; i++ {
|
2019-09-19 02:15:41 -07:00
|
|
|
|
outputs[i] = make(chan []record.RefSample, 300)
|
|
|
|
|
inputs[i] = make(chan []record.RefSample, 300)
|
2017-10-07 06:55:11 -07:00
|
|
|
|
|
2019-09-19 02:15:41 -07:00
|
|
|
|
go func(input <-chan []record.RefSample, output chan<- []record.RefSample) {
|
2020-07-27 21:42:42 -07:00
|
|
|
|
unknown := h.processWALSamples(h.minValidTime.Load(), input, output)
|
|
|
|
|
unknownRefs.Add(unknown)
|
2017-10-11 01:12:29 -07:00
|
|
|
|
wg.Done()
|
2018-10-31 15:52:26 -07:00
|
|
|
|
}(inputs[i], outputs[i])
|
2017-10-07 06:55:11 -07:00
|
|
|
|
}
|
2017-09-21 02:02:30 -07:00
|
|
|
|
|
2021-05-06 13:53:52 -07:00
|
|
|
|
wg.Add(1)
|
|
|
|
|
exemplarsInput = make(chan record.RefExemplar, 300)
|
|
|
|
|
go func(input <-chan record.RefExemplar) {
|
|
|
|
|
defer wg.Done()
|
|
|
|
|
for e := range input {
|
|
|
|
|
ms := h.series.getByID(e.Ref)
|
|
|
|
|
if ms == nil {
|
|
|
|
|
unknownExemplarRefs.Inc()
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if e.T < h.minValidTime.Load() {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
// At the moment the only possible error here is out of order exemplars, which we shouldn't see when
|
|
|
|
|
// replaying the WAL, so lets just log the error if it's not that type.
|
|
|
|
|
err = h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels})
|
|
|
|
|
if err != nil && err == storage.ErrOutOfOrderExemplar {
|
|
|
|
|
level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}(exemplarsInput)
|
|
|
|
|
|
2019-11-07 08:26:45 -08:00
|
|
|
|
go func() {
|
|
|
|
|
defer close(decoded)
|
|
|
|
|
for r.Next() {
|
|
|
|
|
rec := r.Record()
|
|
|
|
|
switch dec.Type(rec) {
|
|
|
|
|
case record.Series:
|
|
|
|
|
series := seriesPool.Get().([]record.RefSeries)[:0]
|
|
|
|
|
series, err = dec.Series(rec, series)
|
|
|
|
|
if err != nil {
|
2020-05-20 06:22:08 -07:00
|
|
|
|
decodeErr = &wal.CorruptionErr{
|
2019-11-07 08:26:45 -08:00
|
|
|
|
Err: errors.Wrap(err, "decode series"),
|
|
|
|
|
Segment: r.Segment(),
|
|
|
|
|
Offset: r.Offset(),
|
|
|
|
|
}
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
decoded <- series
|
|
|
|
|
case record.Samples:
|
|
|
|
|
samples := samplesPool.Get().([]record.RefSample)[:0]
|
|
|
|
|
samples, err = dec.Samples(rec, samples)
|
|
|
|
|
if err != nil {
|
2020-05-20 06:22:08 -07:00
|
|
|
|
decodeErr = &wal.CorruptionErr{
|
2019-11-07 08:26:45 -08:00
|
|
|
|
Err: errors.Wrap(err, "decode samples"),
|
|
|
|
|
Segment: r.Segment(),
|
|
|
|
|
Offset: r.Offset(),
|
|
|
|
|
}
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
decoded <- samples
|
|
|
|
|
case record.Tombstones:
|
|
|
|
|
tstones := tstonesPool.Get().([]tombstones.Stone)[:0]
|
|
|
|
|
tstones, err = dec.Tombstones(rec, tstones)
|
|
|
|
|
if err != nil {
|
2020-05-20 06:22:08 -07:00
|
|
|
|
decodeErr = &wal.CorruptionErr{
|
2019-11-07 08:26:45 -08:00
|
|
|
|
Err: errors.Wrap(err, "decode tombstones"),
|
|
|
|
|
Segment: r.Segment(),
|
|
|
|
|
Offset: r.Offset(),
|
|
|
|
|
}
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
decoded <- tstones
|
2021-05-06 13:53:52 -07:00
|
|
|
|
case record.Exemplars:
|
|
|
|
|
exemplars := exemplarsPool.Get().([]record.RefExemplar)[:0]
|
|
|
|
|
exemplars, err = dec.Exemplars(rec, exemplars)
|
|
|
|
|
if err != nil {
|
|
|
|
|
decodeErr = &wal.CorruptionErr{
|
|
|
|
|
Err: errors.Wrap(err, "decode exemplars"),
|
|
|
|
|
Segment: r.Segment(),
|
|
|
|
|
Offset: r.Offset(),
|
|
|
|
|
}
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
decoded <- exemplars
|
2019-11-07 08:26:45 -08:00
|
|
|
|
default:
|
2020-10-05 02:09:59 -07:00
|
|
|
|
// Noop.
|
2018-05-17 06:04:32 -07:00
|
|
|
|
}
|
2019-11-07 08:26:45 -08:00
|
|
|
|
}
|
|
|
|
|
}()
|
|
|
|
|
|
2020-05-20 06:22:08 -07:00
|
|
|
|
Outer:
|
2019-11-07 08:26:45 -08:00
|
|
|
|
for d := range decoded {
|
|
|
|
|
switch v := d.(type) {
|
|
|
|
|
case []record.RefSeries:
|
|
|
|
|
for _, s := range v {
|
2020-05-20 06:22:08 -07:00
|
|
|
|
series, created, err := h.getOrCreateWithID(s.Ref, s.Labels.Hash(), s.Labels)
|
|
|
|
|
if err != nil {
|
|
|
|
|
seriesCreationErr = err
|
|
|
|
|
break Outer
|
|
|
|
|
}
|
2019-06-06 06:28:54 -07:00
|
|
|
|
|
2020-05-06 08:30:00 -07:00
|
|
|
|
if created {
|
|
|
|
|
// If this series gets a duplicate record, we don't restore its mmapped chunks,
|
|
|
|
|
// and instead restore everything from WAL records.
|
|
|
|
|
series.mmappedChunks = mmappedChunks[series.ref]
|
|
|
|
|
|
|
|
|
|
h.metrics.chunks.Add(float64(len(series.mmappedChunks)))
|
|
|
|
|
h.metrics.chunksCreated.Add(float64(len(series.mmappedChunks)))
|
|
|
|
|
|
|
|
|
|
if len(series.mmappedChunks) > 0 {
|
|
|
|
|
h.updateMinMaxTime(series.minTime(), series.maxTime())
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
// TODO(codesome) Discard old samples and mmapped chunks and use mmap chunks for the new series ID.
|
|
|
|
|
|
2019-06-06 06:28:54 -07:00
|
|
|
|
// There's already a different ref for this series.
|
|
|
|
|
multiRef[s.Ref] = series.ref
|
|
|
|
|
}
|
2017-09-19 01:20:19 -07:00
|
|
|
|
|
2020-07-27 21:42:42 -07:00
|
|
|
|
if h.lastSeriesID.Load() < s.Ref {
|
|
|
|
|
h.lastSeriesID.Store(s.Ref)
|
2018-05-17 06:04:32 -07:00
|
|
|
|
}
|
2017-09-19 01:20:19 -07:00
|
|
|
|
}
|
2021-02-04 03:57:16 -08:00
|
|
|
|
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
|
2019-11-07 08:26:45 -08:00
|
|
|
|
seriesPool.Put(v)
|
|
|
|
|
case []record.RefSample:
|
|
|
|
|
samples := v
|
2018-05-17 06:04:32 -07:00
|
|
|
|
// We split up the samples into chunks of 5000 samples or less.
|
|
|
|
|
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
|
|
|
|
|
// cause thousands of very large in flight buffers occupying large amounts
|
|
|
|
|
// of unused memory.
|
|
|
|
|
for len(samples) > 0 {
|
2018-10-31 15:52:26 -07:00
|
|
|
|
m := 5000
|
|
|
|
|
if len(samples) < m {
|
|
|
|
|
m = len(samples)
|
|
|
|
|
}
|
|
|
|
|
for i := 0; i < n; i++ {
|
2019-09-19 02:15:41 -07:00
|
|
|
|
var buf []record.RefSample
|
2018-10-31 15:52:26 -07:00
|
|
|
|
select {
|
|
|
|
|
case buf = <-outputs[i]:
|
|
|
|
|
default:
|
|
|
|
|
}
|
|
|
|
|
shards[i] = buf[:0]
|
2018-05-17 06:04:32 -07:00
|
|
|
|
}
|
2018-10-31 15:52:26 -07:00
|
|
|
|
for _, sam := range samples[:m] {
|
2019-06-06 06:28:54 -07:00
|
|
|
|
if r, ok := multiRef[sam.Ref]; ok {
|
|
|
|
|
sam.Ref = r
|
|
|
|
|
}
|
2018-10-31 15:52:26 -07:00
|
|
|
|
mod := sam.Ref % uint64(n)
|
|
|
|
|
shards[mod] = append(shards[mod], sam)
|
2018-05-17 06:04:32 -07:00
|
|
|
|
}
|
2018-10-31 15:52:26 -07:00
|
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
|
inputs[i] <- shards[i]
|
2018-05-17 06:04:32 -07:00
|
|
|
|
}
|
2018-10-31 15:52:26 -07:00
|
|
|
|
samples = samples[m:]
|
2017-10-23 07:22:24 -07:00
|
|
|
|
}
|
2021-02-04 03:57:16 -08:00
|
|
|
|
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
|
2019-11-07 08:26:45 -08:00
|
|
|
|
samplesPool.Put(v)
|
|
|
|
|
case []tombstones.Stone:
|
|
|
|
|
for _, s := range v {
|
2019-09-19 02:15:41 -07:00
|
|
|
|
for _, itv := range s.Intervals {
|
2020-07-27 21:42:42 -07:00
|
|
|
|
if itv.Maxt < h.minValidTime.Load() {
|
2018-05-17 06:04:32 -07:00
|
|
|
|
continue
|
|
|
|
|
}
|
2019-09-19 02:15:41 -07:00
|
|
|
|
if m := h.series.getByID(s.Ref); m == nil {
|
2020-07-27 21:42:42 -07:00
|
|
|
|
unknownRefs.Inc()
|
2019-05-16 06:36:44 -07:00
|
|
|
|
continue
|
|
|
|
|
}
|
2020-01-20 07:38:00 -08:00
|
|
|
|
h.tombstones.AddInterval(s.Ref, itv)
|
2017-09-06 07:20:37 -07:00
|
|
|
|
}
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
2021-02-04 03:57:16 -08:00
|
|
|
|
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
|
2019-11-07 08:26:45 -08:00
|
|
|
|
tstonesPool.Put(v)
|
2021-05-06 13:53:52 -07:00
|
|
|
|
case []record.RefExemplar:
|
|
|
|
|
for _, e := range v {
|
|
|
|
|
exemplarsInput <- e
|
|
|
|
|
}
|
|
|
|
|
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
|
|
|
|
|
exemplarsPool.Put(v)
|
2018-05-17 06:04:32 -07:00
|
|
|
|
default:
|
2019-11-07 08:26:45 -08:00
|
|
|
|
panic(fmt.Errorf("unexpected decoded type: %T", d))
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
2017-09-21 02:02:30 -07:00
|
|
|
|
|
2020-05-20 06:22:08 -07:00
|
|
|
|
if decodeErr != nil {
|
|
|
|
|
return decodeErr
|
|
|
|
|
}
|
|
|
|
|
if seriesCreationErr != nil {
|
|
|
|
|
// Drain the channel to unblock the goroutine.
|
|
|
|
|
for range decoded {
|
|
|
|
|
}
|
|
|
|
|
return seriesCreationErr
|
2019-11-07 08:26:45 -08:00
|
|
|
|
}
|
|
|
|
|
|
2018-10-31 15:52:26 -07:00
|
|
|
|
// Signal termination to each worker and wait for it to close its output channel.
|
|
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
|
close(inputs[i])
|
|
|
|
|
for range outputs[i] {
|
|
|
|
|
}
|
2017-10-07 06:55:11 -07:00
|
|
|
|
}
|
2021-05-06 13:53:52 -07:00
|
|
|
|
close(exemplarsInput)
|
2017-10-11 01:12:29 -07:00
|
|
|
|
wg.Wait()
|
|
|
|
|
|
2019-06-14 08:39:22 -07:00
|
|
|
|
if r.Err() != nil {
|
|
|
|
|
return errors.Wrap(r.Err(), "read records")
|
|
|
|
|
}
|
|
|
|
|
|
2021-05-06 13:53:52 -07:00
|
|
|
|
if unknownRefs.Load() > 0 || unknownExemplarRefs.Load() > 0 {
|
|
|
|
|
level.Warn(h.logger).Log("msg", "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load())
|
2018-05-17 06:04:32 -07:00
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Init loads data from the write ahead log and prepares the head for writes.
// It should be called before using an appender so that it
// limits the ingested samples to the head min valid time.
func (h *Head) Init(minValidTime int64) error {
	h.minValidTime.Store(minValidTime)
	defer h.postings.EnsureOrder()
	defer h.gc() // After loading the wal remove the obsolete data from the head.

	level.Info(h.logger).Log("msg", "Replaying on-disk memory mappable chunks if any")
	start := time.Now()

	// First restore the m-mapped chunks; anything not covered by them is
	// recovered from the WAL below.
	mmappedChunks, err := h.loadMmappedChunks()
	if err != nil {
		level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err)
		if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok {
			h.metrics.mmapChunkCorruptionTotal.Inc()
		}
		// If this fails, data will be recovered from WAL.
		// Hence we wont lose any data (given WAL is not corrupt).
		mmappedChunks = h.removeCorruptedMmappedChunks(err)
	}

	level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(start).String())
	if h.wal == nil {
		// Nothing more to replay without a WAL.
		level.Info(h.logger).Log("msg", "WAL not found")
		return nil
	}

	level.Info(h.logger).Log("msg", "Replaying WAL, this may take a while")

	checkpointReplayStart := time.Now()
	// Backfill the checkpoint first if it exists.
	dir, startFrom, err := wal.LastCheckpoint(h.wal.Dir())
	if err != nil && err != record.ErrNotFound {
		return errors.Wrap(err, "find last checkpoint")
	}
	// Maps old series refs to the rewritten refs for duplicate series records.
	multiRef := map[uint64]uint64{}
	if err == nil {
		sr, err := wal.NewSegmentsReader(dir)
		if err != nil {
			return errors.Wrap(err, "open checkpoint")
		}
		defer func() {
			if err := sr.Close(); err != nil {
				level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err)
			}
		}()

		// A corrupted checkpoint is a hard error for now and requires user
		// intervention. There's likely little data that can be recovered anyway.
		if err := h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks); err != nil {
			return errors.Wrap(err, "backfill checkpoint")
		}
		// The checkpoint covers everything up to startFrom; resume after it.
		startFrom++
		level.Info(h.logger).Log("msg", "WAL checkpoint loaded")
	}
	checkpointReplayDuration := time.Since(checkpointReplayStart)

	walReplayStart := time.Now()
	// Find the last segment.
	_, last, err := wal.Segments(h.wal.Dir())
	if err != nil {
		return errors.Wrap(err, "finding WAL segments")
	}

	h.startWALReplayStatus(startFrom, last)

	// Backfill segments from the most recent checkpoint onwards.
	for i := startFrom; i <= last; i++ {
		s, err := wal.OpenReadSegment(wal.SegmentName(h.wal.Dir(), i))
		if err != nil {
			return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i))
		}

		sr := wal.NewSegmentBufReader(s)
		err = h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks)
		// Close the reader regardless of the load result; only log close errors.
		if err := sr.Close(); err != nil {
			level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err)
		}
		if err != nil {
			return err
		}
		level.Info(h.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", last)
		h.updateWALReplayStatusRead(i)
	}

	// Total duration is measured from the start of the m-mapped chunk replay.
	walReplayDuration := time.Since(start)
	h.metrics.walTotalReplayDuration.Set(walReplayDuration.Seconds())
	level.Info(h.logger).Log(
		"msg", "WAL replay completed",
		"checkpoint_replay_duration", checkpointReplayDuration.String(),
		"wal_replay_duration", time.Since(walReplayStart).String(),
		"total_replay_duration", walReplayDuration.String(),
	)

	return nil
}
|
2017-01-06 03:37:28 -08:00
|
|
|
|
|
2021-02-17 21:22:35 -08:00
|
|
|
|
// SetMinValidTime sets the minimum timestamp the head can ingest.
|
|
|
|
|
func (h *Head) SetMinValidTime(minValidTime int64) {
|
|
|
|
|
h.minValidTime.Store(minValidTime)
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-06 08:30:00 -07:00
|
|
|
|
// loadMmappedChunks iterates all on-disk m-mapped chunks via the chunk disk
// mapper and returns them grouped by series reference, in on-disk order.
// Chunks entirely before the head's min valid time are skipped. An
// out-of-order chunk for a series is reported as a chunks.CorruptionErr.
func (h *Head) loadMmappedChunks() (map[uint64][]*mmappedChunk, error) {
	mmappedChunks := map[uint64][]*mmappedChunk{}
	if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef, chunkRef uint64, mint, maxt int64, numSamples uint16) error {
		// Chunk is entirely before the valid window; ignore it.
		if maxt < h.minValidTime.Load() {
			return nil
		}

		slice := mmappedChunks[seriesRef]
		if len(slice) > 0 {
			// Chunks for a series must be strictly increasing in time.
			if slice[len(slice)-1].maxTime >= mint {
				return &chunks.CorruptionErr{
					Err: errors.Errorf("out of sequence m-mapped chunk for series ref %d", seriesRef),
				}
			}
		}

		slice = append(slice, &mmappedChunk{
			ref:        chunkRef,
			minTime:    mint,
			maxTime:    maxt,
			numSamples: numSamples,
		})
		mmappedChunks[seriesRef] = slice
		return nil
	}); err != nil {
		return nil, errors.Wrap(err, "iterate on on-disk chunks")
	}
	return mmappedChunks, nil
}
|
|
|
|
|
|
|
|
|
|
// removeCorruptedMmappedChunks attempts to delete the corrupted mmapped chunks and if it fails, it clears all the previously
|
|
|
|
|
// loaded mmapped chunks.
|
|
|
|
|
func (h *Head) removeCorruptedMmappedChunks(err error) map[uint64][]*mmappedChunk {
|
|
|
|
|
level.Info(h.logger).Log("msg", "Deleting mmapped chunk files")
|
|
|
|
|
|
|
|
|
|
if err := h.chunkDiskMapper.DeleteCorrupted(err); err != nil {
|
|
|
|
|
level.Info(h.logger).Log("msg", "Deletion of mmap chunk files failed, discarding chunk files completely", "err", err)
|
|
|
|
|
return map[uint64][]*mmappedChunk{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
level.Info(h.logger).Log("msg", "Deletion of mmap chunk files successful, reattempting m-mapping the on-disk chunks")
|
|
|
|
|
mmappedChunks, err := h.loadMmappedChunks()
|
|
|
|
|
if err != nil {
|
|
|
|
|
level.Error(h.logger).Log("msg", "Loading on-disk chunks failed, discarding chunk files completely", "err", err)
|
|
|
|
|
mmappedChunks = map[uint64][]*mmappedChunk{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return mmappedChunks
|
|
|
|
|
}
|
|
|
|
|
|
2020-10-19 08:27:08 -07:00
|
|
|
|
// Truncate removes old data before mint from the head and WAL.
|
2018-09-25 06:48:33 -07:00
|
|
|
|
func (h *Head) Truncate(mint int64) (err error) {
|
2020-10-19 08:27:08 -07:00
|
|
|
|
initialize := h.MinTime() == math.MaxInt64
|
|
|
|
|
if err := h.truncateMemory(mint); err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
if initialize {
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
return h.truncateWAL(mint)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// truncateMemory removes old data before mint from the head.
// It adjusts the head's min/max/min-valid times, garbage-collects dropped
// series, and truncates the on-disk chunk m-mapper. WAL truncation is left to
// truncateWAL.
func (h *Head) truncateMemory(mint int64) (err error) {
	defer func() {
		if err != nil {
			h.metrics.headTruncateFail.Inc()
		}
	}()
	// The head is uninitialized while MinTime is still the MaxInt64 sentinel.
	initialize := h.MinTime() == math.MaxInt64

	// Nothing to truncate if the head already starts at or after mint,
	// unless this is the initializing call.
	if h.MinTime() >= mint && !initialize {
		return nil
	}
	h.minTime.Store(mint)
	h.minValidTime.Store(mint)

	// Ensure that max time is at least as high as min time.
	for h.MaxTime() < mint {
		h.maxTime.CAS(h.MaxTime(), mint)
	}

	// This was an initial call to Truncate after loading blocks on startup.
	// We haven't read back the WAL yet, so do not attempt to truncate it.
	if initialize {
		return nil
	}

	h.metrics.headTruncateTotal.Inc()
	start := time.Now()

	// gc returns the lowest timestamp actually remaining in the head.
	actualMint := h.gc()
	level.Info(h.logger).Log("msg", "Head GC completed", "duration", time.Since(start))
	h.metrics.gcDuration.Observe(time.Since(start).Seconds())
	if actualMint > h.minTime.Load() {
		// The actual mint of the Head is higher than the one asked to truncate.
		appendableMinValidTime := h.appendableMinValidTime()
		if actualMint < appendableMinValidTime {
			h.minTime.Store(actualMint)
			h.minValidTime.Store(actualMint)
		} else {
			// The actual min time is in the appendable window.
			// So we set the mint to the appendableMinValidTime.
			h.minTime.Store(appendableMinValidTime)
			h.minValidTime.Store(appendableMinValidTime)
		}
	}

	// Truncate the chunk m-mapper.
	if err := h.chunkDiskMapper.Truncate(mint); err != nil {
		return errors.Wrap(err, "truncate chunks.HeadReadWriter")
	}
	return nil
}
|
2020-05-06 08:30:00 -07:00
|
|
|
|
|
2020-10-19 08:27:08 -07:00
|
|
|
|
// truncateWAL removes old data before mint from the WAL.
// It writes a checkpoint over the lower two-thirds of the completed segments
// (keeping records for series still alive or recently deleted) and then drops
// the superseded segments and older checkpoints.
func (h *Head) truncateWAL(mint int64) error {
	// Nothing to do without a WAL; never truncate twice for the same mint.
	if h.wal == nil || mint <= h.lastWALTruncationTime.Load() {
		return nil
	}
	start := time.Now()
	h.lastWALTruncationTime.Store(mint)

	first, last, err := wal.Segments(h.wal.Dir())
	if err != nil {
		return errors.Wrap(err, "get segment range")
	}
	// Start a new segment, so low ingestion volume TSDB don't have more WAL than
	// needed.
	if err := h.wal.NextSegment(); err != nil {
		return errors.Wrap(err, "next segment")
	}
	last-- // Never consider last segment for checkpoint.
	if last < 0 {
		return nil // no segments yet.
	}
	// The lower two thirds of segments should contain mostly obsolete samples.
	// If we have less than two segments, it's not worth checkpointing yet.
	// With the default 2h blocks, this will keeping up to around 3h worth
	// of WAL segments.
	last = first + (last-first)*2/3
	if last <= first {
		return nil
	}

	// keep reports whether a series record must survive the checkpoint: either
	// the series is still in memory, or it was deleted recently enough that we
	// still track it.
	keep := func(id uint64) bool {
		if h.series.getByID(id) != nil {
			return true
		}
		h.deletedMtx.Lock()
		_, ok := h.deleted[id]
		h.deletedMtx.Unlock()
		return ok
	}
	h.metrics.checkpointCreationTotal.Inc()
	if _, err = wal.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil {
		h.metrics.checkpointCreationFail.Inc()
		if _, ok := errors.Cause(err).(*wal.CorruptionErr); ok {
			h.metrics.walCorruptionsTotal.Inc()
		}
		return errors.Wrap(err, "create checkpoint")
	}
	if err := h.wal.Truncate(last + 1); err != nil {
		// If truncating fails, we'll just try again at the next checkpoint.
		// Leftover segments will just be ignored in the future if there's a checkpoint
		// that supersedes them.
		level.Error(h.logger).Log("msg", "truncating segments failed", "err", err)
	}

	// The checkpoint is written and segments before it is truncated, so we no
	// longer need to track deleted series that are before it.
	h.deletedMtx.Lock()
	for ref, segment := range h.deleted {
		if segment < first {
			delete(h.deleted, ref)
		}
	}
	h.deletedMtx.Unlock()

	h.metrics.checkpointDeleteTotal.Inc()
	if err := wal.DeleteCheckpoints(h.wal.Dir(), last); err != nil {
		// Leftover old checkpoints do not cause problems down the line beyond
		// occupying disk space.
		// They will just be ignored since a higher checkpoint exists.
		level.Error(h.logger).Log("msg", "delete old checkpoints", "err", err)
		h.metrics.checkpointDeleteFail.Inc()
	}
	h.metrics.walTruncateDuration.Observe(time.Since(start).Seconds())

	level.Info(h.logger).Log("msg", "WAL checkpoint complete",
		"first", first, "last", last, "duration", time.Since(start))

	return nil
}
|
|
|
|
|
|
|
|
|
|
// initTime initializes a head with the first timestamp. This only needs to be called
|
2018-12-11 12:09:17 -08:00
|
|
|
|
// for a completely fresh head with an empty WAL.
|
2020-08-03 02:45:53 -07:00
|
|
|
|
func (h *Head) initTime(t int64) {
|
2020-07-27 21:42:42 -07:00
|
|
|
|
if !h.minTime.CAS(math.MaxInt64, t) {
|
2020-08-03 02:45:53 -07:00
|
|
|
|
return
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
2017-09-07 04:04:02 -07:00
|
|
|
|
// Ensure that max time is initialized to at least the min time we just set.
|
|
|
|
|
// Concurrent appenders may already have set it to a higher value.
|
2020-07-27 21:42:42 -07:00
|
|
|
|
h.maxTime.CAS(math.MinInt64, t)
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
|
|
|
|
|
2020-04-29 09:16:14 -07:00
|
|
|
|
// Stats is a snapshot of aggregate head statistics as returned by Head.Stats.
type Stats struct {
	NumSeries         uint64 // Number of active series in the head.
	MinTime, MaxTime  int64  // Time range currently covered by the head.
	IndexPostingStats *index.PostingsStats // Cardinality statistics of the head index.
}
|
|
|
|
|
|
|
|
|
|
// Stats returns important current HEAD statistics. Note that it is expensive to
|
|
|
|
|
// calculate these.
|
|
|
|
|
func (h *Head) Stats(statsByLabelName string) *Stats {
|
|
|
|
|
return &Stats{
|
|
|
|
|
NumSeries: h.NumSeries(),
|
|
|
|
|
MaxTime: h.MaxTime(),
|
|
|
|
|
MinTime: h.MinTime(),
|
|
|
|
|
IndexPostingStats: h.PostingsCardinalityStats(statsByLabelName),
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-02-14 01:50:24 -08:00
|
|
|
|
// RangeHead wraps a Head and restricts its data to the closed time range
// [mint, maxt] for reads.
type RangeHead struct {
	head       *Head // Underlying head all reads are delegated to.
	mint, maxt int64 // Inclusive time bounds applied to index/chunk reads.
}
|
|
|
|
|
|
2020-03-25 16:17:56 -07:00
|
|
|
|
// NewRangeHead returns a *RangeHead.
|
2020-02-14 01:50:24 -08:00
|
|
|
|
func NewRangeHead(head *Head, mint, maxt int64) *RangeHead {
|
|
|
|
|
return &RangeHead{
|
|
|
|
|
head: head,
|
|
|
|
|
mint: mint,
|
|
|
|
|
maxt: maxt,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-03-25 12:13:47 -07:00
|
|
|
|
func (h *RangeHead) Index() (IndexReader, error) {
|
|
|
|
|
return h.head.indexRange(h.mint, h.maxt), nil
|
2017-10-09 06:21:46 -07:00
|
|
|
|
}
|
|
|
|
|
|
2020-02-14 01:50:24 -08:00
|
|
|
|
func (h *RangeHead) Chunks() (ChunkReader, error) {
|
2020-05-22 02:03:23 -07:00
|
|
|
|
return h.head.chunksRange(h.mint, h.maxt, h.head.iso.State())
|
2017-10-09 06:21:46 -07:00
|
|
|
|
}
|
|
|
|
|
|
2020-02-14 01:50:24 -08:00
|
|
|
|
// Tombstones returns the tombstone reader shared with the underlying head.
func (h *RangeHead) Tombstones() (tombstones.Reader, error) {
	return h.head.tombstones, nil
}
|
|
|
|
|
|
2020-02-14 01:50:24 -08:00
|
|
|
|
// MinTime returns the lower inclusive time bound of the range head.
func (h *RangeHead) MinTime() int64 {
	return h.mint
}
|
|
|
|
|
|
2020-10-19 08:27:08 -07:00
|
|
|
|
// MaxTime returns the max time of actual data fetch-able from the head.
// This controls the chunks time range which is closed [b.MinTime, b.MaxTime].
func (h *RangeHead) MaxTime() int64 {
	return h.maxt
}
|
|
|
|
|
|
2020-10-19 08:27:08 -07:00
|
|
|
|
// BlockMaxTime returns the max time of the potential block created from this head.
// It's different to MaxTime as we need to add +1 millisecond to block maxt because block
// intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes.
func (h *RangeHead) BlockMaxTime() int64 {
	return h.MaxTime() + 1
}
|
|
|
|
|
|
2020-02-14 01:50:24 -08:00
|
|
|
|
// NumSeries returns the number of active series in the underlying head
// (not restricted to the range head's time bounds).
func (h *RangeHead) NumSeries() uint64 {
	return h.head.NumSeries()
}
|
|
|
|
|
|
2020-02-14 01:50:24 -08:00
|
|
|
|
func (h *RangeHead) Meta() BlockMeta {
|
2019-07-23 01:04:48 -07:00
|
|
|
|
return BlockMeta{
|
|
|
|
|
MinTime: h.MinTime(),
|
|
|
|
|
MaxTime: h.MaxTime(),
|
|
|
|
|
ULID: h.head.Meta().ULID,
|
|
|
|
|
Stats: BlockStats{
|
|
|
|
|
NumSeries: h.NumSeries(),
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-01-21 02:37:29 -08:00
|
|
|
|
// String returns a human-readable representation of the range head. It's important to
// keep this function in order to avoid the struct dump when the head is stringified in
// errors or logs.
func (h *RangeHead) String() string {
	return fmt.Sprintf("range head (mint: %d, maxt: %d)", h.MinTime(), h.MaxTime())
}
|
|
|
|
|
|
2018-01-13 09:51:50 -08:00
|
|
|
|
// initAppender is a helper to initialize the time bounds of the head
// upon the first sample it receives.
type initAppender struct {
	app  storage.Appender // Real appender; nil until the first append arrives.
	head *Head
}
|
|
|
|
|
|
2021-02-18 04:07:00 -08:00
|
|
|
|
func (a *initAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) {
|
2017-08-30 09:34:54 -07:00
|
|
|
|
if a.app != nil {
|
2021-02-18 04:07:00 -08:00
|
|
|
|
return a.app.Append(ref, lset, t, v)
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
2021-02-18 04:07:00 -08:00
|
|
|
|
|
2017-09-01 03:09:29 -07:00
|
|
|
|
a.head.initTime(t)
|
2020-04-17 11:51:03 -07:00
|
|
|
|
a.app = a.head.appender()
|
2021-02-18 04:07:00 -08:00
|
|
|
|
return a.app.Append(ref, lset, t, v)
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
|
|
|
|
|
2021-03-16 02:47:45 -07:00
|
|
|
|
func (a *initAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
|
|
|
|
|
// Check if exemplar storage is enabled.
|
2021-04-21 07:32:21 -07:00
|
|
|
|
if a.head.opts.NumExemplars <= 0 {
|
2021-03-16 02:47:45 -07:00
|
|
|
|
return 0, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if a.app != nil {
|
|
|
|
|
return a.app.AppendExemplar(ref, l, e)
|
|
|
|
|
}
|
|
|
|
|
// We should never reach here given we would call Append before AppendExemplar
|
|
|
|
|
// and we probably want to always base head/WAL min time on sample times.
|
|
|
|
|
a.head.initTime(e.Ts)
|
|
|
|
|
a.app = a.head.appender()
|
|
|
|
|
|
|
|
|
|
return a.app.AppendExemplar(ref, l, e)
|
|
|
|
|
}
|
|
|
|
|
|
2021-06-29 07:38:46 -07:00
|
|
|
|
func (a *initAppender) AppendHistogram(ref uint64, l labels.Labels, t int64, sh histogram.SparseHistogram) (uint64, error) {
|
2021-06-28 08:00:55 -07:00
|
|
|
|
if a.app != nil {
|
2021-06-29 07:38:46 -07:00
|
|
|
|
return a.app.AppendHistogram(ref, l, t, sh)
|
2021-06-28 08:00:55 -07:00
|
|
|
|
}
|
2021-06-29 07:38:46 -07:00
|
|
|
|
a.head.initTime(t)
|
2021-06-28 08:00:55 -07:00
|
|
|
|
a.app = a.head.appender()
|
|
|
|
|
|
2021-06-29 07:38:46 -07:00
|
|
|
|
return a.app.AppendHistogram(ref, l, t, sh)
|
2021-06-28 08:00:55 -07:00
|
|
|
|
}
|
|
|
|
|
|
2021-03-19 12:28:55 -07:00
|
|
|
|
var _ storage.GetRef = &initAppender{}
|
|
|
|
|
|
2021-03-24 08:24:58 -07:00
|
|
|
|
func (a *initAppender) GetRef(lset labels.Labels) (uint64, labels.Labels) {
|
2021-03-19 12:28:55 -07:00
|
|
|
|
if g, ok := a.app.(storage.GetRef); ok {
|
|
|
|
|
return g.GetRef(lset)
|
|
|
|
|
}
|
2021-03-24 08:24:58 -07:00
|
|
|
|
return 0, nil
|
2021-03-19 12:28:55 -07:00
|
|
|
|
}
|
|
|
|
|
|
2017-08-30 09:34:54 -07:00
|
|
|
|
func (a *initAppender) Commit() error {
|
|
|
|
|
if a.app == nil {
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
return a.app.Commit()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (a *initAppender) Rollback() error {
|
|
|
|
|
if a.app == nil {
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
return a.app.Rollback()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Appender returns a new Appender on the database.
|
2020-07-30 04:11:13 -07:00
|
|
|
|
func (h *Head) Appender(_ context.Context) storage.Appender {
|
2017-08-30 09:34:54 -07:00
|
|
|
|
h.metrics.activeAppenders.Inc()
|
|
|
|
|
|
|
|
|
|
// The head cache might not have a starting point yet. The init appender
|
|
|
|
|
// picks up the first appended timestamp as the base.
|
2018-05-25 14:19:32 -07:00
|
|
|
|
if h.MinTime() == math.MaxInt64 {
|
2020-02-12 11:22:27 -08:00
|
|
|
|
return &initAppender{
|
2020-04-17 11:51:03 -07:00
|
|
|
|
head: h,
|
2020-02-12 11:22:27 -08:00
|
|
|
|
}
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
2020-04-17 11:51:03 -07:00
|
|
|
|
return h.appender()
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
|
|
|
|
|
2020-04-17 11:51:03 -07:00
|
|
|
|
// appender returns a new headAppender. It acquires an isolation append ID and
// draws the sample/series buffers from the head's pools.
func (h *Head) appender() *headAppender {
	appendID := h.iso.newAppendID()
	cleanupAppendIDsBelow := h.iso.lowWatermark()

	// Allocate the exemplars buffer only if exemplars are enabled.
	var exemplarsBuf []exemplarWithSeriesRef
	if h.opts.NumExemplars > 0 {
		exemplarsBuf = h.getExemplarBuffer()
	}

	return &headAppender{
		head:                  h,
		minValidTime:          h.appendableMinValidTime(),
		mint:                  math.MaxInt64, // Shrinks towards the first appended sample time.
		maxt:                  math.MinInt64, // Grows towards the last appended sample time.
		samples:               h.getAppendBuffer(),
		sampleSeries:          h.getSeriesBuffer(),
		exemplars:             exemplarsBuf,
		appendID:              appendID,
		cleanupAppendIDsBelow: cleanupAppendIDsBelow,
		exemplarAppender:      h.exemplars,
	}
}
|
|
|
|
|
|
2020-11-25 05:03:30 -08:00
|
|
|
|
func (h *Head) appendableMinValidTime() int64 {
|
|
|
|
|
// Setting the minimum valid time to whichever is greater, the head min valid time or the compaction window,
|
|
|
|
|
// ensures that no samples will be added within the compaction window to avoid races.
|
|
|
|
|
return max(h.minValidTime.Load(), h.MaxTime()-h.chunkRange.Load()/2)
|
|
|
|
|
}
|
|
|
|
|
|
2018-12-04 02:30:49 -08:00
|
|
|
|
// max returns the larger of the two given int64 values.
func max(a, b int64) int64 {
	if b > a {
		return b
	}
	return a
}
|
|
|
|
|
|
2021-03-16 02:47:45 -07:00
|
|
|
|
func (h *Head) ExemplarAppender() storage.ExemplarAppender {
|
|
|
|
|
h.metrics.activeAppenders.Inc()
|
|
|
|
|
|
|
|
|
|
// The head cache might not have a starting point yet. The init appender
|
|
|
|
|
// picks up the first appended timestamp as the base.
|
|
|
|
|
if h.MinTime() == math.MaxInt64 {
|
|
|
|
|
return &initAppender{
|
|
|
|
|
head: h,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return h.appender()
|
|
|
|
|
}
|
|
|
|
|
|
2019-09-19 02:15:41 -07:00
|
|
|
|
func (h *Head) getAppendBuffer() []record.RefSample {
|
2017-08-30 09:34:54 -07:00
|
|
|
|
b := h.appendPool.Get()
|
|
|
|
|
if b == nil {
|
2019-09-19 02:15:41 -07:00
|
|
|
|
return make([]record.RefSample, 0, 512)
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
2019-09-19 02:15:41 -07:00
|
|
|
|
return b.([]record.RefSample)
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
|
|
|
|
|
2019-09-19 02:15:41 -07:00
|
|
|
|
// putAppendBuffer returns a sample buffer to the pool, truncated to length 0
// so its capacity can be reused.
func (h *Head) putAppendBuffer(b []record.RefSample) {
	//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
	h.appendPool.Put(b[:0])
}
|
|
|
|
|
|
2021-03-16 02:47:45 -07:00
|
|
|
|
func (h *Head) getExemplarBuffer() []exemplarWithSeriesRef {
|
|
|
|
|
b := h.exemplarsPool.Get()
|
|
|
|
|
if b == nil {
|
|
|
|
|
return make([]exemplarWithSeriesRef, 0, 512)
|
|
|
|
|
}
|
|
|
|
|
return b.([]exemplarWithSeriesRef)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// putExemplarBuffer returns an exemplar buffer to the pool, truncated to
// length 0. A nil buffer (exemplars disabled) is ignored.
func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) {
	if b == nil {
		return
	}

	//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
	h.exemplarsPool.Put(b[:0])
}
|
|
|
|
|
|
2019-09-19 02:15:41 -07:00
|
|
|
|
func (h *Head) getSeriesBuffer() []*memSeries {
|
|
|
|
|
b := h.seriesPool.Get()
|
|
|
|
|
if b == nil {
|
|
|
|
|
return make([]*memSeries, 0, 512)
|
|
|
|
|
}
|
|
|
|
|
return b.([]*memSeries)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// putSeriesBuffer returns a series buffer to the pool, truncated to length 0
// so its capacity can be reused.
func (h *Head) putSeriesBuffer(b []*memSeries) {
	//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
	h.seriesPool.Put(b[:0])
}
|
|
|
|
|
|
2018-05-17 06:04:32 -07:00
|
|
|
|
func (h *Head) getBytesBuffer() []byte {
|
|
|
|
|
b := h.bytesPool.Get()
|
|
|
|
|
if b == nil {
|
|
|
|
|
return make([]byte, 0, 1024)
|
|
|
|
|
}
|
|
|
|
|
return b.([]byte)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// putBytesBuffer returns a byte buffer to the pool, truncated to length 0
// so its capacity can be reused.
func (h *Head) putBytesBuffer(b []byte) {
	//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
	h.bytesPool.Put(b[:0])
}
|
|
|
|
|
|
2021-03-16 02:47:45 -07:00
|
|
|
|
// exemplarWithSeriesRef pairs an exemplar with the reference of the series it
// belongs to, so buffered exemplars can be resolved to their series on Commit.
type exemplarWithSeriesRef struct {
	ref      uint64
	exemplar exemplar.Exemplar
}
|
|
|
|
|
|
2017-08-30 09:34:54 -07:00
|
|
|
|
// headAppender buffers appended series, samples, exemplars, and histograms and
// only makes them visible (and WAL-logged) on Commit. It is not safe for
// concurrent use.
type headAppender struct {
	head             *Head
	minValidTime     int64 // No samples below this timestamp are allowed.
	mint, maxt       int64 // Min/max timestamps observed by this appender.
	exemplarAppender ExemplarStorage

	series          []record.RefSeries      // Series created by this appender.
	samples         []record.RefSample      // Samples buffered by this appender.
	exemplars       []exemplarWithSeriesRef // Exemplars buffered by this appender.
	sampleSeries    []*memSeries            // Series for samples, index-aligned with samples.
	histograms      []record.RefHistogram   // Histograms buffered by this appender.
	histogramSeries []*memSeries            // Series for histograms, index-aligned with histograms.

	appendID, cleanupAppendIDsBelow uint64
	closed                          bool // Set after Commit or Rollback; further calls error.
}
|
|
|
|
|
|
2021-02-18 04:07:00 -08:00
|
|
|
|
// Append buffers the sample (t, v) for the series identified by ref. When ref
// is unknown, the series is looked up (or created) from lset after sanity
// checks. It returns the series reference to use for subsequent appends.
func (a *headAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) {
	if t < a.minValidTime {
		a.head.metrics.outOfBoundSamples.Inc()
		return 0, storage.ErrOutOfBounds
	}

	s := a.head.series.getByID(ref)
	if s == nil {
		// Ensure no empty labels have gotten through.
		lset = lset.WithoutEmpty()
		if len(lset) == 0 {
			return 0, errors.Wrap(ErrInvalidSample, "empty labelset")
		}

		if l, dup := lset.HasDuplicateLabelNames(); dup {
			return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l))
		}

		var created bool
		var err error
		s, created, err = a.head.getOrCreate(lset.Hash(), lset)
		if err != nil {
			return 0, err
		}
		if created {
			// Record newly created series so they get logged to the WAL.
			a.series = append(a.series, record.RefSeries{
				Ref:    s.ref,
				Labels: lset,
			})
		}
	}

	s.Lock()
	if err := s.appendable(t, v); err != nil {
		s.Unlock()
		if err == storage.ErrOutOfOrderSample {
			a.head.metrics.outOfOrderSamples.Inc()
		}
		return 0, err
	}
	// Mark pending so the series is not garbage collected before Commit/Rollback.
	s.pendingCommit = true
	s.Unlock()

	// Track the appender's observed time range for updateMinMaxTime on Commit.
	if t < a.mint {
		a.mint = t
	}
	if t > a.maxt {
		a.maxt = t
	}

	a.samples = append(a.samples, record.RefSample{
		Ref: s.ref,
		T:   t,
		V:   v,
	})
	a.sampleSeries = append(a.sampleSeries, s)
	return s.ref, nil
}
|
|
|
|
|
|
2021-03-16 02:47:45 -07:00
|
|
|
|
// AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't
// use getOrCreate or make any of the lset sanity checks that Append does.
func (a *headAppender) AppendExemplar(ref uint64, _ labels.Labels, e exemplar.Exemplar) (uint64, error) {
	// Check if exemplar storage is enabled; silently drop the exemplar if not.
	if a.head.opts.NumExemplars <= 0 {
		return 0, nil
	}

	s := a.head.series.getByID(ref)
	if s == nil {
		return 0, fmt.Errorf("unknown series ref. when trying to add exemplar: %d", ref)
	}

	// Ensure no empty labels have gotten through.
	e.Labels = e.Labels.WithoutEmpty()

	// Validate eagerly so the caller learns about bad exemplars now rather
	// than at Commit time.
	err := a.exemplarAppender.ValidateExemplar(s.lset, e)
	if err != nil {
		if err == storage.ErrDuplicateExemplar {
			// Duplicate, don't return an error but don't accept the exemplar.
			return 0, nil
		}
		return 0, err
	}

	a.exemplars = append(a.exemplars, exemplarWithSeriesRef{ref, e})

	return s.ref, nil
}
|
|
|
|
|
|
2021-06-29 07:38:46 -07:00
|
|
|
|
// AppendHistogram buffers the sparse histogram sh at timestamp t for the
// series identified by ref, creating the series from lset when ref is unknown.
// It mirrors Append, using the histogram-specific buffers and appendability check.
func (a *headAppender) AppendHistogram(ref uint64, lset labels.Labels, t int64, sh histogram.SparseHistogram) (uint64, error) {
	if t < a.minValidTime {
		a.head.metrics.outOfBoundSamples.Inc()
		return 0, storage.ErrOutOfBounds
	}

	s := a.head.series.getByID(ref)
	if s == nil {
		// Ensure no empty labels have gotten through.
		lset = lset.WithoutEmpty()
		if len(lset) == 0 {
			return 0, errors.Wrap(ErrInvalidSample, "empty labelset")
		}

		if l, dup := lset.HasDuplicateLabelNames(); dup {
			return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l))
		}

		var created bool
		var err error
		s, created, err = a.head.getOrCreate(lset.Hash(), lset)
		if err != nil {
			return 0, err
		}
		if created {
			// Record newly created series so they get logged to the WAL.
			a.series = append(a.series, record.RefSeries{
				Ref:    s.ref,
				Labels: lset,
			})
		}
	}

	s.Lock()
	if err := s.appendableHistogram(t, sh); err != nil {
		s.Unlock()
		if err == storage.ErrOutOfOrderSample {
			a.head.metrics.outOfOrderSamples.Inc()
		}
		return 0, err
	}
	// Mark pending so the series is not garbage collected before Commit/Rollback.
	s.pendingCommit = true
	s.Unlock()

	// Track the appender's observed time range for updateMinMaxTime on Commit.
	if t < a.mint {
		a.mint = t
	}
	if t > a.maxt {
		a.maxt = t
	}

	a.histograms = append(a.histograms, record.RefHistogram{
		Ref: s.ref,
		T:   t,
		H:   sh,
	})
	a.histogramSeries = append(a.histogramSeries, s)
	return s.ref, nil
}
|
|
|
|
|
|
2021-03-19 12:28:55 -07:00
|
|
|
|
// Compile-time check that headAppender implements storage.GetRef.
var _ storage.GetRef = &headAppender{}

// GetRef returns the reference and canonical label set of the in-memory series
// matching lset, or (0, nil) when no such series exists in the head.
func (a *headAppender) GetRef(lset labels.Labels) (uint64, labels.Labels) {
	s := a.head.series.getByHash(lset.Hash(), lset)
	if s == nil {
		return 0, nil
	}
	// returned labels must be suitable to pass to Append()
	return s.ref, s.lset
}
|
|
|
|
|
|
2018-05-17 06:04:32 -07:00
|
|
|
|
// log writes the appender's buffered series, samples, and exemplars to the WAL
// as separate records. It is a no-op when the head has no WAL.
// NOTE(review): histograms are not logged here — presumably histogram WAL
// records are not yet supported in this branch; confirm before relying on
// histogram durability.
func (a *headAppender) log() error {
	if a.head.wal == nil {
		return nil
	}

	// Reuse one pooled byte buffer for all records.
	buf := a.head.getBytesBuffer()
	defer func() { a.head.putBytesBuffer(buf) }()

	var rec []byte
	var enc record.Encoder

	if len(a.series) > 0 {
		rec = enc.Series(a.series, buf)
		buf = rec[:0]

		if err := a.head.wal.Log(rec); err != nil {
			return errors.Wrap(err, "log series")
		}
	}
	if len(a.samples) > 0 {
		rec = enc.Samples(a.samples, buf)
		buf = rec[:0]

		if err := a.head.wal.Log(rec); err != nil {
			return errors.Wrap(err, "log samples")
		}
	}
	if len(a.exemplars) > 0 {
		rec = enc.Exemplars(exemplarsForEncoding(a.exemplars), buf)
		buf = rec[:0]

		if err := a.head.wal.Log(rec); err != nil {
			return errors.Wrap(err, "log exemplars")
		}
	}
	return nil
}
|
|
|
|
|
|
2021-05-06 13:53:52 -07:00
|
|
|
|
func exemplarsForEncoding(es []exemplarWithSeriesRef) []record.RefExemplar {
|
|
|
|
|
ret := make([]record.RefExemplar, 0, len(es))
|
|
|
|
|
for _, e := range es {
|
|
|
|
|
ret = append(ret, record.RefExemplar{
|
|
|
|
|
Ref: e.ref,
|
|
|
|
|
T: e.exemplar.Ts,
|
|
|
|
|
V: e.exemplar.Value,
|
|
|
|
|
Labels: e.exemplar.Labels,
|
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
return ret
|
|
|
|
|
}
|
|
|
|
|
|
2020-07-22 02:57:38 -07:00
|
|
|
|
// Commit writes the buffered data to the WAL and then moves it into the
// in-memory chunks and exemplar storage, making it visible to queries.
// The appender is unusable afterwards.
func (a *headAppender) Commit() (err error) {
	if a.closed {
		return ErrAppenderClosed
	}
	defer func() { a.closed = true }()

	if err := a.log(); err != nil {
		_ = a.Rollback() // Most likely the same error will happen again.
		return errors.Wrap(err, "write to WAL")
	}

	// No errors logging to WAL, so pass the exemplars along to the in memory storage.
	for _, e := range a.exemplars {
		s := a.head.series.getByID(e.ref)
		// We don't instrument exemplar appends here, all is instrumented by storage.
		if err := a.exemplarAppender.AddExemplar(s.lset, e.exemplar); err != nil {
			if err == storage.ErrOutOfOrderExemplar {
				continue
			}
			level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err)
		}
	}

	defer a.head.metrics.activeAppenders.Dec()
	defer a.head.putAppendBuffer(a.samples)
	defer a.head.putSeriesBuffer(a.sampleSeries)
	defer a.head.putExemplarBuffer(a.exemplars)
	defer a.head.iso.closeAppend(a.appendID)

	// total tracks how many samples were actually accepted (not out-of-order).
	total := len(a.samples)
	var series *memSeries
	for i, s := range a.samples {
		series = a.sampleSeries[i]
		series.Lock()
		ok, chunkCreated := series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper)
		series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
		series.pendingCommit = false
		series.Unlock()

		if !ok {
			total--
			a.head.metrics.outOfOrderSamples.Inc()
		}
		if chunkCreated {
			a.head.metrics.chunks.Inc()
			a.head.metrics.chunksCreated.Inc()
		}
	}
	total += len(a.histograms) // TODO: different metric?
	for i, s := range a.histograms {
		series = a.histogramSeries[i]
		series.Lock()
		a.head.hasHistograms.Store(true)
		ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, a.head.chunkDiskMapper)
		series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
		series.pendingCommit = false
		series.Unlock()

		if !ok {
			total--
			a.head.metrics.outOfOrderSamples.Inc()
		}
		if chunkCreated {
			a.head.metrics.chunks.Inc()
			a.head.metrics.chunksCreated.Inc()
		}
	}

	a.head.metrics.samplesAppended.Add(float64(total))
	a.head.updateMinMaxTime(a.mint, a.maxt)

	return nil
}
|
|
|
|
|
|
2020-07-22 02:57:38 -07:00
|
|
|
|
func (a *headAppender) Rollback() (err error) {
|
|
|
|
|
if a.closed {
|
|
|
|
|
return ErrAppenderClosed
|
|
|
|
|
}
|
|
|
|
|
defer func() { a.closed = true }()
|
2020-03-13 12:54:47 -07:00
|
|
|
|
defer a.head.metrics.activeAppenders.Dec()
|
|
|
|
|
defer a.head.iso.closeAppend(a.appendID)
|
|
|
|
|
defer a.head.putSeriesBuffer(a.sampleSeries)
|
|
|
|
|
|
2019-09-19 02:15:41 -07:00
|
|
|
|
var series *memSeries
|
|
|
|
|
for i := range a.samples {
|
|
|
|
|
series = a.sampleSeries[i]
|
|
|
|
|
series.Lock()
|
2020-02-12 11:22:27 -08:00
|
|
|
|
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
|
2019-09-19 02:15:41 -07:00
|
|
|
|
series.pendingCommit = false
|
|
|
|
|
series.Unlock()
|
2018-09-17 09:58:42 -07:00
|
|
|
|
}
|
2017-08-30 09:34:54 -07:00
|
|
|
|
a.head.putAppendBuffer(a.samples)
|
2021-05-06 13:53:52 -07:00
|
|
|
|
a.head.putExemplarBuffer(a.exemplars)
|
2020-02-12 11:22:27 -08:00
|
|
|
|
a.samples = nil
|
2021-05-06 13:53:52 -07:00
|
|
|
|
a.exemplars = nil
|
2017-08-30 09:34:54 -07:00
|
|
|
|
|
2018-06-28 06:04:07 -07:00
|
|
|
|
// Series are created in the head memory regardless of rollback. Thus we have
|
|
|
|
|
// to log them to the WAL in any case.
|
2018-05-17 06:04:32 -07:00
|
|
|
|
return a.log()
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Delete all samples in the range of [mint, maxt] for series that satisfy the given
|
|
|
|
|
// label matchers.
|
2019-11-18 11:53:33 -08:00
|
|
|
|
func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
|
2017-08-30 09:34:54 -07:00
|
|
|
|
// Do not delete anything beyond the currently valid range.
|
|
|
|
|
mint, maxt = clampInterval(mint, maxt, h.MinTime(), h.MaxTime())
|
|
|
|
|
|
|
|
|
|
ir := h.indexRange(mint, maxt)
|
|
|
|
|
|
2017-12-17 10:08:21 -08:00
|
|
|
|
p, err := PostingsForMatchers(ir, ms...)
|
2017-11-13 03:16:58 -08:00
|
|
|
|
if err != nil {
|
|
|
|
|
return errors.Wrap(err, "select series")
|
|
|
|
|
}
|
2017-08-30 09:34:54 -07:00
|
|
|
|
|
2019-09-19 02:15:41 -07:00
|
|
|
|
var stones []tombstones.Stone
|
2017-08-30 09:34:54 -07:00
|
|
|
|
for p.Next() {
|
2017-09-05 02:45:18 -07:00
|
|
|
|
series := h.series.getByID(p.At())
|
2017-08-30 09:34:54 -07:00
|
|
|
|
|
2020-03-16 05:59:22 -07:00
|
|
|
|
series.RLock()
|
2018-02-07 05:43:21 -08:00
|
|
|
|
t0, t1 := series.minTime(), series.maxTime()
|
2020-03-16 05:59:22 -07:00
|
|
|
|
series.RUnlock()
|
2018-02-07 05:43:21 -08:00
|
|
|
|
if t0 == math.MinInt64 || t1 == math.MinInt64 {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2017-08-30 09:34:54 -07:00
|
|
|
|
// Delete only until the current values and not beyond.
|
2018-02-07 05:43:21 -08:00
|
|
|
|
t0, t1 = clampInterval(mint, maxt, t0, t1)
|
2020-01-20 07:38:00 -08:00
|
|
|
|
stones = append(stones, tombstones.Stone{Ref: p.At(), Intervals: tombstones.Intervals{{Mint: t0, Maxt: t1}}})
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
|
|
|
|
if p.Err() != nil {
|
|
|
|
|
return p.Err()
|
|
|
|
|
}
|
2018-05-17 06:04:32 -07:00
|
|
|
|
if h.wal != nil {
|
2020-01-20 07:38:00 -08:00
|
|
|
|
var enc record.Encoder
|
2018-05-17 06:04:32 -07:00
|
|
|
|
if err := h.wal.Log(enc.Tombstones(stones, nil)); err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
2020-01-20 07:38:00 -08:00
|
|
|
|
for _, s := range stones {
|
|
|
|
|
h.tombstones.AddInterval(s.Ref, s.Intervals[0])
|
2017-08-30 09:34:54 -07:00
|
|
|
|
}
|
2019-01-08 09:08:41 -08:00
|
|
|
|
|
2017-08-30 09:34:54 -07:00
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-13 09:51:50 -08:00
|
|
|
|
// gc removes data before the minimum timestamp from the head.
// It returns the actual min times of the chunks present in the Head.
func (h *Head) gc() int64 {
	// Only data strictly lower than this timestamp must be deleted.
	mint := h.MinTime()

	// Drop old chunks and remember series IDs and hashes if they can be
	// deleted entirely.
	deleted, chunksRemoved, actualMint := h.series.gc(mint)
	seriesRemoved := len(deleted)

	h.metrics.seriesRemoved.Add(float64(seriesRemoved))
	h.metrics.chunksRemoved.Add(float64(chunksRemoved))
	h.metrics.chunks.Sub(float64(chunksRemoved))
	h.numSeries.Sub(uint64(seriesRemoved))

	// Remove deleted series IDs from the postings lists.
	h.postings.Delete(deleted)

	if h.wal != nil {
		_, last, _ := wal.Segments(h.wal.Dir())
		h.deletedMtx.Lock()
		// Keep series records until we're past segment 'last'
		// because the WAL will still have samples records with
		// this ref ID. If we didn't keep these series records then
		// on start up when we replay the WAL, or any other code
		// that reads the WAL, wouldn't be able to use those
		// samples since we would have no labels for that ref ID.
		for ref := range deleted {
			h.deleted[ref] = last
		}
		h.deletedMtx.Unlock()
	}

	// Rebuild symbols and label value indices from what is left in the postings terms.
	// symMtx ensures that append of symbols and postings is disabled for rebuild time.
	h.symMtx.Lock()
	defer h.symMtx.Unlock()

	symbols := make(map[string]struct{}, len(h.symbols))
	if err := h.postings.Iter(func(l labels.Label, _ index.Postings) error {
		symbols[l.Name] = struct{}{}
		symbols[l.Value] = struct{}{}
		return nil
	}); err != nil {
		// This should never happen, as the iteration function only returns nil.
		panic(err)
	}
	h.symbols = symbols

	return actualMint
}
|
|
|
|
|
|
2017-10-09 06:21:46 -07:00
|
|
|
|
// Tombstones returns a new reader over the head's tombstones.
func (h *Head) Tombstones() (tombstones.Reader, error) {
	return h.tombstones, nil
}
|
|
|
|
|
|
2017-08-28 15:39:17 -07:00
|
|
|
|
// Index returns an IndexReader against the block, covering the head's full
// time range.
func (h *Head) Index() (IndexReader, error) {
	return h.indexRange(math.MinInt64, math.MaxInt64), nil
}
|
|
|
|
|
|
2017-08-28 15:39:17 -07:00
|
|
|
|
func (h *Head) indexRange(mint, maxt int64) *headIndexReader {
|
|
|
|
|
if hmin := h.MinTime(); hmin > mint {
|
|
|
|
|
mint = hmin
|
2017-01-12 11:00:36 -08:00
|
|
|
|
}
|
2017-08-28 15:39:17 -07:00
|
|
|
|
return &headIndexReader{head: h, mint: mint, maxt: maxt}
|
2017-01-12 10:18:51 -08:00
|
|
|
|
}
|
|
|
|
|
|
2017-08-28 15:39:17 -07:00
|
|
|
|
// Chunks returns a ChunkReader against the block, covering the head's full
// time range and capturing the current isolation state.
func (h *Head) Chunks() (ChunkReader, error) {
	return h.chunksRange(math.MinInt64, math.MaxInt64, h.iso.State())
}
|
|
|
|
|
|
2020-05-22 02:03:23 -07:00
|
|
|
|
// chunksRange builds a chunk reader limited to [mint, maxt] under the given
// isolation state. It fails when the head has already been closed.
func (h *Head) chunksRange(mint, maxt int64, is *isolationState) (*headChunkReader, error) {
	h.closedMtx.Lock()
	defer h.closedMtx.Unlock()
	if h.closed {
		return nil, errors.New("can't read from a closed head")
	}
	// Clamp the lower bound to the head's current minimum time.
	if hmin := h.MinTime(); hmin > mint {
		mint = hmin
	}
	return &headChunkReader{
		head:     h,
		mint:     mint,
		maxt:     maxt,
		isoState: is,
	}, nil
}
|
|
|
|
|
|
2019-07-23 01:04:48 -07:00
|
|
|
|
// NumSeries returns the number of active series in the head.
func (h *Head) NumSeries() uint64 {
	return h.numSeries.Load()
}
|
|
|
|
|
|
|
|
|
|
// Meta returns meta information about the head.
|
|
|
|
|
// The head is dynamic so will return dynamic results.
|
|
|
|
|
func (h *Head) Meta() BlockMeta {
|
|
|
|
|
var id [16]byte
|
|
|
|
|
copy(id[:], "______head______")
|
|
|
|
|
return BlockMeta{
|
|
|
|
|
MinTime: h.MinTime(),
|
|
|
|
|
MaxTime: h.MaxTime(),
|
|
|
|
|
ULID: ulid.ULID(id),
|
|
|
|
|
Stats: BlockStats{
|
|
|
|
|
NumSeries: h.NumSeries(),
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-28 15:39:17 -07:00
|
|
|
|
// MinTime returns the lowest time bound on visible data in the head.
func (h *Head) MinTime() int64 {
	return h.minTime.Load()
}
|
|
|
|
|
|
2017-08-28 15:39:17 -07:00
|
|
|
|
// MaxTime returns the highest timestamp seen in data of the head.
func (h *Head) MaxTime() int64 {
	return h.maxTime.Load()
}
|
|
|
|
|
|
2019-04-01 01:19:06 -07:00
|
|
|
|
// compactable returns whether the head has a compactable range.
// The head has a compactable range when the head time range is 1.5 times the chunk range.
// The 0.5 acts as a buffer of the appendable window.
func (h *Head) compactable() bool {
	return h.MaxTime()-h.MinTime() > h.chunkRange.Load()/2*3
}
|
|
|
|
|
|
2017-11-10 12:19:39 -08:00
|
|
|
|
// Close flushes the WAL and closes the head.
func (h *Head) Close() error {
	h.closedMtx.Lock()
	defer h.closedMtx.Unlock()
	h.closed = true

	// M-map all in-memory chunks.
	// A hack for the histogram till it is stored in WAL and replayed.
	if h.hasHistograms.Load() {
		for _, m := range h.series.series {
			for _, s := range m {
				s.mmapCurrentHeadChunk(h.chunkDiskMapper)
			}
		}
	}

	// Collect close errors from all sub-components before returning.
	errs := tsdb_errors.NewMulti(h.chunkDiskMapper.Close())
	if h.wal != nil {
		errs.Add(h.wal.Close())
	}
	return errs.Err()
}
|
|
|
|
|
|
2021-01-21 02:37:29 -08:00
|
|
|
|
// String returns an human readable representation of the TSDB head. It's important to
// keep this function in order to avoid the struct dump when the head is stringified in
// errors or logs.
func (h *Head) String() string {
	return "head"
}
|
|
|
|
|
|
2017-08-28 15:39:17 -07:00
|
|
|
|
// headChunkReader implements ChunkReader over the head's in-memory chunks,
// limited to the [mint, maxt] range and bound to one isolation state.
type headChunkReader struct {
	head       *Head
	mint, maxt int64
	isoState   *isolationState
}
|
|
|
|
|
|
2017-08-28 15:39:17 -07:00
|
|
|
|
// Close releases the isolation state held by this reader.
func (h *headChunkReader) Close() error {
	h.isoState.Close()
	return nil
}
|
|
|
|
|
|
2017-09-04 07:08:38 -07:00
|
|
|
|
// packChunkID packs a seriesID and a chunkID within it into a global 8 byte ID.
// It panicks if the seriesID exceeds 5 bytes or the chunk ID 3 bytes.
func packChunkID(seriesID, chunkID uint64) uint64 {
	const (
		maxSeriesID = (1 << 40) - 1 // 5 bytes.
		maxChunkID  = (1 << 24) - 1 // 3 bytes.
	)
	switch {
	case seriesID > maxSeriesID:
		panic("series ID exceeds 5 bytes")
	case chunkID > maxChunkID:
		panic("chunk ID exceeds 3 bytes")
	}
	return seriesID<<24 | chunkID
}
|
|
|
|
|
|
|
|
|
|
// unpackChunkID splits a global 8 byte ID produced by packChunkID back into
// its series ID (upper 5 bytes) and chunk ID (lower 3 bytes).
func unpackChunkID(id uint64) (seriesID, chunkID uint64) {
	const chunkIDMask = (1 << 24) - 1
	return id >> 24, id & chunkIDMask
}
|
|
|
|
|
|
2016-12-15 07:14:33 -08:00
|
|
|
|
// Chunk returns the chunk for the reference number.
func (h *headChunkReader) Chunk(ref uint64) (chunkenc.Chunk, error) {
	sid, cid := unpackChunkID(ref)

	s := h.head.series.getByID(sid)
	// This means that the series has been garbage collected.
	if s == nil {
		return nil, storage.ErrNotFound
	}

	s.Lock()
	c, garbageCollect, err := s.chunk(int(cid), h.head.chunkDiskMapper)
	if err != nil {
		s.Unlock()
		return nil, err
	}
	defer func() {
		if garbageCollect {
			// Set this to nil so that Go GC can collect it after it has been used.
			c.chunk = nil
			s.memChunkPool.Put(c)
		}
	}()

	// This means that the chunk is outside the specified range.
	if !c.OverlapsClosedInterval(h.mint, h.maxt) {
		s.Unlock()
		return nil, storage.ErrNotFound
	}
	s.Unlock()

	// Wrap the chunk so iteration happens under the series lock.
	return &safeChunk{
		Chunk:           c.chunk,
		s:               s,
		cid:             int(cid),
		isoState:        h.isoState,
		chunkDiskMapper: h.head.chunkDiskMapper,
	}, nil
}
|
|
|
|
|
|
2017-01-09 07:51:39 -08:00
|
|
|
|
// safeChunk wraps a head chunk so that its iterator is created under the
// owning series' lock, respecting the reader's isolation state.
type safeChunk struct {
	chunkenc.Chunk
	s               *memSeries
	cid             int
	isoState        *isolationState
	chunkDiskMapper *chunks.ChunkDiskMapper
}
|
|
|
|
|
|
2019-07-09 02:49:34 -07:00
|
|
|
|
// Iterator returns an iterator over the chunk's samples, constructed while
// holding the series lock so concurrent appends cannot race with it.
func (c *safeChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator {
	c.s.Lock()
	it := c.s.iterator(c.cid, c.isoState, c.chunkDiskMapper, reuseIter)
	c.s.Unlock()
	return it
}
|
|
|
|
|
|
2017-01-10 06:28:22 -08:00
|
|
|
|
// headIndexReader implements IndexReader over the head's in-memory index,
// limited to the [mint, maxt] range.
type headIndexReader struct {
	head       *Head
	mint, maxt int64
}
|
|
|
|
|
|
|
|
|
|
// Close is a no-op; the head index reader holds no resources of its own.
func (h *headIndexReader) Close() error {
	return nil
}
|
|
|
|
|
|
Stream symbols during compaction. (#6468)
Rather than buffer up symbols in RAM, do it one by one
during compaction. Then use the reader's symbol handling
for symbol lookups during the rest of the index write.
There is some slowdown in compaction, due to having to look through a file
rather than a hash lookup. This is noise to the overall cost of compacting
series with thousands of samples though.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 11:49:54 -08:00
|
|
|
|
func (h *headIndexReader) Symbols() index.StringIter {
|
2017-09-05 02:45:18 -07:00
|
|
|
|
h.head.symMtx.RLock()
|
Stream symbols during compaction. (#6468)
Rather than buffer up symbols in RAM, do it one by one
during compaction. Then use the reader's symbol handling
for symbol lookups during the rest of the index write.
There is some slowdown in compaction, due to having to look through a file
rather than a hash lookup. This is noise to the overall cost of compacting
series with thousands of samples though.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 11:49:54 -08:00
|
|
|
|
res := make([]string, 0, len(h.head.symbols))
|
2017-09-05 02:45:18 -07:00
|
|
|
|
|
|
|
|
|
for s := range h.head.symbols {
|
Stream symbols during compaction. (#6468)
Rather than buffer up symbols in RAM, do it one by one
during compaction. Then use the reader's symbol handling
for symbol lookups during the rest of the index write.
There is some slowdown in compaction, due to having to look through a file
rather than a hash lookup. This is noise to the overall cost of compacting
series with thousands of samples though.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 11:49:54 -08:00
|
|
|
|
res = append(res, s)
|
2017-09-05 02:45:18 -07:00
|
|
|
|
}
|
Stream symbols during compaction. (#6468)
Rather than buffer up symbols in RAM, do it one by one
during compaction. Then use the reader's symbol handling
for symbol lookups during the rest of the index write.
There is some slowdown in compaction, due to having to look through a file
rather than a hash lookup. This is noise to the overall cost of compacting
series with thousands of samples though.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 11:49:54 -08:00
|
|
|
|
h.head.symMtx.RUnlock()
|
|
|
|
|
|
|
|
|
|
sort.Strings(res)
|
|
|
|
|
return index.NewStringListIter(res)
|
2016-12-14 09:38:46 -08:00
|
|
|
|
}
|
|
|
|
|
|
2020-06-25 06:10:29 -07:00
|
|
|
|
// SortedLabelValues returns label values present in the head for the
|
|
|
|
|
// specific label name that are within the time range mint to maxt.
|
2021-02-09 09:38:35 -08:00
|
|
|
|
// If matchers are specified the returned result set is reduced
|
|
|
|
|
// to label values of metrics matching the matchers.
|
|
|
|
|
func (h *headIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) {
|
|
|
|
|
values, err := h.LabelValues(name, matchers...)
|
2020-06-25 06:10:29 -07:00
|
|
|
|
if err == nil {
|
|
|
|
|
sort.Strings(values)
|
|
|
|
|
}
|
|
|
|
|
return values, err
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-30 05:50:09 -07:00
|
|
|
|
// LabelValues returns label values present in the head for the
|
|
|
|
|
// specific label name that are within the time range mint to maxt.
|
2021-02-09 09:38:35 -08:00
|
|
|
|
// If matchers are specified the returned result set is reduced
|
|
|
|
|
// to label values of metrics matching the matchers.
|
|
|
|
|
func (h *headIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) {
|
2020-05-30 05:50:09 -07:00
|
|
|
|
if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() {
|
|
|
|
|
return []string{}, nil
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-09 09:38:35 -08:00
|
|
|
|
if len(matchers) == 0 {
|
|
|
|
|
h.head.symMtx.RLock()
|
|
|
|
|
defer h.head.symMtx.RUnlock()
|
|
|
|
|
return h.head.postings.LabelValues(name), nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return labelValuesWithMatchers(h, name, matchers...)
|
2016-12-14 09:38:46 -08:00
|
|
|
|
}
|
|
|
|
|
|
2020-05-30 05:50:09 -07:00
|
|
|
|
// LabelNames returns all the unique label names present in the head
|
|
|
|
|
// that are within the time range mint to maxt.
|
2018-11-07 07:52:41 -08:00
|
|
|
|
func (h *headIndexReader) LabelNames() ([]string, error) {
|
|
|
|
|
h.head.symMtx.RLock()
|
2020-05-30 05:50:09 -07:00
|
|
|
|
if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() {
|
2020-09-10 08:05:47 -07:00
|
|
|
|
h.head.symMtx.RUnlock()
|
2020-05-30 05:50:09 -07:00
|
|
|
|
return []string{}, nil
|
|
|
|
|
}
|
|
|
|
|
|
2020-09-10 08:05:47 -07:00
|
|
|
|
labelNames := h.head.postings.LabelNames()
|
|
|
|
|
h.head.symMtx.RUnlock()
|
|
|
|
|
|
2018-11-07 07:52:41 -08:00
|
|
|
|
sort.Strings(labelNames)
|
|
|
|
|
return labelNames, nil
|
|
|
|
|
}
|
|
|
|
|
|
Reduce memory used by postings offset table.
Rather than keeping the offset of each postings list, instead
keep the nth offset of the offset of the posting list. As postings
list offsets have always been sorted, we can then get to the closest
entry before the one we want an iterate forwards.
I haven't done much tuning on the 32 number, it was chosen to try
not to read through more than a 4k page of data.
Switch to a bulk interface for fetching postings. Use it to avoid having
to re-read parts of the posting offset table when querying lots of it.
For a index with what BenchmarkHeadPostingForMatchers uses RAM
for r.postings drops from 3.79MB to 80.19kB or about 48x.
Bytes allocated go down by 30%, and suprisingly CPU usage drops by
4-6% for typical queries too.
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 35231 36673 +4.09%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 563380 540627 -4.04%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 536782 534186 -0.48%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 533990 541550 +1.42%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113374598 117969608 +4.05%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 146329884 139651442 -4.56%
BenchmarkPostingsForMatchers/Block/i=~""-4 50346510 44961127 -10.70%
BenchmarkPostingsForMatchers/Block/i!=""-4 41261550 35356165 -14.31%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112544418 116904010 +3.87%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112487086 116864918 +3.89%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 41094758 35457904 -13.72%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 41906372 36151473 -13.73%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 147262414 140424800 -4.64%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 28615629 27872072 -2.60%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 147117177 140462403 -4.52%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 175096826 167902298 -4.11%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 4 6 +50.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 17 +13.33%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100010 100012 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200069 200040 -0.01%
BenchmarkPostingsForMatchers/Block/i=~""-4 200072 200045 -0.01%
BenchmarkPostingsForMatchers/Block/i!=""-4 200070 200041 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100013 100017 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100017 100023 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200073 200046 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200075 200050 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200074 200049 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111165 111150 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200078 200055 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311282 311238 -0.01%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 264 296 +12.12%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 520 552 +6.15%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600461 1600482 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 24900801 17259077 -30.69%
BenchmarkPostingsForMatchers/Block/i=~""-4 24900836 17259151 -30.69%
BenchmarkPostingsForMatchers/Block/i!=""-4 24900760 17259048 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600557 1600621 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600717 1600813 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 24900856 17259176 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 24900952 17259304 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 24900993 17259333 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3788311 3142630 -17.04%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 24901137 17259509 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 28693086 20405680 -28.88%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-05 10:27:40 -08:00
|
|
|
|
// Postings returns the postings list iterator for the label pairs.
|
|
|
|
|
func (h *headIndexReader) Postings(name string, values ...string) (index.Postings, error) {
|
|
|
|
|
res := make([]index.Postings, 0, len(values))
|
|
|
|
|
for _, value := range values {
|
2020-03-25 12:13:47 -07:00
|
|
|
|
res = append(res, h.head.postings.Get(name, value))
|
Reduce memory used by postings offset table.
Rather than keeping the offset of each postings list, instead
keep the nth offset of the offset of the posting list. As postings
list offsets have always been sorted, we can then get to the closest
entry before the one we want an iterate forwards.
I haven't done much tuning on the 32 number, it was chosen to try
not to read through more than a 4k page of data.
Switch to a bulk interface for fetching postings. Use it to avoid having
to re-read parts of the posting offset table when querying lots of it.
For a index with what BenchmarkHeadPostingForMatchers uses RAM
for r.postings drops from 3.79MB to 80.19kB or about 48x.
Bytes allocated go down by 30%, and suprisingly CPU usage drops by
4-6% for typical queries too.
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 35231 36673 +4.09%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 563380 540627 -4.04%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 536782 534186 -0.48%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 533990 541550 +1.42%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113374598 117969608 +4.05%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 146329884 139651442 -4.56%
BenchmarkPostingsForMatchers/Block/i=~""-4 50346510 44961127 -10.70%
BenchmarkPostingsForMatchers/Block/i!=""-4 41261550 35356165 -14.31%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112544418 116904010 +3.87%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112487086 116864918 +3.89%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 41094758 35457904 -13.72%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 41906372 36151473 -13.73%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 147262414 140424800 -4.64%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 28615629 27872072 -2.60%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 147117177 140462403 -4.52%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 175096826 167902298 -4.11%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 4 6 +50.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 17 +13.33%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100010 100012 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200069 200040 -0.01%
BenchmarkPostingsForMatchers/Block/i=~""-4 200072 200045 -0.01%
BenchmarkPostingsForMatchers/Block/i!=""-4 200070 200041 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100013 100017 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100017 100023 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200073 200046 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200075 200050 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200074 200049 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111165 111150 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200078 200055 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311282 311238 -0.01%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 264 296 +12.12%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 520 552 +6.15%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600461 1600482 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 24900801 17259077 -30.69%
BenchmarkPostingsForMatchers/Block/i=~""-4 24900836 17259151 -30.69%
BenchmarkPostingsForMatchers/Block/i!=""-4 24900760 17259048 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600557 1600621 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600717 1600813 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 24900856 17259176 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 24900952 17259304 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 24900993 17259333 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3788311 3142630 -17.04%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 24901137 17259509 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 28693086 20405680 -28.88%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-05 10:27:40 -08:00
|
|
|
|
}
|
|
|
|
|
return index.Merge(res...), nil
|
2016-12-14 09:38:46 -08:00
|
|
|
|
}
|
|
|
|
|
|
2017-11-30 06:34:49 -08:00
|
|
|
|
func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
|
2019-01-03 02:35:10 -08:00
|
|
|
|
series := make([]*memSeries, 0, 128)
|
2017-08-05 04:31:48 -07:00
|
|
|
|
|
2019-01-03 02:35:10 -08:00
|
|
|
|
// Fetch all the series only once.
|
2017-08-05 04:31:48 -07:00
|
|
|
|
for p.Next() {
|
2019-01-03 02:35:10 -08:00
|
|
|
|
s := h.head.series.getByID(p.At())
|
|
|
|
|
if s == nil {
|
2020-04-11 01:22:18 -07:00
|
|
|
|
level.Debug(h.head.logger).Log("msg", "Looked up series not found")
|
2019-01-03 02:35:10 -08:00
|
|
|
|
} else {
|
|
|
|
|
series = append(series, s)
|
|
|
|
|
}
|
2017-08-05 04:31:48 -07:00
|
|
|
|
}
|
|
|
|
|
if err := p.Err(); err != nil {
|
2017-11-30 06:34:49 -08:00
|
|
|
|
return index.ErrPostings(errors.Wrap(err, "expand postings"))
|
2017-08-05 04:31:48 -07:00
|
|
|
|
}
|
|
|
|
|
|
2019-01-03 02:35:10 -08:00
|
|
|
|
sort.Slice(series, func(i, j int) bool {
|
|
|
|
|
return labels.Compare(series[i].lset, series[j].lset) < 0
|
2017-08-05 04:31:48 -07:00
|
|
|
|
})
|
2019-01-03 02:35:10 -08:00
|
|
|
|
|
|
|
|
|
// Convert back to list.
|
|
|
|
|
ep := make([]uint64, 0, len(series))
|
|
|
|
|
for _, p := range series {
|
|
|
|
|
ep = append(ep, p.ref)
|
|
|
|
|
}
|
2017-11-30 06:34:49 -08:00
|
|
|
|
return index.NewListPostings(ep)
|
2017-08-05 04:31:48 -07:00
|
|
|
|
}
|
|
|
|
|
|
2016-12-14 09:38:46 -08:00
|
|
|
|
// Series returns the series for the given reference.
// The series' label set is written into lbls and the metadata of its chunks
// overlapping the reader's [mint, maxt] range into chks; both output slices
// are reset first so callers can reuse their backing arrays.
// Returns storage.ErrNotFound when the reference resolves to no series.
func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]chunks.Meta) error {
	s := h.head.series.getByID(ref)

	if s == nil {
		h.head.metrics.seriesNotFound.Inc()
		return storage.ErrNotFound
	}
	// Copy the label set into the caller's slice, reusing its capacity.
	*lbls = append((*lbls)[:0], s.lset...)

	// s.Lock guards the chunk fields (mmappedChunks, headChunk) read below.
	s.Lock()
	defer s.Unlock()

	*chks = (*chks)[:0]

	for i, c := range s.mmappedChunks {
		// Do not expose chunks that are outside of the specified range.
		if !c.OverlapsClosedInterval(h.mint, h.maxt) {
			continue
		}
		*chks = append(*chks, chunks.Meta{
			MinTime: c.minTime,
			MaxTime: c.maxTime,
			Ref:     packChunkID(s.ref, uint64(s.chunkID(i))),
		})
	}
	// The in-memory head chunk gets the chunk ID following the last m-mapped
	// chunk (index len(s.mmappedChunks)).
	if s.headChunk != nil && s.headChunk.OverlapsClosedInterval(h.mint, h.maxt) {
		*chks = append(*chks, chunks.Meta{
			MinTime: s.headChunk.minTime,
			MaxTime: math.MaxInt64, // Set the head chunks as open (being appended to).
			Ref:     packChunkID(s.ref, uint64(s.chunkID(len(s.mmappedChunks)))),
		})
	}

	return nil
}
|
|
|
|
|
|
2021-02-09 09:38:35 -08:00
|
|
|
|
// LabelValueFor returns label value for the given label name in the series referred to by ID.
|
|
|
|
|
func (h *headIndexReader) LabelValueFor(id uint64, label string) (string, error) {
|
|
|
|
|
memSeries := h.head.series.getByID(id)
|
|
|
|
|
if memSeries == nil {
|
|
|
|
|
return "", storage.ErrNotFound
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
value := memSeries.lset.Get(label)
|
|
|
|
|
if value == "" {
|
|
|
|
|
return "", storage.ErrNotFound
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return value, nil
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-20 06:22:08 -07:00
|
|
|
|
func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, error) {
|
2021-03-18 08:23:50 -07:00
|
|
|
|
// Just using `getOrCreateWithID` below would be semantically sufficient, but we'd create
|
2017-09-18 03:28:56 -07:00
|
|
|
|
// a new series on every sample inserted via Add(), which causes allocations
|
|
|
|
|
// and makes our series IDs rather random and harder to compress in postings.
|
|
|
|
|
s := h.series.getByHash(hash, lset)
|
|
|
|
|
if s != nil {
|
2020-05-20 06:22:08 -07:00
|
|
|
|
return s, false, nil
|
2017-09-18 03:28:56 -07:00
|
|
|
|
}
|
2017-08-30 09:34:54 -07:00
|
|
|
|
|
2017-09-05 02:45:18 -07:00
|
|
|
|
// Optimistically assume that we are the first one to create the series.
|
2020-07-27 21:42:42 -07:00
|
|
|
|
id := h.lastSeriesID.Inc()
|
2017-09-19 01:20:19 -07:00
|
|
|
|
|
|
|
|
|
return h.getOrCreateWithID(id, hash, lset)
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-20 06:22:08 -07:00
|
|
|
|
func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSeries, bool, error) {
|
2021-03-18 08:23:50 -07:00
|
|
|
|
s, created, err := h.series.getOrSet(hash, lset, func() *memSeries {
|
|
|
|
|
return newMemSeries(lset, id, h.chunkRange.Load(), &h.memChunkPool)
|
|
|
|
|
})
|
2020-05-20 06:22:08 -07:00
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, false, err
|
|
|
|
|
}
|
2017-09-05 02:45:18 -07:00
|
|
|
|
if !created {
|
2020-05-20 06:22:08 -07:00
|
|
|
|
return s, false, nil
|
2017-09-05 02:45:18 -07:00
|
|
|
|
}
|
|
|
|
|
|
2017-09-18 03:28:56 -07:00
|
|
|
|
h.metrics.seriesCreated.Inc()
|
2020-07-27 21:42:42 -07:00
|
|
|
|
h.numSeries.Inc()
|
2017-09-18 03:28:56 -07:00
|
|
|
|
|
2017-09-05 02:45:18 -07:00
|
|
|
|
h.symMtx.Lock()
|
|
|
|
|
defer h.symMtx.Unlock()
|
2016-12-21 16:12:28 -08:00
|
|
|
|
|
|
|
|
|
for _, l := range lset {
|
2017-08-05 04:31:48 -07:00
|
|
|
|
h.symbols[l.Name] = struct{}{}
|
|
|
|
|
h.symbols[l.Value] = struct{}{}
|
2016-12-21 16:12:28 -08:00
|
|
|
|
}
|
2017-01-03 06:43:26 -08:00
|
|
|
|
|
2020-07-14 01:36:22 -07:00
|
|
|
|
h.postings.Add(id, lset)
|
2020-05-20 06:22:08 -07:00
|
|
|
|
return s, true, nil
|
2017-09-05 02:45:18 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// seriesHashmap is a simple hashmap for memSeries by their label set. It is built
// on top of a regular hashmap and holds a slice of series to resolve hash collisions.
// Its methods require the hash to be submitted with it to avoid re-computations throughout
// the code.
// As a plain map type it must be initialized (e.g. seriesHashmap{}) before use;
// see newStripeSeries.
type seriesHashmap map[uint64][]*memSeries
|
2016-12-04 04:16:11 -08:00
|
|
|
|
|
2017-09-05 02:45:18 -07:00
|
|
|
|
func (m seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries {
|
|
|
|
|
for _, s := range m[hash] {
|
2019-11-18 11:53:33 -08:00
|
|
|
|
if labels.Equal(s.lset, lset) {
|
2017-09-05 02:45:18 -07:00
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (m seriesHashmap) set(hash uint64, s *memSeries) {
|
|
|
|
|
l := m[hash]
|
|
|
|
|
for i, prev := range l {
|
2019-11-18 11:53:33 -08:00
|
|
|
|
if labels.Equal(prev.lset, s.lset) {
|
2017-09-05 02:45:18 -07:00
|
|
|
|
l[i] = s
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
m[hash] = append(l, s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (m seriesHashmap) del(hash uint64, lset labels.Labels) {
|
|
|
|
|
var rem []*memSeries
|
|
|
|
|
for _, s := range m[hash] {
|
2019-11-18 11:53:33 -08:00
|
|
|
|
if !labels.Equal(s.lset, lset) {
|
2017-09-05 02:45:18 -07:00
|
|
|
|
rem = append(rem, s)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if len(rem) == 0 {
|
|
|
|
|
delete(m, hash)
|
|
|
|
|
} else {
|
|
|
|
|
m[hash] = rem
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-29 23:12:43 -08:00
|
|
|
|
const (
	// DefaultStripeSize is the default number of entries to allocate in the stripeSeries hash map.
	// It must be a power of two, as stripe indices are computed via `x & (size-1)`
	// (see stripeSeries.getByID / getByHash).
	DefaultStripeSize = 1 << 14
)
|
|
|
|
|
|
2017-09-05 02:45:18 -07:00
|
|
|
|
// stripeSeries locks modulo ranges of IDs and hashes to reduce lock contention.
// The locks are padded to not be on the same cache line. Filling the padded space
// with the maps was profiled to be slower – likely due to the additional pointer
// dereferences.
type stripeSeries struct {
	// size is the number of stripes; indexing code masks with size-1,
	// so it must be a power of two.
	size int
	// series maps series reference (ID) to series, sharded by ID.
	series []map[uint64]*memSeries
	// hashes maps label-set hash to series, sharded by hash.
	hashes []seriesHashmap
	// locks guards the stripe with the same index in series/hashes.
	locks []stripeLock
	seriesLifecycleCallback SeriesLifecycleCallback
}
|
|
|
|
|
|
|
|
|
|
type stripeLock struct {
	sync.RWMutex
	// Padding to avoid multiple locks being on the same cache line.
	// NOTE(review): 40 bytes of padding plus the RWMutex is presumably sized to
	// fill a 64-byte cache line — confirm for the target architecture.
	_ [40]byte
}
|
|
|
|
|
|
2020-05-20 06:22:08 -07:00
|
|
|
|
func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *stripeSeries {
|
2020-01-29 23:12:43 -08:00
|
|
|
|
s := &stripeSeries{
|
2020-05-20 06:22:08 -07:00
|
|
|
|
size: stripeSize,
|
|
|
|
|
series: make([]map[uint64]*memSeries, stripeSize),
|
|
|
|
|
hashes: make([]seriesHashmap, stripeSize),
|
|
|
|
|
locks: make([]stripeLock, stripeSize),
|
|
|
|
|
seriesLifecycleCallback: seriesCallback,
|
2020-01-29 23:12:43 -08:00
|
|
|
|
}
|
2017-09-05 02:45:18 -07:00
|
|
|
|
|
|
|
|
|
for i := range s.series {
|
|
|
|
|
s.series[i] = map[uint64]*memSeries{}
|
|
|
|
|
}
|
|
|
|
|
for i := range s.hashes {
|
|
|
|
|
s.hashes[i] = seriesHashmap{}
|
|
|
|
|
}
|
2017-01-11 04:02:38 -08:00
|
|
|
|
return s
|
2016-12-04 04:16:11 -08:00
|
|
|
|
}
|
|
|
|
|
|
2017-09-05 02:45:18 -07:00
|
|
|
|
// gc garbage collects old chunks that are strictly before mint and removes
// series entirely that have no chunks left.
// It returns the set of deleted series references, the number of removed
// chunks, and the actual minimum timestamp still present across the
// remaining series (mint itself if no series remains).
func (s *stripeSeries) gc(mint int64) (map[uint64]struct{}, int, int64) {
	var (
		deleted            = map[uint64]struct{}{}
		deletedForCallback = []labels.Labels{}
		rmChunks           = 0
		actualMint   int64 = math.MaxInt64
	)
	// Run through all series and truncate old chunks. Mark those with no
	// chunks left as deleted and store their ID.
	for i := 0; i < s.size; i++ {
		s.locks[i].Lock()

		for hash, all := range s.hashes[i] {
			for _, series := range all {
				series.Lock()
				rmChunks += series.truncateChunksBefore(mint)

				// Series still holds data (or has pending appends): keep it and
				// track the earliest remaining timestamp.
				if len(series.mmappedChunks) > 0 || series.headChunk != nil || series.pendingCommit {
					seriesMint := series.minTime()
					if seriesMint < actualMint {
						actualMint = seriesMint
					}
					series.Unlock()
					continue
				}

				// The series is gone entirely. We need to keep the series lock
				// and make sure we have acquired the stripe locks for hash and ID of the
				// series alike.
				// If we don't hold them all, there's a very small chance that a series receives
				// samples again while we are half-way into deleting it.
				j := int(series.ref) & (s.size - 1)

				if i != j {
					s.locks[j].Lock()
				}

				deleted[series.ref] = struct{}{}
				s.hashes[i].del(hash, series.lset)
				delete(s.series[j], series.ref)
				deletedForCallback = append(deletedForCallback, series.lset)

				if i != j {
					s.locks[j].Unlock()
				}

				series.Unlock()
			}
		}

		s.locks[i].Unlock()

		// Notify the lifecycle callback outside of the stripe lock, once per stripe.
		s.seriesLifecycleCallback.PostDeletion(deletedForCallback...)
		deletedForCallback = deletedForCallback[:0]
	}

	// No series left at all: report the requested truncation point.
	if actualMint == math.MaxInt64 {
		actualMint = mint
	}

	return deleted, rmChunks, actualMint
}
|
|
|
|
|
|
|
|
|
|
func (s *stripeSeries) getByID(id uint64) *memSeries {
|
2020-01-29 23:12:43 -08:00
|
|
|
|
i := id & uint64(s.size-1)
|
2017-09-05 02:45:18 -07:00
|
|
|
|
|
|
|
|
|
s.locks[i].RLock()
|
|
|
|
|
series := s.series[i][id]
|
|
|
|
|
s.locks[i].RUnlock()
|
|
|
|
|
|
|
|
|
|
return series
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (s *stripeSeries) getByHash(hash uint64, lset labels.Labels) *memSeries {
|
2020-01-29 23:12:43 -08:00
|
|
|
|
i := hash & uint64(s.size-1)
|
2017-09-05 02:45:18 -07:00
|
|
|
|
|
|
|
|
|
s.locks[i].RLock()
|
|
|
|
|
series := s.hashes[i].get(hash, lset)
|
|
|
|
|
s.locks[i].RUnlock()
|
|
|
|
|
|
|
|
|
|
return series
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-18 08:23:50 -07:00
|
|
|
|
// getOrSet returns the series for the given hash/label set, creating it via
// createSeries when absent. The boolean result reports whether the series was
// created by this call. An error from the PreCreation lifecycle callback
// prevents creation and is returned to the caller.
func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries func() *memSeries) (*memSeries, bool, error) {
	// PreCreation is called here to avoid calling it inside the lock.
	// It is not necessary to call it just before creating a series,
	// rather it gives a 'hint' whether to create a series or not.
	preCreationErr := s.seriesLifecycleCallback.PreCreation(lset)

	// Create the series, unless the PreCreation() callback has failed.
	// If failed, we'll not allow to create a new series anyway.
	var series *memSeries
	if preCreationErr == nil {
		series = createSeries()
	}

	i := hash & uint64(s.size-1)
	s.locks[i].Lock()

	// Another goroutine may have created the series in the meantime.
	if prev := s.hashes[i].get(hash, lset); prev != nil {
		s.locks[i].Unlock()
		return prev, false, nil
	}
	if preCreationErr == nil {
		s.hashes[i].set(hash, series)
	}
	s.locks[i].Unlock()

	if preCreationErr != nil {
		// The callback prevented creation of series.
		return nil, false, preCreationErr
	}
	// Setting the series in the s.hashes marks the creation of series
	// as any further calls to this method would return that series.
	s.seriesLifecycleCallback.PostCreation(series.lset)

	// Register the series in the ID-sharded map; this may be a different
	// stripe than the hash-sharded one above.
	i = series.ref & uint64(s.size-1)

	s.locks[i].Lock()
	s.series[i][series.ref] = series
	s.locks[i].Unlock()

	return series, true, nil
}
|
|
|
|
|
|
2021-07-05 08:57:26 -07:00
|
|
|
|
// hist is a timestamped sparse histogram sample.
type hist struct {
	t int64
	h histogram.SparseHistogram
}
|
|
|
|
|
|
2017-03-24 02:20:39 -07:00
|
|
|
|
// sample is a timestamped float value.
type sample struct {
	t int64
	v float64
}
|
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
|
// newSample returns (t, v) wrapped as a tsdbutil.Sample.
func newSample(t int64, v float64) tsdbutil.Sample { return sample{t, v} }

// T returns the sample's timestamp.
func (s sample) T() int64 { return s.t }

// V returns the sample's value.
func (s sample) V() float64 { return s.v }
|
2018-10-25 13:06:19 -07:00
|
|
|
|
|
2017-09-07 23:48:19 -07:00
|
|
|
|
// memSeries is the in-memory representation of a series. None of its methods
// are goroutine safe and it is the caller's responsibility to lock it.
type memSeries struct {
	sync.RWMutex

	ref uint64
	lset labels.Labels
	// mmappedChunks are the immutable, disk-backed chunks of the series,
	// in time order, followed logically by headChunk.
	mmappedChunks []*mmappedChunk
	headChunk *memChunk
	chunkRange int64
	// firstChunkID is the chunk ID of mmappedChunks[0]; IDs grow by one
	// per chunk and remain stable across truncation.
	firstChunkID int

	nextAt int64 // Timestamp at which to cut the next chunk.
	// sampleBuf holds the last 4 float samples; they are served from memory
	// by memSafeIterator as the head chunk's bytes may still be mutated.
	sampleBuf [4]sample
	// histBuf mirrors sampleBuf for sparse histogram samples.
	histBuf [4]hist
	pendingCommit bool // Whether there are samples waiting to be committed to this series.

	app chunkenc.Appender // Current appender for the chunk.

	// memChunkPool recycles memChunk objects used when reading m-mapped chunks.
	memChunkPool *sync.Pool

	// txs tracks appendIDs for isolation.
	txs *txRing
}
|
|
|
|
|
|
2020-05-06 08:30:00 -07:00
|
|
|
|
func newMemSeries(lset labels.Labels, id uint64, chunkRange int64, memChunkPool *sync.Pool) *memSeries {
|
2019-01-08 09:08:41 -08:00
|
|
|
|
s := &memSeries{
|
2020-05-06 08:30:00 -07:00
|
|
|
|
lset: lset,
|
|
|
|
|
ref: id,
|
|
|
|
|
chunkRange: chunkRange,
|
|
|
|
|
nextAt: math.MinInt64,
|
|
|
|
|
txs: newTxRing(4),
|
|
|
|
|
memChunkPool: memChunkPool,
|
2019-01-08 09:08:41 -08:00
|
|
|
|
}
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-28 15:39:17 -07:00
|
|
|
|
func (s *memSeries) minTime() int64 {
|
2020-05-06 08:30:00 -07:00
|
|
|
|
if len(s.mmappedChunks) > 0 {
|
|
|
|
|
return s.mmappedChunks[0].minTime
|
2018-02-07 05:43:21 -08:00
|
|
|
|
}
|
2020-05-06 08:30:00 -07:00
|
|
|
|
if s.headChunk != nil {
|
|
|
|
|
return s.headChunk.minTime
|
|
|
|
|
}
|
|
|
|
|
return math.MinInt64
|
2017-08-28 15:39:17 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (s *memSeries) maxTime() int64 {
|
2018-02-07 05:43:21 -08:00
|
|
|
|
c := s.head()
|
|
|
|
|
if c == nil {
|
|
|
|
|
return math.MinInt64
|
|
|
|
|
}
|
|
|
|
|
return c.maxTime
|
2017-08-28 15:39:17 -07:00
|
|
|
|
}
|
|
|
|
|
|
2021-06-29 07:38:46 -07:00
|
|
|
|
// cutNewHeadChunk m-maps the current head chunk (if any) and replaces it with
// a fresh, empty chunk of encoding e starting at mint. It installs a new
// appender for the chunk and returns the new head chunk.
func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper) *memChunk {
	s.mmapCurrentHeadChunk(chunkDiskMapper)

	s.headChunk = &memChunk{
		minTime: mint,
		// maxTime is advanced as samples get appended.
		maxTime: math.MinInt64,
	}

	// Fall back to XOR encoding when the requested encoding is unknown.
	if chunkenc.IsValidEncoding(e) {
		var err error
		s.headChunk.chunk, err = chunkenc.NewEmptyChunk(e)
		if err != nil {
			panic(err) // This should never happen.
		}
	} else {
		s.headChunk.chunk = chunkenc.NewXORChunk()
	}

	// Set upper bound on when the next chunk must be started. An earlier timestamp
	// may be chosen dynamically at a later point.
	s.nextAt = rangeForTimestamp(mint, s.chunkRange)

	app, err := s.headChunk.chunk.Appender()
	if err != nil {
		panic(err)
	}
	s.app = app
	return s.headChunk
}
|
|
|
|
|
|
|
|
|
|
// mmapCurrentHeadChunk writes the current head chunk to the chunk disk mapper
// and records it in s.mmappedChunks. It is a no-op for a nil or empty head
// chunk. The head chunk itself is left in place; callers (cutNewHeadChunk)
// replace it afterwards.
func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) {
	if s.headChunk == nil || s.headChunk.chunk.NumSamples() == 0 {
		// There is no head chunk, so nothing to m-map here.
		return
	}

	chunkRef, err := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk)
	if err != nil {
		// A closed mapper is tolerated (happens on shutdown); anything else is fatal.
		if err != chunks.ErrChunkDiskMapperClosed {
			panic(err)
		}
	}
	s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{
		ref: chunkRef,
		numSamples: uint16(s.headChunk.chunk.NumSamples()),
		minTime: s.headChunk.minTime,
		maxTime: s.headChunk.maxTime,
	})
}
|
|
|
|
|
|
2017-08-28 15:39:17 -07:00
|
|
|
|
// appendable checks whether the given sample is valid for appending to the series.
|
|
|
|
|
func (s *memSeries) appendable(t int64, v float64) error {
|
2017-09-01 05:38:49 -07:00
|
|
|
|
c := s.head()
|
|
|
|
|
if c == nil {
|
2017-08-28 15:39:17 -07:00
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if t > c.maxTime {
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
if t < c.maxTime {
|
2020-03-16 14:52:02 -07:00
|
|
|
|
return storage.ErrOutOfOrderSample
|
2017-08-28 15:39:17 -07:00
|
|
|
|
}
|
|
|
|
|
// We are allowing exact duplicates as we can encounter them in valid cases
|
|
|
|
|
// like federation and erroring out at that time would be extremely noisy.
|
2018-11-14 06:02:32 -08:00
|
|
|
|
if math.Float64bits(s.sampleBuf[3].v) != math.Float64bits(v) {
|
2020-03-16 14:52:02 -07:00
|
|
|
|
return storage.ErrDuplicateSampleForTimestamp
|
2017-08-28 15:39:17 -07:00
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
2021-06-29 07:38:46 -07:00
|
|
|
|
// appendableHistogram checks whether the given sample is valid for appending to the series.
|
|
|
|
|
func (s *memSeries) appendableHistogram(t int64, sh histogram.SparseHistogram) error {
|
|
|
|
|
c := s.head()
|
|
|
|
|
if c == nil {
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if t > c.maxTime {
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
if t < c.maxTime {
|
|
|
|
|
return storage.ErrOutOfOrderSample
|
|
|
|
|
}
|
|
|
|
|
// TODO: do it for histogram.
|
|
|
|
|
// We are allowing exact duplicates as we can encounter them in valid cases
|
|
|
|
|
// like federation and erroring out at that time would be extremely noisy.
|
|
|
|
|
//if math.Float64bits(s.sampleBuf[3].v) != math.Float64bits(v) {
|
|
|
|
|
// return storage.ErrDuplicateSampleForTimestamp
|
|
|
|
|
//}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-06 08:30:00 -07:00
|
|
|
|
// chunk returns the chunk for the chunk id from memory or by m-mapping it from the disk.
// If garbageCollect is true, it means that the returned *memChunk
// (and not the chunkenc.Chunk inside it) can be garbage collected after it's usage,
// i.e. handed back to s.memChunkPool.
// It returns storage.ErrNotFound for IDs outside the series' current chunk range.
func (s *memSeries) chunk(id int, chunkDiskMapper *chunks.ChunkDiskMapper) (chunk *memChunk, garbageCollect bool, err error) {
	// ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are
	// incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index.
	// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix
	// is len(s.mmappedChunks), it represents the next chunk, which is the head chunk.
	ix := id - s.firstChunkID
	if ix < 0 || ix > len(s.mmappedChunks) {
		return nil, false, storage.ErrNotFound
	}
	if ix == len(s.mmappedChunks) {
		if s.headChunk == nil {
			return nil, false, errors.New("invalid head chunk")
		}
		// The head chunk is owned by the series and must not be pooled.
		return s.headChunk, false, nil
	}
	chk, err := chunkDiskMapper.Chunk(s.mmappedChunks[ix].ref)
	if err != nil {
		// On-disk corruption is unrecoverable here.
		if _, ok := err.(*chunks.CorruptionErr); ok {
			panic(err)
		}
		return nil, false, err
	}
	// Wrap the m-mapped chunk in a pooled memChunk; the caller returns it to
	// the pool (garbageCollect == true).
	mc := s.memChunkPool.Get().(*memChunk)
	mc.chunk = chk
	mc.minTime = s.mmappedChunks[ix].minTime
	mc.maxTime = s.mmappedChunks[ix].maxTime
	return mc, true, nil
}
|
|
|
|
|
|
|
|
|
|
// chunkID returns the stable chunk ID for the chunk at position pos in the
// series' chunk list (m-mapped chunks followed by the head chunk).
func (s *memSeries) chunkID(pos int) int {
	return pos + s.firstChunkID
}
|
|
|
|
|
|
2020-07-21 00:02:13 -07:00
|
|
|
|
// truncateChunksBefore removes all chunks from the series that
// have no timestamp at or after mint.
// Chunk IDs remain unchanged (firstChunkID is advanced by the number removed).
// It returns the number of chunks removed.
func (s *memSeries) truncateChunksBefore(mint int64) (removed int) {
	if s.headChunk != nil && s.headChunk.maxTime < mint {
		// If head chunk is truncated, we can truncate all mmapped chunks.
		removed = 1 + len(s.mmappedChunks)
		s.firstChunkID += removed
		s.headChunk = nil
		s.mmappedChunks = nil
		return removed
	}
	if len(s.mmappedChunks) > 0 {
		// Find how many leading m-mapped chunks end strictly before mint.
		for i, c := range s.mmappedChunks {
			if c.maxTime >= mint {
				break
			}
			removed = i + 1
		}
		// NOTE(review): compacting into the same backing array leaves the
		// trailing *mmappedChunk pointers beyond the new length reachable
		// until overwritten by later appends.
		s.mmappedChunks = append(s.mmappedChunks[:0], s.mmappedChunks[removed:]...)
		s.firstChunkID += removed
	}
	return removed
}
|
|
|
|
|
|
2020-02-12 11:22:27 -08:00
|
|
|
|
// append adds the sample (t, v) to the series. The caller also has to provide
|
2020-02-28 17:39:26 -08:00
|
|
|
|
// the appendID for isolation. (The appendID can be zero, which results in no
|
|
|
|
|
// isolation for this append.)
|
2020-05-06 08:30:00 -07:00
|
|
|
|
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
|
|
|
|
|
func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper) (sampleInOrder, chunkCreated bool) {
|
2021-06-29 07:38:46 -07:00
|
|
|
|
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, chunkDiskMapper)
|
|
|
|
|
if !sampleInOrder {
|
|
|
|
|
return sampleInOrder, chunkCreated
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
s.app.Append(t, v)
|
|
|
|
|
|
|
|
|
|
c.maxTime = t
|
|
|
|
|
|
|
|
|
|
s.sampleBuf[0] = s.sampleBuf[1]
|
|
|
|
|
s.sampleBuf[1] = s.sampleBuf[2]
|
|
|
|
|
s.sampleBuf[2] = s.sampleBuf[3]
|
|
|
|
|
s.sampleBuf[3] = sample{t: t, v: v}
|
|
|
|
|
|
|
|
|
|
if appendID > 0 {
|
|
|
|
|
s.txs.add(appendID)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return true, chunkCreated
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// appendHistogram adds the sparse histogram.
|
|
|
|
|
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
|
|
|
|
|
func (s *memSeries) appendHistogram(t int64, sh histogram.SparseHistogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper) (sampleInOrder, chunkCreated bool) {
|
|
|
|
|
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncSHS, chunkDiskMapper)
|
|
|
|
|
if !sampleInOrder {
|
|
|
|
|
return sampleInOrder, chunkCreated
|
|
|
|
|
}
|
|
|
|
|
|
2021-07-02 08:58:20 -07:00
|
|
|
|
if !chunkCreated {
|
|
|
|
|
// Head controls the execution of recoding, so that we own the proper chunk reference afterwards
|
|
|
|
|
app, _ := s.app.(*chunkenc.HistoAppender)
|
|
|
|
|
posInterjections, negInterjections, ok := app.Appendable(sh)
|
|
|
|
|
// we have 3 cases here
|
|
|
|
|
// !ok -> we need to cut a new chunk
|
|
|
|
|
// ok but we have interjections -> existing chunk needs recoding before we can append our histogram
|
|
|
|
|
// ok and no interjections -> chunk is ready to support our histogram
|
|
|
|
|
if !ok {
|
|
|
|
|
c = s.cutNewHeadChunk(t, chunkenc.EncSHS, chunkDiskMapper)
|
|
|
|
|
chunkCreated = true
|
|
|
|
|
} else if len(posInterjections) > 0 || len(negInterjections) > 0 {
|
|
|
|
|
// new buckets have appeared. we need to recode all prior histograms within the chunk before we can process this one.
|
2021-07-05 08:57:26 -07:00
|
|
|
|
chunk, app := app.Recode(posInterjections, negInterjections, sh.PositiveSpans, sh.NegativeSpans)
|
|
|
|
|
s.headChunk = &memChunk{
|
|
|
|
|
minTime: s.headChunk.minTime,
|
|
|
|
|
maxTime: s.headChunk.maxTime,
|
|
|
|
|
chunk: chunk,
|
|
|
|
|
}
|
|
|
|
|
s.app = app
|
2021-07-02 08:58:20 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-06-29 07:38:46 -07:00
|
|
|
|
s.app.AppendHistogram(t, sh)
|
|
|
|
|
|
|
|
|
|
c.maxTime = t
|
|
|
|
|
|
2021-07-05 08:57:26 -07:00
|
|
|
|
s.histBuf[0] = s.histBuf[1]
|
|
|
|
|
s.histBuf[1] = s.histBuf[2]
|
|
|
|
|
s.histBuf[2] = s.histBuf[3]
|
|
|
|
|
s.histBuf[3] = hist{t: t, h: sh}
|
|
|
|
|
|
2021-06-29 07:38:46 -07:00
|
|
|
|
if appendID > 0 {
|
|
|
|
|
s.txs.add(appendID)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return true, chunkCreated
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// appendPreprocessor takes care of cutting new chunks and m-mapping old chunks.
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
// This should be called only when appending data.
// It returns the chunk the sample should go into, whether the sample is
// in order (appendable), and whether a new chunk was cut.
func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper) (c *memChunk, sampleInOrder, chunkCreated bool) {
	// Based on Gorilla white papers this offers near-optimal compression ratio
	// so anything bigger than this has diminishing returns and increases
	// the time range within which we have to decompress all samples.
	const samplesPerChunk = 120

	c = s.head()

	if c == nil {
		if len(s.mmappedChunks) > 0 && s.mmappedChunks[len(s.mmappedChunks)-1].maxTime >= t {
			// Out of order sample. Sample timestamp is already in the mmaped chunks, so ignore it.
			return c, false, false
		}
		// There is no chunk in this series yet, create the first chunk for the sample.
		c = s.cutNewHeadChunk(t, e, chunkDiskMapper)
		chunkCreated = true
	}
	numSamples := c.chunk.NumSamples()

	// Out of order sample.
	if c.maxTime >= t {
		return c, false, chunkCreated
	}
	// If we reach 25% of a chunk's desired sample count, set a definitive time
	// at which to start the next chunk.
	// At latest it must happen at the timestamp set when the chunk was cut.
	if numSamples == samplesPerChunk/4 {
		s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt)
	}
	if t >= s.nextAt {
		c = s.cutNewHeadChunk(t, e, chunkDiskMapper)
		chunkCreated = true
	}
	return c, true, chunkCreated
}
|
|
|
|
|
|
2020-02-12 11:22:27 -08:00
|
|
|
|
// cleanupAppendIDsBelow cleans up older appendIDs from the series' isolation
// ring. Has to be called after acquiring lock.
func (s *memSeries) cleanupAppendIDsBelow(bound uint64) {
	s.txs.cleanupAppendIDsBelow(bound)
}
|
|
|
|
|
|
|
|
|
|
// computeChunkEndTime estimates the end timestamp based the beginning of a
// chunk, its current timestamp and the upper bound up to which we insert data.
// It assumes that the time range covered so far is 1/4 of the chunk's target,
// and extrapolates the end accordingly (capped at max).
func computeChunkEndTime(start, cur, max int64) int64 {
	span := max - start
	filled := cur - start + 1
	slots := span / (filled * 4)
	if slots == 0 {
		return max
	}
	return start + span/slots
}
|
|
|
|
|
|
2020-05-06 08:30:00 -07:00
|
|
|
|
// iterator returns a chunk iterator for the chunk with the given ID,
// truncated according to the isolation state (samples from appends the
// isoState must not see are cut off via stopAfter).
// It is unsafe to call this concurrently with s.append(...) without holding the series lock.
func (s *memSeries) iterator(id int, isoState *isolationState, chunkDiskMapper *chunks.ChunkDiskMapper, it chunkenc.Iterator) (ttt chunkenc.Iterator) {
	c, garbageCollect, err := s.chunk(id, chunkDiskMapper)
	// TODO(fabxc): Work around! An error is returned when a querier has retrieved a pointer to a
	// series's chunk, which got then garbage collected before it got
	// accessed. We must ensure to not garbage collect as long as any
	// readers still hold a reference.
	if err != nil {
		return chunkenc.NewNopIterator()
	}
	defer func() {
		if garbageCollect {
			// Set this to nil so that Go GC can collect it after it has been used.
			// This should be done always at the end.
			c.chunk = nil
			s.memChunkPool.Put(c)
		}
	}()

	ix := id - s.firstChunkID

	numSamples := c.chunk.NumSamples()
	stopAfter := numSamples

	if isoState != nil {
		totalSamples := 0 // Total samples in this series.
		previousSamples := 0 // Samples before this chunk.

		for j, d := range s.mmappedChunks {
			totalSamples += int(d.numSamples)
			if j < ix {
				previousSamples += int(d.numSamples)
			}
		}

		if s.headChunk != nil {
			totalSamples += s.headChunk.chunk.NumSamples()
		}

		// Removing the extra transactionIDs that are relevant for samples that
		// come after this chunk, from the total transactionIDs.
		appendIDsToConsider := s.txs.txIDCount - (totalSamples - (previousSamples + numSamples))

		// Iterate over the appendIDs, find the first one that the isolation state says not
		// to return.
		it := s.txs.iterator()
		for index := 0; index < appendIDsToConsider; index++ {
			appendID := it.At()
			if appendID <= isoState.maxAppendID { // Easy check first.
				if _, ok := isoState.incompleteAppends[appendID]; !ok {
					it.Next()
					continue
				}
			}
			stopAfter = numSamples - (appendIDsToConsider - index)
			if stopAfter < 0 {
				stopAfter = 0 // Stopped in a previous chunk.
			}
			break
		}
	}

	if stopAfter == 0 {
		return chunkenc.NewNopIterator()
	}

	// M-mapped chunks are immutable, so they can be iterated directly
	// (wrapped in a stopIterator only if isolation truncates them).
	if id-s.firstChunkID < len(s.mmappedChunks) {
		if stopAfter == numSamples {
			return c.chunk.Iterator(it)
		}
		if msIter, ok := it.(*stopIterator); ok {
			// Reuse the caller's iterator to avoid an allocation.
			msIter.Iterator = c.chunk.Iterator(msIter.Iterator)
			msIter.i = -1
			msIter.stopAfter = stopAfter
			return msIter
		}
		return &stopIterator{
			Iterator: c.chunk.Iterator(it),
			i: -1,
			stopAfter: stopAfter,
		}
	}
	// Serve the last 4 samples for the last chunk from the sample buffer
	// as their compressed bytes may be mutated by added samples.
	if msIter, ok := it.(*memSafeIterator); ok {
		// Reuse the caller's iterator to avoid an allocation.
		msIter.Iterator = c.chunk.Iterator(msIter.Iterator)
		msIter.i = -1
		msIter.total = numSamples
		msIter.stopAfter = stopAfter
		msIter.buf = s.sampleBuf
		msIter.histBuf = s.histBuf
		return msIter
	}
	return &memSafeIterator{
		stopIterator: stopIterator{
			Iterator: c.chunk.Iterator(it),
			i: -1,
			stopAfter: stopAfter,
		},
		total: numSamples,
		buf: s.sampleBuf,
		histBuf: s.histBuf,
	}
}
|
|
|
|
|
|
2017-01-11 04:02:38 -08:00
|
|
|
|
// head returns the series' in-memory head chunk (the chunk currently being
// appended to), which may be nil.
func (s *memSeries) head() *memChunk {
	return s.headChunk
}
|
|
|
|
|
|
|
|
|
|
// memChunk holds an in-memory chunk together with the inclusive time range
// [minTime, maxTime] of the samples it contains.
type memChunk struct {
	chunk            chunkenc.Chunk
	minTime, maxTime int64
}
|
|
|
|
|
|
2020-05-06 08:30:00 -07:00
|
|
|
|
// OverlapsClosedInterval returns true if the chunk overlaps [mint, maxt].
|
2018-07-02 01:23:36 -07:00
|
|
|
|
func (mc *memChunk) OverlapsClosedInterval(mint, maxt int64) bool {
|
|
|
|
|
return mc.minTime <= maxt && mint <= mc.maxTime
|
|
|
|
|
}
|
|
|
|
|
|
2020-02-12 11:22:27 -08:00
|
|
|
|
// stopIterator wraps a chunk iterator and cuts iteration short after a fixed
// number of samples, so that samples appended after the iterator was created
// are not exposed.
type stopIterator struct {
	chunkenc.Iterator

	// i is the index of the last returned sample (-1 before the first Next).
	// Iteration stops once stopAfter samples have been returned.
	i, stopAfter int
}
|
|
|
|
|
|
|
|
|
|
func (it *stopIterator) Next() bool {
|
|
|
|
|
if it.i+1 >= it.stopAfter {
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
it.i++
|
|
|
|
|
return it.Iterator.Next()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// memSafeIterator extends stopIterator and serves the last four samples of
// the head chunk from in-memory buffers instead of the chunk itself, because
// the chunk's compressed bytes may still be mutated by concurrent appends.
type memSafeIterator struct {
	stopIterator

	total   int       // Total number of samples in the chunk.
	buf     [4]sample // Copy of the series' last 4 (t, v) samples.
	histBuf [4]hist   // Copy of the series' last 4 histogram samples.
}
|
|
|
|
|
|
2021-04-26 15:43:22 -07:00
|
|
|
|
func (it *memSafeIterator) Seek(t int64) bool {
|
|
|
|
|
if it.Err() != nil {
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ts, _ := it.At()
|
|
|
|
|
|
|
|
|
|
for t > ts || it.i == -1 {
|
|
|
|
|
if !it.Next() {
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
ts, _ = it.At()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
|
2017-01-09 07:51:39 -08:00
|
|
|
|
func (it *memSafeIterator) Next() bool {
|
2020-02-12 11:22:27 -08:00
|
|
|
|
if it.i+1 >= it.stopAfter {
|
2017-01-09 07:51:39 -08:00
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
it.i++
|
2021-07-05 09:21:35 -07:00
|
|
|
|
if it.total-it.i > 4 {
|
2017-01-09 07:51:39 -08:00
|
|
|
|
return it.Iterator.Next()
|
|
|
|
|
}
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (it *memSafeIterator) At() (int64, float64) {
|
|
|
|
|
if it.total-it.i > 4 {
|
|
|
|
|
return it.Iterator.At()
|
|
|
|
|
}
|
|
|
|
|
s := it.buf[4-(it.total-it.i)]
|
|
|
|
|
return s.t, s.v
|
|
|
|
|
}
|
2017-11-30 06:34:49 -08:00
|
|
|
|
|
2021-07-05 08:57:26 -07:00
|
|
|
|
func (it *memSafeIterator) AtHistogram() (int64, histogram.SparseHistogram) {
|
|
|
|
|
if it.total-it.i > 4 {
|
|
|
|
|
return it.Iterator.AtHistogram()
|
|
|
|
|
}
|
|
|
|
|
s := it.histBuf[4-(it.total-it.i)]
|
|
|
|
|
return s.t, s.h
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-06 08:30:00 -07:00
|
|
|
|
// mmappedChunk keeps the in-memory metadata of a head chunk that has been
// written out and memory-mapped.
type mmappedChunk struct {
	ref              uint64 // Reference used to look up the chunk data on disk.
	numSamples       uint16 // Number of samples stored in the chunk.
	minTime, maxTime int64  // Inclusive time range covered by the chunk.
}

// OverlapsClosedInterval returns true if the chunk overlaps [mint, maxt].
func (mc *mmappedChunk) OverlapsClosedInterval(mint, maxt int64) bool {
	// There is an overlap unless the chunk lies entirely before or
	// entirely after the queried interval.
	return !(mc.maxTime < mint || maxt < mc.minTime)
}
|
2020-05-20 06:22:08 -07:00
|
|
|
|
|
|
|
|
|
// SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series.
// It is always a no-op in Prometheus and mainly meant for external users who import TSDB.
// All the callbacks should be safe to be called concurrently.
// It is up to the user to implement soft or hard consistency by making the callbacks
// atomic or non-atomic. Atomic callbacks can cause performance degradation.
type SeriesLifecycleCallback interface {
	// PreCreation is called before creating a series to indicate if the series can be created.
	// A non-nil error means the series should not be created.
	PreCreation(labels.Labels) error
	// PostCreation is called after creating a series to indicate a creation of series.
	PostCreation(labels.Labels)
	// PostDeletion is called after deletion of series.
	PostDeletion(...labels.Labels)
}
|
|
|
|
|
|
|
|
|
|
// noopSeriesLifecycleCallback is the default SeriesLifecycleCallback: every
// hook does nothing.
type noopSeriesLifecycleCallback struct{}

func (noopSeriesLifecycleCallback) PreCreation(labels.Labels) error { return nil }
func (noopSeriesLifecycleCallback) PostCreation(labels.Labels)     {}
func (noopSeriesLifecycleCallback) PostDeletion(...labels.Labels)  {}
|
2020-10-12 14:15:40 -07:00
|
|
|
|
|
|
|
|
|
func (h *Head) Size() int64 {
|
|
|
|
|
var walSize int64
|
|
|
|
|
if h.wal != nil {
|
|
|
|
|
walSize, _ = h.wal.Size()
|
|
|
|
|
}
|
2020-11-03 02:04:59 -08:00
|
|
|
|
cdmSize, _ := h.chunkDiskMapper.Size()
|
|
|
|
|
return walSize + cdmSize
|
2020-10-12 14:15:40 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Size returns the size of the underlying Head, since a RangeHead shares
// its on-disk storage with the full Head.
func (h *RangeHead) Size() int64 {
	return h.head.Size()
}
|
2021-06-05 07:29:32 -07:00
|
|
|
|
|
|
|
|
|
func (h *Head) startWALReplayStatus(startFrom, last int) {
|
|
|
|
|
h.stats.WALReplayStatus.Lock()
|
|
|
|
|
defer h.stats.WALReplayStatus.Unlock()
|
|
|
|
|
|
|
|
|
|
h.stats.WALReplayStatus.Min = startFrom
|
|
|
|
|
h.stats.WALReplayStatus.Max = last
|
|
|
|
|
h.stats.WALReplayStatus.Current = startFrom
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (h *Head) updateWALReplayStatusRead(current int) {
|
|
|
|
|
h.stats.WALReplayStatus.Lock()
|
|
|
|
|
defer h.stats.WALReplayStatus.Unlock()
|
|
|
|
|
|
|
|
|
|
h.stats.WALReplayStatus.Current = current
|
|
|
|
|
}
|