Mirror of https://github.com/prometheus/prometheus.git
Merge pull request #216 from grafana/merge_upstream
add option to use the new chunk disk mapper from upstream
Commit 64e6c171c2
@@ -171,6 +171,9 @@ type Options struct {
 	// SeriesHashCache specifies the series hash cache used when querying shards via Querier.Select().
 	// If nil, the cache won't be used.
 	SeriesHashCache *hashcache.SeriesHashCache
+
+	// Temporary flag which we use to select whether we want to use the new or the old chunk disk mapper.
+	NewChunkDiskMapper bool
 }
 
 type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{}
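
As a usage illustration only (not part of the diff), here is a minimal sketch of how an embedder of this fork could opt into the upstream mapper via the new Options field. It assumes the standard Prometheus TSDB API (tsdb.DefaultOptions, tsdb.Open) and that nil logger, registerer and stats arguments are acceptable, as they are upstream; verify against this fork before relying on it.

package main

import (
	"log"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	opts := tsdb.DefaultOptions()
	opts.NewChunkDiskMapper = true // temporary flag added by this change

	// Assumed call shape: Open(dir, logger, registerer, opts, stats).
	db, err := tsdb.Open("./data", nil, nil, opts, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
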
@@ -734,6 +737,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
 	headOpts.EnableExemplarStorage = opts.EnableExemplarStorage
 	headOpts.MaxExemplars.Store(opts.MaxExemplars)
 	headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown
+	headOpts.NewChunkDiskMapper = opts.NewChunkDiskMapper
 	if opts.IsolationDisabled {
 		// We only override this flag if isolation is disabled at DB level. We use the default otherwise.
 		headOpts.IsolationDisabled = opts.IsolationDisabled
@@ -154,6 +154,10 @@ type HeadOptions struct {
 	EnableMemorySnapshotOnShutdown bool
 
 	IsolationDisabled bool
+
+	// Temporary flag which we use to select whether to use the new (used in upstream
+	// Prometheus) or the old (legacy) chunk disk mapper.
+	NewChunkDiskMapper bool
 }
 
 func DefaultHeadOptions() *HeadOptions {
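
For a Head constructed directly, as the test further down does with NewHead(nil, nil, w, opts, nil), the same flag is set on HeadOptions. A sketch under the assumption that NewHead tolerates a nil WAL, registerer, logger and stats, as it does upstream; the helper name below is hypothetical.

package headexample

import "github.com/prometheus/prometheus/tsdb"

// newHeadWithUpstreamMapper is a hypothetical helper, not part of this change.
func newHeadWithUpstreamMapper(chunkDir string) (*tsdb.Head, error) {
	opts := tsdb.DefaultHeadOptions()
	opts.ChunkDirRoot = chunkDir   // directory holding the memory-mapped head chunk files
	opts.NewChunkDiskMapper = true // select the upstream (new) chunk disk mapper

	return tsdb.NewHead(nil, nil, nil, opts, nil)
}
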
@@ -167,6 +171,7 @@ func DefaultHeadOptions() *HeadOptions {
 		StripeSize:         DefaultStripeSize,
 		SeriesCallback:     &noopSeriesLifecycleCallback{},
 		IsolationDisabled:  defaultIsolationDisabled,
+		NewChunkDiskMapper: false,
 	}
 }
 
@@ -229,7 +234,7 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOpti
 		opts.ChunkPool = chunkenc.NewPool()
 	}
 
-	if opts.ChunkWriteQueueSize > 0 {
+	if opts.NewChunkDiskMapper {
 		h.chunkDiskMapper, err = chunks.NewChunkDiskMapper(
 			r,
 			mmappedChunksDir(opts.ChunkDirRoot),
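
The hunk above only swaps the condition that decides which mapper NewHead wires into the Head; presumably both mappers satisfy a common interface held in the chunkDiskMapper field. A simplified, generic sketch of that flag-dispatch pattern follows; the interface and type names are illustrative, not identifiers from this repository.

package main

import "fmt"

// chunkMapper stands in for the interface both mapper implementations would satisfy.
type chunkMapper interface {
	Name() string
}

type upstreamMapper struct{}

func (upstreamMapper) Name() string { return "new (upstream) chunk disk mapper" }

type legacyMapper struct{}

func (legacyMapper) Name() string { return "old (legacy) chunk disk mapper" }

// selectMapper mirrors the dispatch style used in NewHead: a temporary boolean
// flag picks one of two implementations assigned to the same field.
func selectMapper(useNew bool) chunkMapper {
	if useNew {
		return upstreamMapper{}
	}
	return legacyMapper{}
}

func main() {
	fmt.Println(selectMapper(true).Name())  // new (upstream) chunk disk mapper
	fmt.Println(selectMapper(false).Name()) // old (legacy) chunk disk mapper
}
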
@@ -1663,7 +1663,6 @@ func TestHeadReadWriterRepair(t *testing.T) {
 	opts := DefaultHeadOptions()
 	opts.ChunkRange = chunkRange
 	opts.ChunkDirRoot = dir
-	opts.ChunkWriteQueueSize = 1 // We need to set this option so that we use the async queue. Upstream prometheus uses the queue directly.
 	h, err := NewHead(nil, nil, w, opts, nil)
 	require.NoError(t, err)
 	require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.mmapChunkCorruptionTotal))
@@ -1688,7 +1687,14 @@ func TestHeadReadWriterRepair(t *testing.T) {
 	// take effect without another chunk being written.
 	files, err := ioutil.ReadDir(mmappedChunksDir(dir))
 	require.NoError(t, err)
-	require.Equal(t, 6, len(files))
 
+	// With the new chunk disk mapper we only expect 6 files, because the last call to "CutNewFile()" won't
+	// take effect until the next chunk is being written.
+	if opts.NewChunkDiskMapper {
+		require.Equal(t, 6, len(files))
+	} else {
+		require.Equal(t, 7, len(files))
+	}
+
 	// Corrupt the 4th file by writing a random byte to series ref.
 	f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0o666)
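
Because the assertion now branches on opts.NewChunkDiskMapper, the expected mmapped-file count differs per mode (6 with the new mapper, 7 with the old one). Below is a hedged sketch of how both paths could be exercised with a table-driven subtest; testBothMappers and its callback are hypothetical, not helpers from this repository.

package tsdb_test

import (
	"fmt"
	"testing"
)

// testBothMappers runs the given test body once per chunk disk mapper mode.
func testBothMappers(t *testing.T, testFn func(t *testing.T, newChunkDiskMapper bool)) {
	for _, newMapper := range []bool{false, true} {
		newMapper := newMapper // capture the loop variable for the subtest closure
		t.Run(fmt.Sprintf("NewChunkDiskMapper=%t", newMapper), func(t *testing.T) {
			testFn(t, newMapper)
		})
	}
}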