Mirror of https://github.com/prometheus/prometheus.git (synced 2025-01-13 06:47:28 -08:00)
Merge pull request #14597 from prometheus/merge-2.54-to-main
Merge 2.54 to main

Commit 73abc8a8be
CHANGELOG.md (44 changed lines):

```diff
@@ -3,8 +3,48 @@
 ## unreleased

 * [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
+* [FEATURE] Remote-Write: Add sender and receiver support for [Remote Write 2.0-rc.2](https://prometheus.io/docs/specs/remote_write_spec_2_0/) specification #14395 #14427 #14444
+* [ENHANCEMENT] Remote-Write: 1.x messages against Remote Write 2.x Receivers will have now correct values for `prometheus_storage_<samples|histograms|exemplar>_failed_total` in case of partial errors #14444

+## 2.54.0-rc.1 / 2024-08-05
+
+Release 2.54 brings a release candidate of a major new version of [Remote Write: 2.0](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
+This is experimental at this time and may still change.
+Remote-write v2 is enabled by default, but can be disabled via feature-flag `web.remote-write-receiver.accepted-protobuf-messages`.
+
+* [CHANGE] Remote-Write: `highest_timestamp_in_seconds` and `queue_highest_sent_timestamp_seconds` metrics now initialized to 0. #14437
+* [CHANGE] API: Split warnings from info annotations in API response. #14327
+* [FEATURE] Remote-Write: Version 2.0 experimental, plus metadata in WAL via feature flag `metadata-wal-records` (defaults on). #14395,#14427,#14444
+* [FEATURE] PromQL: add limitk() and limit_ratio() aggregation operators. #12503
+* [ENHANCEMENT] PromQL: Accept underscores in literal numbers, e.g. 1_000_000 for 1 million. #12821
+* [ENHANCEMENT] PromQL: float literal numbers and durations are now interchangeable (experimental). Example: `time() - my_timestamp > 10m`. #9138
+* [ENHANCEMENT] PromQL: use Kahan summation for sum(). #14074,#14362
+* [ENHANCEMENT] PromQL (experimental native histograms): Optimize `histogram_count` and `histogram_sum` functions. #14097
+* [ENHANCEMENT] TSDB: Better support for out-of-order experimental native histogram samples. #14438
+* [ENHANCEMENT] TSDB: Optimise seek within index. #14393
+* [ENHANCEMENT] TSDB: Optimise deletion of stale series. #14307
+* [ENHANCEMENT] TSDB: Reduce locking to optimise adding and removing series. #13286,#14286
+* [ENHANCEMENT] TSDB: Small optimisation: streamline special handling for out-of-order data. #14396,#14584
+* [ENHANCEMENT] Regexps: Optimize patterns with multiple prefixes. #13843,#14368
+* [ENHANCEMENT] Regexps: Optimize patterns containing multiple literal strings. #14173
+* [ENHANCEMENT] AWS SD: expose Primary IPv6 addresses as __meta_ec2_primary_ipv6_addresses. #14156
+* [ENHANCEMENT] Docker SD: add MatchFirstNetwork for containers with multiple networks. #10490
+* [ENHANCEMENT] OpenStack SD: Use `flavor.original_name` if available. #14312
+* [ENHANCEMENT] UI (experimental native histograms): more accurate representation. #13680,#14430
+* [ENHANCEMENT] Agent: `out_of_order_time_window` config option now applies to agent. #14094
+* [ENHANCEMENT] Notifier: Send any outstanding Alertmanager notifications when shutting down. #14290
+* [ENHANCEMENT] Rules: Add label-matcher support to Rules API. #10194
+* [ENHANCEMENT] HTTP API: Add url to message logged on error while sending response. #14209
+* [BUGFIX] CLI: escape `|` characters when generating docs. #14420
+* [BUGFIX] PromQL (experimental native histograms): Fix some binary operators between native histogram values. #14454
+* [BUGFIX] TSDB: LabelNames API could fail during compaction. #14279
+* [BUGFIX] TSDB: Fix rare issue where pending OOO read can be left dangling if creating querier fails. #14341
+* [BUGFIX] TSDB: fix check for context cancellation in LabelNamesFor. #14302
+* [BUGFIX] Rules: Fix rare panic on reload. #14366
+* [BUGFIX] Config: In YAML marshalling, do not output a regexp field if it was never set. #14004
+* [BUGFIX] Remote-Write: reject samples with future timestamps. #14304
+* [BUGFIX] Remote-Write: Fix data corruption in remote write if max_sample_age is applied. #14078
+* [BUGFIX] Notifier: Fix Alertmanager discovery not updating under heavy load. #14174
+* [BUGFIX] Regexes: some Unicode characters were not matched by case-insensitive comparison. #14170,#14299
+* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515
+
 ## 2.53.1 / 2024-07-10
```
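
One entry above that benefits from a concrete illustration is the PromQL change "use Kahan summation for sum()" (#14074, #14362). The sketch below is illustrative only, not the PromQL engine's implementation; it shows the compensated-summation idea the entry refers to:

```go
package main

import "fmt"

// kahanSum keeps a compensation term that captures the low-order bits
// lost each time a small value is added to a much larger accumulator.
func kahanSum(xs []float64) float64 {
	var sum, c float64
	for _, x := range xs {
		y := x - c        // re-apply the bits lost on the previous step
		t := sum + y      // big + small: low-order bits of y may be lost...
		c = (t - sum) - y // ...and are recovered here for the next step
		sum = t
	}
	return sum
}

func main() {
	xs := make([]float64, 10_000_000)
	for i := range xs {
		xs[i] = 0.1
	}
	var naive float64
	for _, x := range xs {
		naive += x
	}
	// The naive sum drifts visibly below 1000000; the Kahan sum stays
	// within an ulp or two of it.
	fmt.Printf("naive: %.6f\nkahan: %.6f\n", naive, kahanSum(xs))
}
```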
config/config.go (path inferred):

```diff
@@ -1090,8 +1090,9 @@ func (m RemoteWriteProtoMsgs) String() string {
 }
 
 var (
-	// RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf
-	// message introduced in the https://prometheus.io/docs/specs/remote_write_spec/.
+	// RemoteWriteProtoMsgV1 represents the `prometheus.WriteRequest` protobuf
+	// message introduced in the https://prometheus.io/docs/specs/remote_write_spec/,
+	// which will eventually be deprecated.
 	//
 	// NOTE: This string is used for both HTTP header values and config value, so don't change
 	// this reference.
```
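
The NOTE in the hunk explains the design constraint: the same string literal is sent as an HTTP header value and written into YAML config, which is why the message identifiers are typed strings rather than opaque enum values. A hypothetical minimal version of that pattern (only the two spec message names come from this diff; the `protoMsg` type and `validate` helper are invented for illustration):

```go
package main

import "fmt"

// protoMsg mimics the typed-string pattern from the hunk above: the
// string value itself is what travels in HTTP headers and YAML config.
type protoMsg string

const (
	protoMsgV1 protoMsg = "prometheus.WriteRequest"        // Remote Write 1.0, eventually deprecated
	protoMsgV2 protoMsg = "io.prometheus.write.v2.Request" // Remote Write 2.0
)

// validate rejects anything that is not a known message identifier.
func (m protoMsg) validate() error {
	switch m {
	case protoMsgV1, protoMsgV2:
		return nil
	default:
		return fmt.Errorf("unknown remote write protobuf message %q", m)
	}
}

func main() {
	fmt.Println(protoMsgV2.validate())        // <nil>
	fmt.Println(protoMsg("bogus").validate()) // unknown remote write protobuf message "bogus"
}
```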
documentation/examples/remote_storage/example_write_adapter/README.md (path inferred):

````diff
@@ -19,7 +19,7 @@ remote_write:
     protobuf_message: "io.prometheus.write.v2.Request"
 ```
 
-or for deprecated Remote Write 1.0 message:
+or for the eventually deprecated Remote Write 1.0 message:
 
 ```yaml
 remote_write:
````
tsdb/db.go (path inferred):

```diff
@@ -1295,6 +1295,9 @@ func (db *DB) CompactOOOHead(ctx context.Context) error {
 	return db.compactOOOHead(ctx)
 }
 
+// Callback for testing.
+var compactOOOHeadTestingCallback func()
+
 func (db *DB) compactOOOHead(ctx context.Context) error {
 	if !db.oooWasEnabled.Load() {
 		return nil
@@ -1304,6 +1307,11 @@ func (db *DB) compactOOOHead(ctx context.Context) error {
 		return fmt.Errorf("get ooo compaction head: %w", err)
 	}
 
+	if compactOOOHeadTestingCallback != nil {
+		compactOOOHeadTestingCallback()
+		compactOOOHeadTestingCallback = nil
+	}
+
 	ulids, err := db.compactOOO(db.dir, oooHead)
 	if err != nil {
 		return fmt.Errorf("compact ooo head: %w", err)
```
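
The new compactOOOHeadTestingCallback hook is the hinge of this change: it lets a test run arbitrary code after the out-of-order compaction head has been built but before compaction uses it. A minimal, self-contained reproduction of the pattern (assumed names, not the Prometheus code):

```go
package main

import "fmt"

// compactTestingCallback is a package-level hook a test can set to run
// extra work at an exact point inside compaction. It is invoked at most
// once and cleared immediately, so subsequent compactions run unhooked.
var compactTestingCallback func()

func compactOOOHead() {
	// ... the real code fetches the OOO compaction head here ...
	if compactTestingCallback != nil {
		compactTestingCallback()
		compactTestingCallback = nil
	}
	// ... then compacts it into blocks and truncates the OOO head ...
	fmt.Println("compacted")
}

func main() {
	compactTestingCallback = func() {
		fmt.Println("hook: append out-of-order samples mid-compaction")
	}
	compactOOOHead() // hook fires once
	compactOOOHead() // hook is already cleared
}
```

Because the hook is unsynchronized package state, the pattern only suits tests that do not exercise this code path in parallel; the one-shot reset keeps a single test from leaking the hook into later compactions.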
tsdb/db_test.go (path inferred):

```diff
@@ -4500,12 +4500,15 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
 func TestOOOCompaction(t *testing.T) {
 	for name, scenario := range sampleTypeScenarios {
 		t.Run(name, func(t *testing.T) {
-			testOOOCompaction(t, scenario)
+			testOOOCompaction(t, scenario, false)
+		})
+		t.Run(name+"+extra", func(t *testing.T) {
+			testOOOCompaction(t, scenario, true)
 		})
 	}
 }
 
-func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
+func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSamples bool) {
 	dir := t.TempDir()
 	ctx := context.Background()
 
@@ -4536,7 +4539,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 	}
 
 	// Add an in-order samples.
-	addSample(250, 350)
+	addSample(250, 300)
 
 	// Verify that the in-memory ooo chunk is empty.
 	checkEmptyOOOChunk := func(lbls labels.Labels) {
@@ -4550,15 +4553,17 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 
 	// Add ooo samples that creates multiple chunks.
 	// 90 to 300 spans across 3 block ranges: [0, 120), [120, 240), [240, 360)
-	addSample(90, 310)
+	addSample(90, 300)
 	// Adding same samples to create overlapping chunks.
 	// Since the active chunk won't start at 90 again, all the new
 	// chunks will have different time ranges than the previous chunks.
-	addSample(90, 310)
+	addSample(90, 300)
 
+	var highest int64 = 300
+
 	verifyDBSamples := func() {
 		var series1Samples, series2Samples []chunks.Sample
-		for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, 350}} {
+		for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, highest}} {
 			fromMins, toMins := r[0], r[1]
 			for min := fromMins; min <= toMins; min++ {
 				ts := min * time.Minute.Milliseconds()
@@ -4586,7 +4591,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
-		require.Len(t, ms.ooo.oooMmappedChunks, 14) // 7 original, 7 duplicate.
+		require.Len(t, ms.ooo.oooMmappedChunks, 13) // 7 original, 6 duplicate.
 	}
 	checkNonEmptyOOOChunk(series1)
 	checkNonEmptyOOOChunk(series2)
@@ -4604,6 +4609,15 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 	require.NoError(t, err)
 	require.Greater(t, f.Size(), int64(100))
 
+	if addExtraSamples {
+		compactOOOHeadTestingCallback = func() {
+			addSample(90, 120)  // Back in time, to generate a new OOO chunk.
+			addSample(300, 330) // Now some samples after the previous highest timestamp.
+			addSample(300, 330) // Repeat to generate an OOO chunk at these timestamps.
+		}
+		highest = 330
+	}
+
 	// OOO compaction happens here.
 	require.NoError(t, db.CompactOOOHead(ctx))
 
@@ -4619,11 +4633,13 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 	require.Equal(t, "00000001", files[0].Name())
 	f, err = files[0].Info()
 	require.NoError(t, err)
-	require.Equal(t, int64(0), f.Size())
 
-	// OOO stuff should not be present in the Head now.
-	checkEmptyOOOChunk(series1)
-	checkEmptyOOOChunk(series2)
+	if !addExtraSamples {
+		require.Equal(t, int64(0), f.Size())
+		// OOO stuff should not be present in the Head now.
+		checkEmptyOOOChunk(series1)
+		checkEmptyOOOChunk(series2)
+	}
 
 	verifySamples := func(block *Block, fromMins, toMins int64) {
 		series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
@@ -4648,7 +4664,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 	// Checking for expected data in the blocks.
 	verifySamples(db.Blocks()[0], 90, 119)
 	verifySamples(db.Blocks()[1], 120, 239)
-	verifySamples(db.Blocks()[2], 240, 310)
+	verifySamples(db.Blocks()[2], 240, 299)
 
 	// There should be a single m-map file.
 	mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
@@ -4661,7 +4677,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 	err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds()))
 	require.NoError(t, err)
 	require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351)
-	verifySamples(db.Blocks()[3], 250, 350)
+	verifySamples(db.Blocks()[3], 250, highest)
 
 	verifyDBSamples() // Blocks created out of normal and OOO head now. But not merged.
 
@@ -4678,7 +4694,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 	require.Len(t, db.Blocks(), 3) // [0, 120), [120, 240), [240, 360)
 	verifySamples(db.Blocks()[0], 90, 119)
 	verifySamples(db.Blocks()[1], 120, 239)
-	verifySamples(db.Blocks()[2], 240, 350) // Merged block.
+	verifySamples(db.Blocks()[2], 240, highest) // Merged block.
 
 	verifyDBSamples() // Final state. Blocks from normal and OOO head are merged.
 }
```
tsdb/head_read.go (path inferred):

```diff
@@ -467,7 +467,7 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi
 // amongst all the chunks in the OOOHead.
 // This function is not thread safe unless the caller holds a lock.
 // The caller must ensure that s.ooo is not nil.
-func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (*mergedOOOChunks, error) {
+func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64, maxMmapRef chunks.ChunkDiskMapperRef) (*mergedOOOChunks, error) {
 	_, cid := chunks.HeadChunkRef(meta.Ref).Unpack()
 
 	// ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are
@@ -490,6 +490,9 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
 	tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks)+1)
 
 	for i, c := range s.ooo.oooMmappedChunks {
+		if maxMmapRef != 0 && c.ref > maxMmapRef {
+			break
+		}
 		if c.OverlapsClosedInterval(mint, maxt) {
 			tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
 				meta: chunks.Meta{
```
tsdb/ooo_head_read.go (path inferred):

```diff
@@ -201,7 +201,7 @@ func (oh *OOORangeHead) Index() (IndexReader, error) {
 }
 
 func (oh *OOORangeHead) Chunks() (ChunkReader, error) {
-	return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState), nil
+	return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState, 0), nil
 }
 
 func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) {
@@ -243,14 +243,16 @@ type OOOHeadChunkReader struct {
 	head       *Head
 	mint, maxt int64
 	isoState   *oooIsolationState
+	maxMmapRef chunks.ChunkDiskMapperRef
 }
 
-func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState) *OOOHeadChunkReader {
+func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *OOOHeadChunkReader {
 	return &OOOHeadChunkReader{
 		head:       head,
 		mint:       mint,
 		maxt:       maxt,
 		isoState:   isoState,
+		maxMmapRef: maxMmapRef,
 	}
 }
 
@@ -269,7 +271,7 @@ func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk,
 		s.Unlock()
 		return nil, nil, storage.ErrNotFound
 	}
-	mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt)
+	mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt, cr.maxMmapRef)
 	s.Unlock()
 	if err != nil {
 		return nil, nil, err
@@ -386,7 +388,7 @@ func (ch *OOOCompactionHead) Index() (IndexReader, error) {
 }
 
 func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) {
-	return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil), nil
+	return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil, ch.lastMmapRef), nil
 }
 
 func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) {
```
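
Taken together, these hunks plumb an upper bound on m-mapped chunk references from `OOOCompactionHead.Chunks` (which passes `ch.lastMmapRef`) down into `oooMergedChunks`: the compaction reader only sees chunks that were already m-mapped when the compaction head was built, so samples appended concurrently (such as those injected by the test callback) cannot leak into the block being written. A standalone sketch of just the filtering rule, with stand-in types (`chunkRef` and `mmappedChunk` are assumptions, not the real `chunks` package):

```go
package main

import "fmt"

// chunkRef stands in for chunks.ChunkDiskMapperRef: a reference that
// increases monotonically as chunks are m-mapped to disk.
type chunkRef uint64

type mmappedChunk struct {
	ref        chunkRef
	mint, maxt int64
}

// selectChunks mirrors the loop in oooMergedChunks: with a non-zero
// maxRef, chunks m-mapped after the snapshot are skipped. Because refs
// are assigned in append order, the scan can stop at the first chunk
// past the bound.
func selectChunks(chks []mmappedChunk, mint, maxt int64, maxRef chunkRef) []mmappedChunk {
	var out []mmappedChunk
	for _, c := range chks {
		if maxRef != 0 && c.ref > maxRef {
			break
		}
		if c.mint <= maxt && c.maxt >= mint { // overlaps the closed interval
			out = append(out, c)
		}
	}
	return out
}

func main() {
	chks := []mmappedChunk{{1, 0, 100}, {2, 50, 150}, {3, 120, 200}}
	fmt.Println(selectChunks(chks, 0, 200, 2)) // refs 1 and 2 only
	fmt.Println(selectChunks(chks, 0, 200, 0)) // 0 means "no bound": all three
}
```

Passing `0` from `OOORangeHead.Chunks` and from the tests preserves the old unbounded behaviour for regular queries.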
tsdb/ooo_head_read_test.go (path inferred):

```diff
@@ -481,7 +481,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
 	t.Run("Getting a non existing chunk fails with not found error", func(t *testing.T) {
 		db := newTestDBWithOpts(t, opts)
 
-		cr := NewOOOHeadChunkReader(db.head, 0, 1000, nil)
+		cr := NewOOOHeadChunkReader(db.head, 0, 1000, nil, 0)
 		defer cr.Close()
 		c, iterable, err := cr.ChunkOrIterable(chunks.Meta{
 			Ref: 0x1000000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300,
@@ -839,7 +839,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
 			require.NoError(t, err)
 			require.Equal(t, len(tc.expChunksSamples), len(chks))
 
-			cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil)
+			cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, 0)
 			defer cr.Close()
 			for i := 0; i < len(chks); i++ {
 				c, iterable, err := cr.ChunkOrIterable(chks[i])
@@ -1013,7 +1013,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
 			}
 			require.NoError(t, app.Commit())
 
-			cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil)
+			cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, 0)
 			defer cr.Close()
 			for i := 0; i < len(chks); i++ {
 				c, iterable, err := cr.ChunkOrIterable(chks[i])
```
web/ui/module/codemirror-promql/package.json (path inferred):

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/codemirror-promql",
-  "version": "0.53.1",
+  "version": "0.54.0-rc.1",
   "description": "a CodeMirror mode for the PromQL language",
   "types": "dist/esm/index.d.ts",
   "module": "dist/esm/index.js",
@@ -29,7 +29,7 @@
   },
   "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
   "dependencies": {
-    "@prometheus-io/lezer-promql": "0.53.1",
+    "@prometheus-io/lezer-promql": "0.54.0-rc.1",
     "lru-cache": "^7.18.3"
   },
   "devDependencies": {
```
web/ui/module/lezer-promql/package.json (path inferred):

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/lezer-promql",
-  "version": "0.53.1",
+  "version": "0.54.0-rc.1",
   "description": "lezer-based PromQL grammar",
   "main": "dist/index.cjs",
   "type": "module",
```
web/ui/package-lock.json (generated; 14 changed lines):

```diff
@@ -1,12 +1,12 @@
 {
   "name": "prometheus-io",
-  "version": "0.53.1",
+  "version": "0.54.0-rc.1",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "prometheus-io",
-      "version": "0.53.1",
+      "version": "0.54.0-rc.1",
       "workspaces": [
         "react-app",
         "module/*"
@@ -30,10 +30,10 @@
     },
     "module/codemirror-promql": {
       "name": "@prometheus-io/codemirror-promql",
-      "version": "0.53.1",
+      "version": "0.54.0-rc.1",
       "license": "Apache-2.0",
       "dependencies": {
-        "@prometheus-io/lezer-promql": "0.53.1",
+        "@prometheus-io/lezer-promql": "0.54.0-rc.1",
         "lru-cache": "^7.18.3"
       },
       "devDependencies": {
@@ -69,7 +69,7 @@
     },
     "module/lezer-promql": {
       "name": "@prometheus-io/lezer-promql",
-      "version": "0.53.1",
+      "version": "0.54.0-rc.1",
       "license": "Apache-2.0",
       "devDependencies": {
         "@lezer/generator": "^1.7.1",
@@ -19352,7 +19352,7 @@
     },
     "react-app": {
       "name": "@prometheus-io/app",
-      "version": "0.53.1",
+      "version": "0.54.0-rc.1",
       "dependencies": {
         "@codemirror/autocomplete": "^6.17.0",
         "@codemirror/commands": "^6.6.0",
@@ -19370,7 +19370,7 @@
         "@lezer/lr": "^1.4.2",
         "@nexucis/fuzzy": "^0.4.1",
         "@nexucis/kvsearch": "^0.8.1",
-        "@prometheus-io/codemirror-promql": "0.53.1",
+        "@prometheus-io/codemirror-promql": "0.54.0-rc.1",
         "bootstrap": "^4.6.2",
         "css.escape": "^1.5.1",
         "downshift": "^9.0.6",
```
web/ui/package.json (path inferred):

```diff
@@ -28,5 +28,5 @@
     "ts-jest": "^29.2.2",
     "typescript": "^4.9.5"
   },
-  "version": "0.53.1"
+  "version": "0.54.0-rc.1"
 }
```
web/ui/react-app/package.json (path inferred):

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/app",
-  "version": "0.53.1",
+  "version": "0.54.0-rc.1",
   "private": true,
   "dependencies": {
     "@codemirror/autocomplete": "^6.17.0",
@@ -19,7 +19,7 @@
     "@lezer/lr": "^1.4.2",
     "@nexucis/fuzzy": "^0.4.1",
     "@nexucis/kvsearch": "^0.8.1",
-    "@prometheus-io/codemirror-promql": "0.53.1",
+    "@prometheus-io/codemirror-promql": "0.54.0-rc.1",
     "bootstrap": "^4.6.2",
     "css.escape": "^1.5.1",
     "downshift": "^9.0.6",
```