From 52270d621668c3428531f68370c40065f13bbd90 Mon Sep 17 00:00:00 2001 From: Chance Feick Date: Mon, 14 Nov 2022 13:30:22 -0800 Subject: [PATCH 01/82] Fix relative link to use .md file extension Signed-off-by: Chance Feick --- docs/querying/basics.md | 2 +- docs/querying/functions.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index bc4478f62..5e9ddc119 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -35,7 +35,7 @@ vector is the only type that can be directly graphed. _Notes about the experimental native histograms:_ * Ingesting native histograms has to be enabled via a [feature - flag](../feature_flags/#native-histograms). + flag](../feature_flags.md#native-histograms). * Once native histograms have been ingested into the TSDB (and even after disabling the feature flag again), both instant vectors and range vectors may now contain samples that aren't simple floating point numbers (float samples) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index b4bc0a743..82513e1c0 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -14,7 +14,7 @@ vector, which if not provided it will default to the value of the expression _Notes about the experimental native histograms:_ * Ingesting native histograms has to be enabled via a [feature - flag](../feature_flags/#native-histograms). As long as no native histograms + flag](../feature_flags.md#native-histograms). As long as no native histograms have been ingested into the TSDB, all functions will behave as usual. * Functions that do not explicitly mention native histograms in their documentation (see below) effectively treat a native histogram as a float From 2cfd8da6280f97176ee877aaa0c5f724d20905de Mon Sep 17 00:00:00 2001 From: Chance Feick <6326742+chancefeick@users.noreply.github.com> Date: Tue, 15 Nov 2022 08:10:44 -0800 Subject: [PATCH 02/82] Update docs/querying/basics.md Co-authored-by: Julien Pivotto Signed-off-by: Chance Feick <6326742+chancefeick@users.noreply.github.com> --- docs/querying/basics.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index 5e9ddc119..c3aa7b804 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -35,7 +35,7 @@ vector is the only type that can be directly graphed. _Notes about the experimental native histograms:_ * Ingesting native histograms has to be enabled via a [feature - flag](../feature_flags.md#native-histograms). + flag](../../feature_flags/#native-histograms). 
* Once native histograms have been ingested into the TSDB (and even after disabling the feature flag again), both instant vectors and range vectors may now contain samples that aren't simple floating point numbers (float samples) From c5a37ddad54762a1eaf80c820d1fff6d288bef4a Mon Sep 17 00:00:00 2001 From: haleyao Date: Sun, 9 Jul 2023 21:33:31 +0800 Subject: [PATCH 03/82] Remove deleted target from discovery manager Signed-off-by: haleyao --- discovery/legacymanager/manager.go | 7 ++++++- discovery/legacymanager/manager_test.go | 10 +++------- discovery/manager.go | 7 ++++++- discovery/manager_test.go | 15 ++------------- 4 files changed, 17 insertions(+), 22 deletions(-) diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go index 87823f401..e7c79a8f8 100644 --- a/discovery/legacymanager/manager.go +++ b/discovery/legacymanager/manager.go @@ -270,7 +270,12 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { } for _, tg := range tgs { if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. - m.targets[poolKey][tg.Source] = tg + // Remove the deleted target. + if len(tg.Targets) == 0 && len(tg.Labels) == 0 { + delete(m.targets[poolKey], tg.Source) + } else { + m.targets[poolKey][tg.Source] = tg + } } } } diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go index 13b84e6e3..bc8a419ec 100644 --- a/discovery/legacymanager/manager_test.go +++ b/discovery/legacymanager/manager_test.go @@ -824,13 +824,9 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { if !ok { t.Fatalf("'%v' should be present in target groups", pkey) } - group, ok := targetGroups[""] - if !ok { - t.Fatalf("missing '' key in target groups %v", targetGroups) - } - - if len(group.Targets) != 0 { - t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) + _, ok = targetGroups[""] + if ok { + t.Fatalf("Target groups should be empty, got %v", targetGroups) } } diff --git a/discovery/manager.go b/discovery/manager.go index 8b304a0fa..7f06b423d 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -387,7 +387,12 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { } for _, tg := range tgs { if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. - m.targets[poolKey][tg.Source] = tg + // Remove the deleted target. 
+ if len(tg.Targets) == 0 && len(tg.Labels) == 0 { + delete(m.targets[poolKey], tg.Source) + } else { + m.targets[poolKey][tg.Source] = tg + } } } } diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 537160811..67ccbcac7 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -1044,19 +1044,8 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { if !ok { t.Fatalf("'%v' should be present in target groups", p) } - group, ok := targetGroups[""] - if !ok { - t.Fatalf("missing '' key in target groups %v", targetGroups) - } - - if len(group.Targets) != 0 { - t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) - } - require.Equal(t, 1, len(syncedTargets)) - require.Equal(t, 1, len(syncedTargets["prometheus"])) - if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil { - t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls) - } + require.Equal(t, 0, len(targetGroups)) + require.Equal(t, 0, len(syncedTargets)) } func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { From fc2e4cd3b9ca1d924dce714b091c9bd5c97926fa Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 11 Jul 2023 13:13:35 +0200 Subject: [PATCH 04/82] docs: Fix link to feature flags. Signed-off-by: Julien Pivotto --- docs/querying/basics.md | 2 +- docs/querying/operators.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index c3aa7b804..5e9ddc119 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -35,7 +35,7 @@ vector is the only type that can be directly graphed. _Notes about the experimental native histograms:_ * Ingesting native histograms has to be enabled via a [feature - flag](../../feature_flags/#native-histograms). + flag](../feature_flags.md#native-histograms). * Once native histograms have been ingested into the TSDB (and even after disabling the feature flag again), both instant vectors and range vectors may now contain samples that aren't simple floating point numbers (float samples) diff --git a/docs/querying/operators.md b/docs/querying/operators.md index 0cd536894..5eb549051 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -310,7 +310,7 @@ so `2 ^ 3 ^ 2` is equivalent to `2 ^ (3 ^ 2)`. ## Operators for native histograms Native histograms are an experimental feature. Ingesting native histograms has -to be enabled via a [feature flag](../feature_flags/#native-histograms). Once +to be enabled via a [feature flag](../feature_flags.md#native-histograms). Once native histograms have been ingested, they can be queried (even after the feature flag has been disabled again). However, the operator support for native histograms is still very limited. 
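The deletion rule added by PATCH 03/82 above can be read in isolation: a non-nil target group whose Targets and Labels are both empty is treated as a tombstone for its Source rather than being stored. The sketch below illustrates that rule with a hypothetical, simplified group type (the real one is discovery/targetgroup.Group); it is an illustration only, not the manager code itself.

    package main

    import "fmt"

    // group is a simplified stand-in for a discovery target group: Source
    // identifies the group, Targets and Labels carry its content.
    type group struct {
        Source  string
        Targets []map[string]string
        Labels  map[string]string
    }

    // updateGroups mirrors the rule from PATCH 03/82: a non-nil group carrying
    // neither targets nor labels deletes the stored entry for its Source,
    // anything else overwrites it.
    func updateGroups(current map[string]*group, updates []*group) {
        for _, tg := range updates {
            if tg == nil { // some discoverers send nil groups
                continue
            }
            if len(tg.Targets) == 0 && len(tg.Labels) == 0 {
                delete(current, tg.Source)
                continue
            }
            current[tg.Source] = tg
        }
    }

    func main() {
        current := map[string]*group{}
        updateGroups(current, []*group{{
            Source:  "static/0",
            Targets: []map[string]string{{"__address__": "localhost:9090"}},
        }})
        fmt.Println(len(current)) // 1: the group is stored

        // An empty group with the same Source now signals deletion.
        updateGroups(current, []*group{{Source: "static/0"}})
        fmt.Println(len(current)) // 0: the entry has been removed
    }

This matches the updated assertions in TestTargetSetRecreatesEmptyStaticConfigs above: once the static config becomes empty, no "" group is left behind in the manager's target map.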
From 17cdfdd79f42ab52a3cff6882ab958dcfa411bc2 Mon Sep 17 00:00:00 2001 From: Mikhail Fesenko Date: Sat, 15 Oct 2022 01:18:20 +0200 Subject: [PATCH 05/82] maraphon.go: Simplified conditions in method Signed-off-by: Mikhail Fesenko --- discovery/marathon/marathon.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index cfd3e2c08..ef897234d 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -106,14 +106,18 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if len(c.AuthToken) > 0 && len(c.AuthTokenFile) > 0 { return errors.New("marathon_sd: at most one of auth_token & auth_token_file must be configured") } - if c.HTTPClientConfig.BasicAuth != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { - return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured") - } - if (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { - return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured") - } - if c.HTTPClientConfig.Authorization != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { - return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured") + + isAuthTokenProvided := len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0 + if isAuthTokenProvided { + if c.HTTPClientConfig.BasicAuth != nil { + return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured") + } + if len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0 { + return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured") + } + if c.HTTPClientConfig.Authorization != nil { + return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured") + } } return c.HTTPClientConfig.Validate() } From 02e11cc2a721be3dd6ae1bc4cb3bc37d65b6db96 Mon Sep 17 00:00:00 2001 From: Mikhail Fesenko Date: Thu, 13 Jul 2023 00:52:27 +0200 Subject: [PATCH 06/82] Fix from discussion Signed-off-by: Mikhail Fesenko --- discovery/marathon/marathon.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index ef897234d..3baf79aff 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -107,15 +107,13 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return errors.New("marathon_sd: at most one of auth_token & auth_token_file must be configured") } - isAuthTokenProvided := len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0 - if isAuthTokenProvided { - if c.HTTPClientConfig.BasicAuth != nil { + if len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0 { + switch { + case c.HTTPClientConfig.BasicAuth != nil: return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured") - } - if len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0 { + case len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0: return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be 
configured") - } - if c.HTTPClientConfig.Authorization != nil { + case c.HTTPClientConfig.Authorization != nil: return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured") } } From 15fa680117dc7cc7f8046daf4a7258868385223b Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 28 Jun 2023 15:35:02 +1000 Subject: [PATCH 07/82] Add benchmark for query using timestamp() Signed-off-by: Charles Korn --- promql/bench_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/promql/bench_test.go b/promql/bench_test.go index 6818498bf..fb5f3a06d 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -186,6 +186,10 @@ func rangeQueryCases() []benchCase { expr: "count({__name__!=\"\",l=\"\"})", steps: 1, }, + // timestamp() function + { + expr: "timestamp(a_X)", + }, } // X in an expr will be replaced by different metric sizes. From a2a2cc757e48de31cfbb5e099814232712e1c8b2 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 28 Jun 2023 15:08:48 +1000 Subject: [PATCH 08/82] Extract timestamp special case to its own method. Signed-off-by: Charles Korn --- promql/engine.go | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 83bbdeff8..64f69df5a 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1387,15 +1387,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { unwrapParenExpr(&arg) vs, ok := arg.(*parser.VectorSelector) if ok { - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - if vs.Timestamp != nil { - // This is a special case only for "timestamp" since the offset - // needs to be adjusted for every point. - vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond - } - val, ws := ev.vectorSelector(vs, enh.Ts) - return call([]parser.Value{val}, e.Args, enh), ws - }) + return ev.evalTimestampFunctionOverVectorSelector(vs, call, e) } } @@ -1833,6 +1825,18 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { panic(fmt.Errorf("unhandled expression of type: %T", expr)) } +func (ev *evaluator) evalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, storage.Warnings) { + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + if vs.Timestamp != nil { + // This is a special case only for "timestamp" since the offset + // needs to be adjusted for every point. + vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond + } + val, ws := ev.vectorSelector(vs, enh.Ts) + return call([]parser.Value{val}, e.Args, enh), ws + }) +} + // vectorSelector evaluates a *parser.VectorSelector expression. 
func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vector, storage.Warnings) { ws, err := checkAndExpandSeriesSet(ev.ctx, node) From eeface2e1779d8593cdb859ea637d6aa05595318 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 28 Jun 2023 15:09:58 +1000 Subject: [PATCH 09/82] Inline method Signed-off-by: Charles Korn --- promql/engine.go | 64 ++++++++++++++++++++++-------------------------- 1 file changed, 29 insertions(+), 35 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 64f69df5a..2702e1779 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1832,43 +1832,37 @@ func (ev *evaluator) evalTimestampFunctionOverVectorSelector(vs *parser.VectorSe // needs to be adjusted for every point. vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond } - val, ws := ev.vectorSelector(vs, enh.Ts) - return call([]parser.Value{val}, e.Args, enh), ws - }) -} - -// vectorSelector evaluates a *parser.VectorSelector expression. -func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vector, storage.Warnings) { - ws, err := checkAndExpandSeriesSet(ev.ctx, node) - if err != nil { - ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) - } - vec := make(Vector, 0, len(node.Series)) - it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) - var chkIter chunkenc.Iterator - for i, s := range node.Series { - chkIter = s.Iterator(chkIter) - it.Reset(chkIter) - - t, f, h, ok := ev.vectorSelectorSingle(it, node, ts) - if ok { - vec = append(vec, Sample{ - Metric: node.Series[i].Labels(), - T: t, - F: f, - H: h, - }) - - ev.currentSamples++ - ev.samplesStats.IncrementSamplesAtTimestamp(ts, 1) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } + ws, err := checkAndExpandSeriesSet(ev.ctx, vs) + if err != nil { + ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } + vec := make(Vector, 0, len(vs.Series)) + it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator + for i, s := range vs.Series { + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) - } - ev.samplesStats.UpdatePeak(ev.currentSamples) - return vec, ws + t, f, h, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) + if ok { + vec = append(vec, Sample{ + Metric: vs.Series[i].Labels(), + T: t, + F: f, + H: h, + }) + + ev.currentSamples++ + ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, 1) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + } + + } + ev.samplesStats.UpdatePeak(ev.currentSamples) + return call([]parser.Value{vec}, e.Args, enh), ws + }) } // vectorSelectorSingle evaluates an instant vector for the iterator of one time series. 
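PATCH 08/82 and 09/82 above isolate the timestamp() special case: when the vector selector carries a fixed @ timestamp, vs.Offset is recomputed on every evaluation step so that the selector keeps resolving to that fixed instant. Below is a minimal, self-contained sketch of that per-step arithmetic; offsetForStep is a hypothetical helper written for illustration and is not part of the PromQL engine.

    package main

    import (
        "fmt"
        "time"
    )

    // offsetForStep mirrors the adjustment made inside
    // evalTimestampFunctionOverVectorSelector above: for a selector with a
    // fixed @ timestamp, the offset is re-derived at every step so that
    // "step time - offset" always lands back on that fixed timestamp.
    func offsetForStep(stepTs, atTs int64) time.Duration {
        return time.Duration(stepTs-atTs) * time.Millisecond
    }

    func main() {
        const atTs = int64(60_000) // a fixed @ timestamp of 60s, in milliseconds

        // Steps at 60s, 61s and 62s each need a different offset to keep
        // selecting data at the fixed 60s mark.
        for _, stepTs := range []int64{60_000, 61_000, 62_000} {
            off := offsetForStep(stepTs, atTs)
            fmt.Printf("step=%dms offset=%s selects at %dms\n",
                stepTs, off, stepTs-off.Milliseconds())
        }
    }

As the code comment in the patch notes, this adjustment is a special case for timestamp() only, because the offset has to be recomputed for every evaluated point of a range query.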
From a14299805208ed7da43e482cf04055635bfb634a Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 28 Jun 2023 15:11:01 +1000 Subject: [PATCH 10/82] Expand series set just once Signed-off-by: Charles Korn --- promql/engine.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 2702e1779..1d7483d2f 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1826,16 +1826,17 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } func (ev *evaluator) evalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, storage.Warnings) { + ws, err := checkAndExpandSeriesSet(ev.ctx, vs) + if err != nil { + ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) + } + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { if vs.Timestamp != nil { // This is a special case only for "timestamp" since the offset // needs to be adjusted for every point. vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond } - ws, err := checkAndExpandSeriesSet(ev.ctx, vs) - if err != nil { - ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) - } vec := make(Vector, 0, len(vs.Series)) it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) var chkIter chunkenc.Iterator From b114c0888d295e97f0a531fc8547cc44c836149c Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 28 Jun 2023 15:12:34 +1000 Subject: [PATCH 11/82] Simplify loop Signed-off-by: Charles Korn --- promql/engine.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 1d7483d2f..6fbaba505 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1840,14 +1840,14 @@ func (ev *evaluator) evalTimestampFunctionOverVectorSelector(vs *parser.VectorSe vec := make(Vector, 0, len(vs.Series)) it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) var chkIter chunkenc.Iterator - for i, s := range vs.Series { + for _, s := range vs.Series { chkIter = s.Iterator(chkIter) it.Reset(chkIter) t, f, h, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) if ok { vec = append(vec, Sample{ - Metric: vs.Series[i].Labels(), + Metric: s.Labels(), T: t, F: f, H: h, @@ -1859,7 +1859,6 @@ func (ev *evaluator) evalTimestampFunctionOverVectorSelector(vs *parser.VectorSe ev.error(ErrTooManySamples(env)) } } - } ev.samplesStats.UpdatePeak(ev.currentSamples) return call([]parser.Value{vec}, e.Args, enh), ws From 993618adea442fb8ba19a3c36a3c01c434b3cfc5 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 28 Jun 2023 15:13:58 +1000 Subject: [PATCH 12/82] Don't create a new iterator for every time step. 
Signed-off-by: Charles Korn --- promql/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/promql/engine.go b/promql/engine.go index 6fbaba505..ed801a683 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1830,6 +1830,7 @@ func (ev *evaluator) evalTimestampFunctionOverVectorSelector(vs *parser.VectorSe if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } + it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { if vs.Timestamp != nil { @@ -1838,7 +1839,6 @@ func (ev *evaluator) evalTimestampFunctionOverVectorSelector(vs *parser.VectorSe vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond } vec := make(Vector, 0, len(vs.Series)) - it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) var chkIter chunkenc.Iterator for _, s := range vs.Series { chkIter = s.Iterator(chkIter) From fde6ebb17df9ee50e4a18533169b9ef84882a99f Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 28 Jun 2023 15:27:44 +1000 Subject: [PATCH 13/82] Create per-series iterators only once per selector, rather than recreating it for each time step. Signed-off-by: Charles Korn --- promql/engine.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index ed801a683..b6c856ba7 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1830,7 +1830,12 @@ func (ev *evaluator) evalTimestampFunctionOverVectorSelector(vs *parser.VectorSe if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } - it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + + seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series)) + for i, s := range vs.Series { + it := s.Iterator(nil) + seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) + } return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { if vs.Timestamp != nil { @@ -1838,12 +1843,10 @@ func (ev *evaluator) evalTimestampFunctionOverVectorSelector(vs *parser.VectorSe // needs to be adjusted for every point. vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond } - vec := make(Vector, 0, len(vs.Series)) - var chkIter chunkenc.Iterator - for _, s := range vs.Series { - chkIter = s.Iterator(chkIter) - it.Reset(chkIter) + vec := make(Vector, 0, len(vs.Series)) + for i, s := range vs.Series { + it := seriesIterators[i] t, f, h, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) if ok { vec = append(vec, Sample{ From 6903d6edd882a8439a5e96dde028def9a7a46d8c Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Thu, 29 Jun 2023 13:34:26 +1000 Subject: [PATCH 14/82] Add test to confirm `timestamp()` behaves correctly when evaluating a range query. 
Signed-off-by: Charles Korn --- promql/engine_test.go | 94 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/promql/engine_test.go b/promql/engine_test.go index ca4a022e0..1b14e8a5d 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -1977,6 +1977,100 @@ func TestSubquerySelector(t *testing.T) { } } +func TestTimestampFunction_StepsMoreOftenThanSamples(t *testing.T) { + test, err := NewTest(t, ` +load 1m + metric 0+1x1000 +`) + require.NoError(t, err) + defer test.Close() + + err = test.Run() + require.NoError(t, err) + + query := "timestamp(metric)" + start := time.Unix(0, 0) + end := time.Unix(61, 0) + interval := time.Second + + expectedResult := Matrix{ + Series{ + Floats: []FPoint{ + {F: 0, T: 0}, + {F: 0, T: 1_000}, + {F: 0, T: 2_000}, + {F: 0, T: 3_000}, + {F: 0, T: 4_000}, + {F: 0, T: 5_000}, + {F: 0, T: 6_000}, + {F: 0, T: 7_000}, + {F: 0, T: 8_000}, + {F: 0, T: 9_000}, + {F: 0, T: 10_000}, + {F: 0, T: 11_000}, + {F: 0, T: 12_000}, + {F: 0, T: 13_000}, + {F: 0, T: 14_000}, + {F: 0, T: 15_000}, + {F: 0, T: 16_000}, + {F: 0, T: 17_000}, + {F: 0, T: 18_000}, + {F: 0, T: 19_000}, + {F: 0, T: 20_000}, + {F: 0, T: 21_000}, + {F: 0, T: 22_000}, + {F: 0, T: 23_000}, + {F: 0, T: 24_000}, + {F: 0, T: 25_000}, + {F: 0, T: 26_000}, + {F: 0, T: 27_000}, + {F: 0, T: 28_000}, + {F: 0, T: 29_000}, + {F: 0, T: 30_000}, + {F: 0, T: 31_000}, + {F: 0, T: 32_000}, + {F: 0, T: 33_000}, + {F: 0, T: 34_000}, + {F: 0, T: 35_000}, + {F: 0, T: 36_000}, + {F: 0, T: 37_000}, + {F: 0, T: 38_000}, + {F: 0, T: 39_000}, + {F: 0, T: 40_000}, + {F: 0, T: 41_000}, + {F: 0, T: 42_000}, + {F: 0, T: 43_000}, + {F: 0, T: 44_000}, + {F: 0, T: 45_000}, + {F: 0, T: 46_000}, + {F: 0, T: 47_000}, + {F: 0, T: 48_000}, + {F: 0, T: 49_000}, + {F: 0, T: 50_000}, + {F: 0, T: 51_000}, + {F: 0, T: 52_000}, + {F: 0, T: 53_000}, + {F: 0, T: 54_000}, + {F: 0, T: 55_000}, + {F: 0, T: 56_000}, + {F: 0, T: 57_000}, + {F: 0, T: 58_000}, + {F: 0, T: 59_000}, + {F: 60, T: 60_000}, + {F: 60, T: 61_000}, + }, + Metric: labels.EmptyLabels(), + }, + } + + qry, err := test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, query, start, end, interval) + require.NoError(t, err) + + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + require.Equal(t, expectedResult, res.Value) +} + type FakeQueryLogger struct { closed bool logs []interface{} From 54e1046616807b2a46095d7da4219421034864db Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 13 Jul 2023 15:36:38 +0100 Subject: [PATCH 15/82] web/api: extend BenchmarkRespond with more types of data Signed-off-by: Bryan Boreham --- web/api/v1/api_test.go | 62 +++++++++++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 16 deletions(-) diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 321a13d5e..99e3b292e 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -3417,27 +3417,57 @@ func TestReturnAPIError(t *testing.T) { var testResponseWriter = httptest.ResponseRecorder{} func BenchmarkRespond(b *testing.B) { - b.ReportAllocs() - request, err := http.NewRequest(http.MethodGet, "/does-not-matter", nil) - require.NoError(b, err) points := []promql.FPoint{} for i := 0; i < 10000; i++ { points = append(points, promql.FPoint{F: float64(i * 1000000), T: int64(i)}) } - response := &QueryData{ - ResultType: parser.ValueTypeMatrix, - Result: promql.Matrix{ - promql.Series{ - Floats: points, - Metric: labels.EmptyLabels(), - }, - }, + matrix := promql.Matrix{} + for i := 0; i < 1000; i++ 
{ + matrix = append(matrix, promql.Series{ + Metric: labels.FromStrings("__name__", fmt.Sprintf("series%v", i), + "label", fmt.Sprintf("series%v", i), + "label2", fmt.Sprintf("series%v", i)), + Floats: points[:10], + }) } - b.ResetTimer() - api := API{} - api.InstallCodec(JSONCodec{}) - for n := 0; n < b.N; n++ { - api.respond(&testResponseWriter, request, response, nil) + series := []labels.Labels{} + for i := 0; i < 1000; i++ { + series = append(series, labels.FromStrings("__name__", fmt.Sprintf("series%v", i), + "label", fmt.Sprintf("series%v", i), + "label2", fmt.Sprintf("series%v", i))) + } + + cases := []struct { + name string + response interface{} + }{ + {name: "10000 points no labels", response: &QueryData{ + ResultType: parser.ValueTypeMatrix, + Result: promql.Matrix{ + promql.Series{ + Floats: points, + Metric: labels.EmptyLabels(), + }, + }, + }}, + {name: "1000 labels", response: series}, + {name: "1000 series 10 points", response: &QueryData{ + ResultType: parser.ValueTypeMatrix, + Result: matrix, + }}, + } + for _, c := range cases { + b.Run(c.name, func(b *testing.B) { + b.ReportAllocs() + request, err := http.NewRequest(http.MethodGet, "/does-not-matter", nil) + require.NoError(b, err) + b.ResetTimer() + api := API{} + api.InstallCodec(JSONCodec{}) + for n := 0; n < b.N; n++ { + api.respond(&testResponseWriter, request, c.response, nil) + } + }) } } From bb528d4a55fd8a08899a8fca700cc64a09c80a54 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 4 Jul 2023 22:59:43 +0100 Subject: [PATCH 16/82] Add jsoniter encoder for Labels Signed-off-by: Bryan Boreham --- web/api/v1/json_codec.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/web/api/v1/json_codec.go b/web/api/v1/json_codec.go index 62e7563b1..3ac99a02d 100644 --- a/web/api/v1/json_codec.go +++ b/web/api/v1/json_codec.go @@ -19,6 +19,7 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/util/jsonutil" ) @@ -29,6 +30,7 @@ func init() { jsoniter.RegisterTypeEncoderFunc("promql.FPoint", marshalFPointJSON, marshalPointJSONIsEmpty) jsoniter.RegisterTypeEncoderFunc("promql.HPoint", marshalHPointJSON, marshalPointJSONIsEmpty) jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty) + jsoniter.RegisterTypeEncoderFunc("labels.Labels", unsafeMarshalLabelsJSON, labelsIsEmpty) } // JSONCodec is a Codec that encodes API responses as JSON. 
@@ -217,3 +219,28 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { func marshalExemplarJSONEmpty(unsafe.Pointer) bool { return false } + +func unsafeMarshalLabelsJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { + labelsPtr := (*labels.Labels)(ptr) + marshalLabelsJSON(*labelsPtr, stream) +} + +func marshalLabelsJSON(lbls labels.Labels, stream *jsoniter.Stream) { + stream.WriteObjectStart() + i := 0 + lbls.Range(func(v labels.Label) { + if i != 0 { + stream.WriteMore() + } + i++ + stream.WriteString(v.Name) + stream.WriteRaw(`:`) + stream.WriteString(v.Value) + }) + stream.WriteObjectEnd() +} + +func labelsIsEmpty(ptr unsafe.Pointer) bool { + labelsPtr := (*labels.Labels)(ptr) + return labelsPtr.IsEmpty() +} From dcadb32eb168fed63b25ba34b071b8f5dcff69b5 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 13 Jul 2023 15:39:54 +0100 Subject: [PATCH 17/82] web/api: use stream encoder for embedded labels This is much more efficient. Signed-off-by: Bryan Boreham --- web/api/v1/json_codec.go | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/web/api/v1/json_codec.go b/web/api/v1/json_codec.go index 3ac99a02d..f1a8104cc 100644 --- a/web/api/v1/json_codec.go +++ b/web/api/v1/json_codec.go @@ -70,12 +70,7 @@ func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { s := *((*promql.Series)(ptr)) stream.WriteObjectStart() stream.WriteObjectField(`metric`) - m, err := s.Metric.MarshalJSON() - if err != nil { - stream.Error = err - return - } - stream.SetBuffer(append(stream.Buffer(), m...)) + marshalLabelsJSON(s.Metric, stream) for i, p := range s.Floats { stream.WriteMore() @@ -131,12 +126,7 @@ func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { s := *((*promql.Sample)(ptr)) stream.WriteObjectStart() stream.WriteObjectField(`metric`) - m, err := s.Metric.MarshalJSON() - if err != nil { - stream.Error = err - return - } - stream.SetBuffer(append(stream.Buffer(), m...)) + marshalLabelsJSON(s.Metric, stream) stream.WriteMore() if s.H == nil { stream.WriteObjectField(`value`) @@ -196,12 +186,7 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { // "labels" key. stream.WriteObjectField(`labels`) - lbls, err := p.Labels.MarshalJSON() - if err != nil { - stream.Error = err - return - } - stream.SetBuffer(append(stream.Buffer(), lbls...)) + marshalLabelsJSON(p.Labels, stream) // "value" key. stream.WriteMore() From 3c80963e8102b9baaffb10dd76db9c9bfcd1c077 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Mierzwa?= Date: Mon, 31 Jul 2023 10:10:24 +0100 Subject: [PATCH 18/82] Use a linked list for memSeries.headChunk (#11818) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently memSeries holds a single head chunk in-memory and a slice of mmapped chunks. When append() is called on memSeries it might decide that a new headChunk is needed to use for given append() call. If that happens it will first mmap existing head chunk and only after that happens it will create a new empty headChunk and continue appending our sample to it. Since appending samples uses write lock on memSeries no other read or write can happen until any append is completed. When we have an append() that must create a new head chunk the whole memSeries is blocked until mmapping of existing head chunk finishes. Mmapping itself uses a lock as it needs to be serialised, which means that the more chunks to mmap we have the longer each chunk might wait for it to be mmapped. 
If there's enough chunks that require mmapping some memSeries will be locked for long enough that it will start affecting queries and scrapes. Queries might timeout, since by default they have a 2 minute timeout set. Scrapes will be blocked inside append() call, which means there will be a gap between samples. This will first affect range queries or calls using rate() and such, since the time range requested in the query might have too few samples to calculate anything. To avoid this we need to remove mmapping from append path, since mmapping is blocking. But this means that when we cut a new head chunk we need to keep the old one around, so we can mmap it later. This change makes memSeries.headChunk a linked list, memSeries.headChunk still points to the 'open' head chunk that receives new samples, while older, yet to be mmapped, chunks are linked to it. Mmapping is done on a schedule by iterating all memSeries one by one. Thanks to this we control when mmapping is done, since we trigger it manually, which reduces the risk that it will have to compete for mmap locks with other chunks. Signed-off-by: Łukasz Mierzwa --- tsdb/chunks/chunks.go | 10 +- tsdb/db.go | 2 + tsdb/db_test.go | 3 + tsdb/head.go | 141 ++++++++++++--- tsdb/head_append.go | 89 +++++----- tsdb/head_read.go | 119 +++++++++---- tsdb/head_read_test.go | 387 +++++++++++++++++++++++++++++++++++++++++ tsdb/head_test.go | 239 ++++++++++++++++++++++--- tsdb/head_wal.go | 17 +- 9 files changed, 881 insertions(+), 126 deletions(-) diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go index 6d04998e8..9817fe47a 100644 --- a/tsdb/chunks/chunks.go +++ b/tsdb/chunks/chunks.go @@ -85,13 +85,21 @@ func (p HeadChunkRef) Unpack() (HeadSeriesRef, HeadChunkID) { // - less than the above, but >= memSeries.firstID, then it's // memSeries.mmappedChunks[i] where i = HeadChunkID - memSeries.firstID. // +// If memSeries.headChunks is non-nil it points to a *memChunk that holds the current +// "open" (accepting appends) instance. *memChunk is a linked list and memChunk.next pointer +// might link to the older *memChunk instance. +// If there are multiple *memChunk instances linked to each other from memSeries.headChunks +// they will be m-mapped as soon as possible leaving only "open" *memChunk instance. +// // Example: // assume a memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9]. // | HeadChunkID value | refers to ... | // |-------------------|----------------------------------------------------------------------------------------| // | 0-6 | chunks that have been compacted to blocks, these won't return data for queries in Head | // | 7-11 | memSeries.mmappedChunks[i] where i is 0 to 4. | -// | 12 | memSeries.headChunk | +// | 12 | *memChunk{next: nil} +// | 13 | *memChunk{next: ^} +// | 14 | memSeries.headChunks -> *memChunk{next: ^} type HeadChunkID uint64 // BlockChunkRef refers to a chunk within a persisted block. diff --git a/tsdb/db.go b/tsdb/db.go index 2ca6034a0..0c69ae6e6 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -973,6 +973,8 @@ func (db *DB) run() { case db.compactc <- struct{}{}: default: } + // We attempt mmapping of head chunks regularly. 
+ db.head.mmapHeadChunks() case <-db.compactc: db.metrics.compactionsTriggered.Inc() diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 772fcf9d1..0eb361db7 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1535,6 +1535,7 @@ func TestSizeRetention(t *testing.T) { } } require.NoError(t, headApp.Commit()) + db.Head().mmapHeadChunks() require.Eventually(t, func() bool { return db.Head().chunkDiskMapper.IsQueueEmpty() @@ -6049,12 +6050,14 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) { // Check that m-map files gets deleted properly after compactions. + db.head.mmapHeadChunks() checkMmapFileContents([]string{"000001", "000002"}, nil) require.NoError(t, db.Compact()) checkMmapFileContents([]string{"000002"}, []string{"000001"}) require.Nil(t, ms.ooo, "OOO mmap chunk was not compacted") addSamples(501, 650) + db.head.mmapHeadChunks() checkMmapFileContents([]string{"000002", "000003"}, []string{"000001"}) require.NoError(t, db.Compact()) checkMmapFileContents(nil, []string{"000001", "000002", "000003"}) diff --git a/tsdb/head.go b/tsdb/head.go index 499be067a..e18bd55a5 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -344,6 +344,7 @@ type headMetrics struct { mmapChunkCorruptionTotal prometheus.Counter snapshotReplayErrorTotal prometheus.Counter // Will be either 0 or 1. oooHistogram prometheus.Histogram + mmapChunksTotal prometheus.Counter } const ( @@ -468,6 +469,10 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { 60 * 60 * 12, // 12h }, }), + mmapChunksTotal: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_mmap_chunks_total", + Help: "Total number of chunks that were memory-mapped.", + }), } if r != nil { @@ -495,6 +500,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { m.checkpointDeleteTotal, m.checkpointCreationFail, m.checkpointCreationTotal, + m.mmapChunksTotal, m.mmapChunkCorruptionTotal, m.snapshotReplayErrorTotal, // Metrics bound to functions and not needed in tests @@ -880,11 +886,11 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) numSamples: numSamples, }) h.updateMinMaxTime(mint, maxt) - if ms.headChunk != nil && maxt >= ms.headChunk.minTime { + if ms.headChunks != nil && maxt >= ms.headChunks.minTime { // The head chunk was completed and was m-mapped after taking the snapshot. // Hence remove this chunk. ms.nextAt = 0 - ms.headChunk = nil + ms.headChunks = nil ms.app = nil } return nil @@ -1574,6 +1580,10 @@ func (h *Head) Close() error { defer h.closedMtx.Unlock() h.closed = true + // mmap all but last chunk in case we're performing snapshot since that only + // takes samples from most recent head chunk. + h.mmapHeadChunks() + errs := tsdb_errors.NewMulti(h.chunkDiskMapper.Close()) if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown { errs.Add(h.performChunkSnapshot()) @@ -1630,6 +1640,37 @@ func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labe return s, true, nil } +// mmapHeadChunks will iterate all memSeries stored on Head and call mmapHeadChunks() on each of them. +// +// There are two types of chunks that store samples for each memSeries: +// A) Head chunk - stored on Go heap, when new samples are appended they go there. +// B) M-mapped chunks - memory mapped chunks, kernel manages the memory for us on-demand, these chunks +// +// are read-only. +// +// Calling mmapHeadChunks() will iterate all memSeries and m-mmap all chunks that should be m-mapped. 
+// The m-mapping operation is needs to be serialised and so it goes via central lock. +// If there are multiple concurrent memSeries that need to m-map some chunk then they can block each-other. +// +// To minimise the effect of locking on TSDB operations m-mapping is serialised and done away from +// sample append path, since waiting on a lock inside an append would lock the entire memSeries for +// (potentially) a long time, since that could eventually delay next scrape and/or cause query timeouts. +func (h *Head) mmapHeadChunks() { + var count int + for i := 0; i < h.series.size; i++ { + h.series.locks[i].RLock() + for _, all := range h.series.hashes[i] { + for _, series := range all { + series.Lock() + count += series.mmapChunks(h.chunkDiskMapper) + series.Unlock() + } + } + h.series.locks[i].RUnlock() + } + h.metrics.mmapChunksTotal.Add(float64(count)) +} + // seriesHashmap is a simple hashmap for memSeries by their label set. It is built // on top of a regular hashmap and holds a slice of series to resolve hash collisions. // Its methods require the hash to be submitted with it to avoid re-computations throughout @@ -1760,7 +1801,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( minOOOTime = series.ooo.oooHeadChunk.minTime } } - if len(series.mmappedChunks) > 0 || series.headChunk != nil || series.pendingCommit || + if len(series.mmappedChunks) > 0 || series.headChunks != nil || series.pendingCommit || (series.ooo != nil && (len(series.ooo.oooMmappedChunks) > 0 || series.ooo.oooHeadChunk != nil)) { seriesMint := series.minTime() if seriesMint < actualMint { @@ -1915,8 +1956,11 @@ type memSeries struct { // // pN is the pointer to the mmappedChunk referered to by HeadChunkID=N mmappedChunks []*mmappedChunk - headChunk *memChunk // Most recent chunk in memory that's still being built. - firstChunkID chunks.HeadChunkID // HeadChunkID for mmappedChunks[0] + // Most recent chunks in memory that are still being built or waiting to be mmapped. + // This is a linked list, headChunks points to the most recent chunk, headChunks.next points + // to older chunk and so on. + headChunks *memChunk + firstChunkID chunks.HeadChunkID // HeadChunkID for mmappedChunks[0] ooo *memSeriesOOOFields @@ -1932,7 +1976,7 @@ type memSeries struct { lastFloatHistogramValue *histogram.FloatHistogram // Current appender for the head chunk. Set when a new head chunk is cut. - // It is nil only if headChunk is nil. E.g. if there was an appender that created a new series, but rolled back the commit + // It is nil only if headChunks is nil. E.g. if there was an appender that created a new series, but rolled back the commit // (the first sample would create a headChunk, hence appender, but rollback skipped it while the Append() call would create a series). app chunkenc.Appender @@ -1966,17 +2010,16 @@ func (s *memSeries) minTime() int64 { if len(s.mmappedChunks) > 0 { return s.mmappedChunks[0].minTime } - if s.headChunk != nil { - return s.headChunk.minTime + if s.headChunks != nil { + return s.headChunks.oldest().minTime } return math.MinInt64 } func (s *memSeries) maxTime() int64 { // The highest timestamps will always be in the regular (non-OOO) chunks, even if OOO is enabled. - c := s.head() - if c != nil { - return c.maxTime + if s.headChunks != nil { + return s.headChunks.maxTime } if len(s.mmappedChunks) > 0 { return s.mmappedChunks[len(s.mmappedChunks)-1].maxTime @@ -1989,12 +2032,29 @@ func (s *memSeries) maxTime() int64 { // Chunk IDs remain unchanged. 
func (s *memSeries) truncateChunksBefore(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) int { var removedInOrder int - if s.headChunk != nil && s.headChunk.maxTime < mint { - // If head chunk is truncated, we can truncate all mmapped chunks. - removedInOrder = 1 + len(s.mmappedChunks) - s.firstChunkID += chunks.HeadChunkID(removedInOrder) - s.headChunk = nil - s.mmappedChunks = nil + if s.headChunks != nil { + var i int + var nextChk *memChunk + chk := s.headChunks + for chk != nil { + if chk.maxTime < mint { + // If any head chunk is truncated, we can truncate all mmapped chunks. + removedInOrder = chk.len() + len(s.mmappedChunks) + s.firstChunkID += chunks.HeadChunkID(removedInOrder) + if i == 0 { + // This is the first chunk on the list so we need to remove the entire list. + s.headChunks = nil + } else { + // This is NOT the first chunk, unlink it from parent. + nextChk.prev = nil + } + s.mmappedChunks = nil + break + } + nextChk = chk + chk = chk.prev + i++ + } } if len(s.mmappedChunks) > 0 { for i, c := range s.mmappedChunks { @@ -2034,13 +2094,52 @@ func (s *memSeries) cleanupAppendIDsBelow(bound uint64) { } } -func (s *memSeries) head() *memChunk { - return s.headChunk -} - type memChunk struct { chunk chunkenc.Chunk minTime, maxTime int64 + prev *memChunk // Link to the previous element on the list. +} + +// len returns the length of memChunk list, including the element it was called on. +func (mc *memChunk) len() (count int) { + elem := mc + for elem != nil { + count++ + elem = elem.prev + } + return count +} + +// oldest returns the oldest element on the list. +// For single element list this will be the same memChunk oldest() was called on. +func (mc *memChunk) oldest() (elem *memChunk) { + elem = mc + for elem.prev != nil { + elem = elem.prev + } + return elem +} + +// atOffset returns a memChunk that's Nth element on the linked list. +func (mc *memChunk) atOffset(offset int) (elem *memChunk) { + if offset == 0 { + return mc + } + if offset < 0 { + return nil + } + + var i int + elem = mc + for i < offset { + i++ + elem = elem.prev + if elem == nil { + break + } + } + + return elem } type oooHeadChunk struct { diff --git a/tsdb/head_append.go b/tsdb/head_append.go index cbbb60f03..b98183164 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -395,7 +395,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) { // Check if we can append in the in-order chunk. if t >= minValidTime { - if s.head() == nil { + if s.headChunks == nil { // The series has no sample and was freshly created. return false, 0, nil } @@ -433,15 +433,14 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi // appendableHistogram checks whether the given histogram is valid for appending to the series. func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error { - c := s.head() - if c == nil { + if s.headChunks == nil { return nil } - if t > c.maxTime { + if t > s.headChunks.maxTime { return nil } - if t < c.maxTime { + if t < s.headChunks.maxTime { return storage.ErrOutOfOrderSample } @@ -455,15 +454,14 @@ func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error { // appendableFloatHistogram checks whether the given float histogram is valid for appending to the series. 
func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram) error { - c := s.head() - if c == nil { + if s.headChunks == nil { return nil } - if t > c.maxTime { + if t > s.headChunks.maxTime { return nil } - if t < c.maxTime { + if t < s.headChunks.maxTime { return storage.ErrOutOfOrderSample } @@ -1200,12 +1198,11 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui return true, false } - // This is a brand new chunk, switch out the head chunk (based on cutNewHeadChunk). - s.mmapCurrentHeadChunk(o.chunkDiskMapper) - s.headChunk = &memChunk{ + s.headChunks = &memChunk{ chunk: newChunk, minTime: t, maxTime: t, + prev: s.headChunks, } s.nextAt = rangeForTimestamp(t, o.chunkRange) return true, true @@ -1258,12 +1255,11 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, return true, false } - // This is a brand new chunk, switch out the head chunk (based on cutNewHeadChunk). - s.mmapCurrentHeadChunk(o.chunkDiskMapper) - s.headChunk = &memChunk{ + s.headChunks = &memChunk{ chunk: newChunk, minTime: t, maxTime: t, + prev: s.headChunks, } s.nextAt = rangeForTimestamp(t, o.chunkRange) return true, true @@ -1273,7 +1269,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. // This should be called only when appending data. func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts) (c *memChunk, sampleInOrder, chunkCreated bool) { - c = s.head() + c = s.headChunks if c == nil { if len(s.mmappedChunks) > 0 && s.mmappedChunks[len(s.mmappedChunks)-1].maxTime >= t { @@ -1281,7 +1277,7 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts return c, false, false } // There is no head chunk in this series yet, create the first chunk for the sample. - c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange) + c = s.cutNewHeadChunk(t, e, o.chunkRange) chunkCreated = true } @@ -1293,8 +1289,9 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts if c.chunk.Encoding() != e { // The chunk encoding expected by this append is different than the head chunk's // encoding. So we cut a new chunk with the expected encoding. - c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange) + c = s.cutNewHeadChunk(t, e, o.chunkRange) chunkCreated = true + } numSamples := c.chunk.NumSamples() @@ -1318,7 +1315,7 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts // as we expect more chunks to come. // Note that next chunk will have its nextAt recalculated for the new rate. if t >= s.nextAt || numSamples >= o.samplesPerChunk*2 { - c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange) + c = s.cutNewHeadChunk(t, e, o.chunkRange) chunkCreated = true } @@ -1338,36 +1335,37 @@ func computeChunkEndTime(start, cur, max int64) int64 { return start + (max-start)/n } -func (s *memSeries) cutNewHeadChunk( - mint int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, -) *memChunk { - s.mmapCurrentHeadChunk(chunkDiskMapper) - - s.headChunk = &memChunk{ +func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange int64) *memChunk { + // When cutting a new head chunk we create a new memChunk instance with .prev + // pointing at the current .headChunks, so it forms a linked list. 
+ // All but first headChunks list elements will be m-mapped as soon as possible + // so this is a single element list most of the time. + s.headChunks = &memChunk{ minTime: mint, maxTime: math.MinInt64, + prev: s.headChunks, } if chunkenc.IsValidEncoding(e) { var err error - s.headChunk.chunk, err = chunkenc.NewEmptyChunk(e) + s.headChunks.chunk, err = chunkenc.NewEmptyChunk(e) if err != nil { panic(err) // This should never happen. } } else { - s.headChunk.chunk = chunkenc.NewXORChunk() + s.headChunks.chunk = chunkenc.NewXORChunk() } // Set upper bound on when the next chunk must be started. An earlier timestamp // may be chosen dynamically at a later point. s.nextAt = rangeForTimestamp(mint, chunkRange) - app, err := s.headChunk.chunk.Appender() + app, err := s.headChunks.chunk.Appender() if err != nil { panic(err) } s.app = app - return s.headChunk + return s.headChunks } // cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk. @@ -1401,19 +1399,32 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap return chunkRef } -func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) { - if s.headChunk == nil || s.headChunk.chunk.NumSamples() == 0 { - // There is no head chunk, so nothing to m-map here. +// mmapChunks will m-map all but first chunk on s.headChunks list. +func (s *memSeries) mmapChunks(chunkDiskMapper *chunks.ChunkDiskMapper) (count int) { + if s.headChunks == nil || s.headChunks.prev == nil { + // There is none or only one head chunk, so nothing to m-map here. return } - chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk, false, handleChunkWriteError) - s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{ - ref: chunkRef, - numSamples: uint16(s.headChunk.chunk.NumSamples()), - minTime: s.headChunk.minTime, - maxTime: s.headChunk.maxTime, - }) + // Write chunks starting from the oldest one and stop before we get to current s.headChunk. + // If we have this chain: s.headChunk{t4} -> t3 -> t2 -> t1 -> t0 + // then we need to write chunks t0 to t3, but skip s.headChunks. + for i := s.headChunks.len() - 1; i > 0; i-- { + chk := s.headChunks.atOffset(i) + chunkRef := chunkDiskMapper.WriteChunk(s.ref, chk.minTime, chk.maxTime, chk.chunk, false, handleChunkWriteError) + s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{ + ref: chunkRef, + numSamples: uint16(chk.chunk.NumSamples()), + minTime: chk.minTime, + maxTime: chk.maxTime, + }) + count++ + } + + // Once we've written out all chunks except s.headChunks we need to unlink these from s.headChunk. + s.headChunks.prev = nil + + return count } func handleChunkWriteError(err error) { diff --git a/tsdb/head_read.go b/tsdb/head_read.go index b2af74ace..f27d4ef76 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -174,12 +174,27 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(i))), }) } - if s.headChunk != nil && s.headChunk.OverlapsClosedInterval(h.mint, h.maxt) { - *chks = append(*chks, chunks.Meta{ - MinTime: s.headChunk.minTime, - MaxTime: math.MaxInt64, // Set the head chunks as open (being appended to). 
- Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(len(s.mmappedChunks)))), - }) + + if s.headChunks != nil { + var maxTime int64 + var i, j int + for i = s.headChunks.len() - 1; i >= 0; i-- { + chk := s.headChunks.atOffset(i) + if i == 0 { + // Set the head chunk as open (being appended to) for the first headChunk. + maxTime = math.MaxInt64 + } else { + maxTime = chk.maxTime + } + if chk.OverlapsClosedInterval(h.mint, h.maxt) { + *chks = append(*chks, chunks.Meta{ + MinTime: chk.minTime, + MaxTime: maxTime, + Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(len(s.mmappedChunks)+j))), + }) + } + j++ + } } return nil @@ -187,7 +202,7 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB // headChunkID returns the HeadChunkID referred to by the given position. // * 0 <= pos < len(s.mmappedChunks) refer to s.mmappedChunks[pos] -// * pos == len(s.mmappedChunks) refers to s.headChunk +// * pos >= len(s.mmappedChunks) refers to s.headChunks linked list func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID { return chunks.HeadChunkID(pos) + s.firstChunkID } @@ -296,7 +311,7 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc. } s.Lock() - c, headChunk, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool) + c, headChunk, isOpen, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool) if err != nil { s.Unlock() return nil, 0, err @@ -305,6 +320,7 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc. if !headChunk { // Set this to nil so that Go GC can collect it after it has been used. c.chunk = nil + c.prev = nil h.head.memChunkPool.Put(c) } }() @@ -316,14 +332,14 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc. } chk, maxTime := c.chunk, c.maxTime - if headChunk && copyLastChunk { + if headChunk && isOpen && copyLastChunk { // The caller may ask to copy the head chunk in order to take the // bytes of the chunk without causing the race between read and append. - b := s.headChunk.chunk.Bytes() + b := s.headChunks.chunk.Bytes() newB := make([]byte, len(b)) copy(newB, b) // TODO(codesome): Use bytes.Clone() when we upgrade to Go 1.20. // TODO(codesome): Put back in the pool (non-trivial). - chk, err = h.head.opts.ChunkPool.Get(s.headChunk.chunk.Encoding(), newB) + chk, err = h.head.opts.ChunkPool.Get(s.headChunks.chunk.Encoding(), newB) if err != nil { return nil, 0, err } @@ -341,34 +357,60 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc. // chunk returns the chunk for the HeadChunkID from memory or by m-mapping it from the disk. // If headChunk is false, it means that the returned *memChunk // (and not the chunkenc.Chunk inside it) can be garbage collected after its usage. -func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, headChunk bool, err error) { +// if isOpen is true, it means that the returned *memChunk is used for appends. +func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, headChunk, isOpen bool, err error) { // ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are // incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index. 
// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix - // is len(s.mmappedChunks), it represents the next chunk, which is the head chunk. + // is >= len(s.mmappedChunks), it represents one of the chunks on s.headChunks linked list. + // The order of elemens is different for slice and linked list. + // For s.mmappedChunks slice newer chunks are appended to it. + // For s.headChunks list newer chunks are prepended to it. + // + // memSeries { + // mmappedChunks: [t0, t1, t2] + // headChunk: {t5}->{t4}->{t3} + // } ix := int(id) - int(s.firstChunkID) - if ix < 0 || ix > len(s.mmappedChunks) { - return nil, false, storage.ErrNotFound + + var headChunksLen int + if s.headChunks != nil { + headChunksLen = s.headChunks.len() } - if ix == len(s.mmappedChunks) { - if s.headChunk == nil { - return nil, false, errors.New("invalid head chunk") - } - return s.headChunk, true, nil + if ix < 0 || ix > len(s.mmappedChunks)+headChunksLen-1 { + return nil, false, false, storage.ErrNotFound } - chk, err := chunkDiskMapper.Chunk(s.mmappedChunks[ix].ref) - if err != nil { - if _, ok := err.(*chunks.CorruptionErr); ok { - panic(err) + + if ix < len(s.mmappedChunks) { + chk, err := chunkDiskMapper.Chunk(s.mmappedChunks[ix].ref) + if err != nil { + if _, ok := err.(*chunks.CorruptionErr); ok { + panic(err) + } + return nil, false, false, err } - return nil, false, err + mc := memChunkPool.Get().(*memChunk) + mc.chunk = chk + mc.minTime = s.mmappedChunks[ix].minTime + mc.maxTime = s.mmappedChunks[ix].maxTime + return mc, false, false, nil } - mc := memChunkPool.Get().(*memChunk) - mc.chunk = chk - mc.minTime = s.mmappedChunks[ix].minTime - mc.maxTime = s.mmappedChunks[ix].maxTime - return mc, false, nil + + ix -= len(s.mmappedChunks) + + offset := headChunksLen - ix - 1 + // headChunks is a linked list where first element is the most recent one and the last one is the oldest. + // This order is reversed when compared with mmappedChunks, since mmappedChunks[0] is the oldest chunk, + // while headChunk.atOffset(0) would give us the most recent chunk. + // So when calling headChunk.atOffset() we need to reverse the value of ix. + elem := s.headChunks.atOffset(offset) + if elem == nil { + // This should never really happen and would mean that headChunksLen value is NOT equal + // to the length of the headChunks list. + return nil, false, false, storage.ErrNotFound + } + return elem, true, offset == 0, nil } // oooMergedChunk returns the requested chunk based on the given chunks.Meta @@ -660,8 +702,21 @@ func (s *memSeries) iterator(id chunks.HeadChunkID, c chunkenc.Chunk, isoState * } } - if s.headChunk != nil { - totalSamples += s.headChunk.chunk.NumSamples() + ix -= len(s.mmappedChunks) + if s.headChunks != nil { + // Iterate all head chunks from the oldest to the newest. + headChunksLen := s.headChunks.len() + for j := headChunksLen - 1; j >= 0; j-- { + chk := s.headChunks.atOffset(j) + chkSamples := chk.chunk.NumSamples() + totalSamples += chkSamples + // Chunk ID is len(s.mmappedChunks) + $(headChunks list position). + // Where $(headChunks list position) is zero for the oldest chunk and $(s.headChunks.len() - 1) + // for the newest (open) chunk. 
+ if headChunksLen-1-j < ix { + previousSamples += chkSamples + } + } } // Removing the extra transactionIDs that are relevant for samples that diff --git a/tsdb/head_read_test.go b/tsdb/head_read_test.go index 2712bcd1a..ad0a59d34 100644 --- a/tsdb/head_read_test.go +++ b/tsdb/head_read_test.go @@ -15,11 +15,14 @@ package tsdb import ( "fmt" + "sync" "testing" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" ) func TestBoundedChunk(t *testing.T) { @@ -176,3 +179,387 @@ func newTestChunk(numSamples int) chunkenc.Chunk { } return xor } + +// TestMemSeries_chunk runs a series of tests on memSeries.chunk() calls. +// It will simulate various conditions to ensure all code paths in that function are covered. +func TestMemSeries_chunk(t *testing.T) { + const chunkRange int64 = 100 + const chunkStep int64 = 5 + + appendSamples := func(t *testing.T, s *memSeries, start, end int64, cdm *chunks.ChunkDiskMapper) { + for i := start; i < end; i += chunkStep { + ok, _ := s.append(i, float64(i), 0, chunkOpts{ + chunkDiskMapper: cdm, + chunkRange: chunkRange, + samplesPerChunk: DefaultSamplesPerChunk, + }) + require.True(t, ok, "sample append failed") + } + } + + type setupFn func(*testing.T, *memSeries, *chunks.ChunkDiskMapper) + + type callOutput uint8 + const ( + outOpenHeadChunk callOutput = iota // memSeries.chunk() call returned memSeries.headChunks with headChunk=true & isOpen=true + outClosedHeadChunk // memSeries.chunk() call returned memSeries.headChunks with headChunk=true & isOpen=false + outMmappedChunk // memSeries.chunk() call returned a chunk from memSeries.mmappedChunks with headChunk=false + outErr // memSeries.chunk() call returned an error + ) + + tests := []struct { + name string + setup setupFn // optional function called just before the test memSeries.chunk() call + inputID chunks.HeadChunkID // requested chunk id for memSeries.chunk() call + expected callOutput + }{ + { + name: "call ix=0 on empty memSeries", + inputID: 0, + expected: outErr, + }, + { + name: "call ix=1 on empty memSeries", + inputID: 1, + expected: outErr, + }, + { + name: "firstChunkID > ix", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange, cdm) + require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + s.firstChunkID = 5 + }, + inputID: 1, + expected: outErr, + }, + { + name: "call ix=0 on memSeries with no mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange, cdm) + require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 0, + expected: outOpenHeadChunk, + }, + { + name: "call ix=1 on memSeries with no mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange, cdm) + require.Len(t, 
s.mmappedChunks, 0, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 1, + expected: outErr, + }, + { + name: "call ix=10 on memSeries with no mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange, cdm) + require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 10, + expected: outErr, + }, + { + name: "call ix=0 on memSeries with 3 mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 0, + expected: outMmappedChunk, + }, + { + name: "call ix=1 on memSeries with 3 mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 1, + expected: outMmappedChunk, + }, + { + name: "call ix=3 on memSeries with 3 mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 3, + expected: outOpenHeadChunk, + }, + { + name: "call ix=0 on memSeries with 3 mmapped chunks and no headChunk", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + s.headChunks = nil + }, + inputID: 0, + expected: outMmappedChunk, + }, + { + name: "call ix=2 on memSeries with 3 mmapped chunks and no headChunk", + setup: func(t *testing.T, s 
*memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + s.headChunks = nil + }, + inputID: 2, + expected: outMmappedChunk, + }, + { + name: "call ix=3 on memSeries with 3 mmapped chunks and no headChunk", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + s.headChunks = nil + }, + inputID: 3, + expected: outErr, + }, + { + name: "call ix=1 on memSeries with 3 mmapped chunks and closed ChunkDiskMapper", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + cdm.Close() + }, + inputID: 1, + expected: outErr, + }, + { + name: "call ix=3 on memSeries with 3 mmapped chunks and closed ChunkDiskMapper", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + cdm.Close() + }, + inputID: 3, + expected: outOpenHeadChunk, + }, + { + name: "call ix=0 on memSeries with 3 head chunks and no mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*3, cdm) + require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 0, + expected: outClosedHeadChunk, + }, + { + name: "call ix=1 on memSeries with 3 head chunks and no mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*3, cdm) + require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, 
(chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 1, + expected: outClosedHeadChunk, + }, + { + name: "call ix=10 on memSeries with 3 head chunks and no mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*3, cdm) + require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 10, + expected: outErr, + }, + { + name: "call ix=0 on memSeries with 3 head chunks and 3 mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + + appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) + require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 0, + expected: outMmappedChunk, + }, + { + name: "call ix=2 on memSeries with 3 head chunks and 3 mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + + appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) + require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 2, + expected: outMmappedChunk, + }, + { + name: "call ix=3 on memSeries with 3 head chunks and 3 mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + + appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) + require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 3, + expected: outClosedHeadChunk, + }, + { + name: "call ix=5 on memSeries with 3 head chunks and 3 mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, 
s.headChunks.len(), 1, "wrong number of headChunks") + + appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) + require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 5, + expected: outOpenHeadChunk, + }, + { + name: "call ix=6 on memSeries with 3 head chunks and 3 mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + + appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) + require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 6, + expected: outErr, + }, + + { + name: "call ix=10 on memSeries with 3 head chunks and 3 mmapped chunks", + setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { + appendSamples(t, s, 0, chunkRange*4, cdm) + s.mmapChunks(cdm) + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + + appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) + require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") + require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") + require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") + }, + inputID: 10, + expected: outErr, + }, + } + + memChunkPool := &sync.Pool{ + New: func() interface{} { + return &memChunk{} + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + dir := t.TempDir() + chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize) + require.NoError(t, err) + defer func() { + require.NoError(t, chunkDiskMapper.Close()) + }() + + series := newMemSeries(labels.EmptyLabels(), 1, true) + + if tc.setup != nil { + tc.setup(t, series, chunkDiskMapper) + } + + chk, headChunk, isOpen, err := series.chunk(tc.inputID, chunkDiskMapper, memChunkPool) + switch tc.expected { + case outOpenHeadChunk: + require.NoError(t, err, "unexpected error") + require.True(t, headChunk, "expected a chunk with headChunk=true but got headChunk=%v", headChunk) + require.True(t, isOpen, "expected a chunk with isOpen=true but got isOpen=%v", isOpen) + case outClosedHeadChunk: + require.NoError(t, err, "unexpected error") + require.True(t, headChunk, "expected a chunk with headChunk=true but got headChunk=%v", headChunk) + require.False(t, isOpen, "expected a chunk with isOpen=false but got isOpen=%v", isOpen) + case outMmappedChunk: + require.NoError(t, err, "unexpected error") + require.False(t, headChunk, "expected a chunk with headChunk=false but got gc=%v", headChunk) + case outErr: + 
require.Nil(t, chk, "got a non-nil chunk reference returned with an error") + require.Error(t, err) + } + }) + } +} diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 0a6eef66c..9b49aca03 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -292,7 +292,7 @@ func BenchmarkLoadWAL(b *testing.B) { // Create one mmapped chunk per series, with one sample at the given time. s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, defaultIsolationDisabled) s.append(c.mmappedChunkT, 42, 0, cOpts) - s.mmapCurrentHeadChunk(chunkDiskMapper) + s.mmapChunks(chunkDiskMapper) } require.NoError(b, chunkDiskMapper.Close()) } @@ -587,15 +587,15 @@ func TestHead_ReadWAL(t *testing.T) { return x } - c, _, err := s10.chunk(0, head.chunkDiskMapper, &head.memChunkPool) + c, _, _, err := s10.chunk(0, head.chunkDiskMapper, &head.memChunkPool) require.NoError(t, err) require.Equal(t, []sample{{100, 2, nil, nil}, {101, 5, nil, nil}}, expandChunk(c.chunk.Iterator(nil))) - c, _, err = s50.chunk(0, head.chunkDiskMapper, &head.memChunkPool) + c, _, _, err = s50.chunk(0, head.chunkDiskMapper, &head.memChunkPool) require.NoError(t, err) require.Equal(t, []sample{{101, 6, nil, nil}}, expandChunk(c.chunk.Iterator(nil))) // The samples before the new series record should be discarded since a duplicate record // is only possible when old samples were compacted. - c, _, err = s100.chunk(0, head.chunkDiskMapper, &head.memChunkPool) + c, _, _, err = s100.chunk(0, head.chunkDiskMapper, &head.memChunkPool) require.NoError(t, err) require.Equal(t, []sample{{101, 7, nil, nil}}, expandChunk(c.chunk.Iterator(nil))) @@ -822,30 +822,200 @@ func TestMemSeries_truncateChunks(t *testing.T) { ok, _ := s.append(int64(i), float64(i), 0, cOpts) require.True(t, ok, "sample append failed") } + s.mmapChunks(chunkDiskMapper) // Check that truncate removes half of the chunks and afterwards // that the ID of the last chunk still gives us the same chunk afterwards. countBefore := len(s.mmappedChunks) + 1 // +1 for the head chunk. lastID := s.headChunkID(countBefore - 1) - lastChunk, _, err := s.chunk(lastID, chunkDiskMapper, &memChunkPool) + lastChunk, _, _, err := s.chunk(lastID, chunkDiskMapper, &memChunkPool) require.NoError(t, err) require.NotNil(t, lastChunk) - chk, _, err := s.chunk(0, chunkDiskMapper, &memChunkPool) + chk, _, _, err := s.chunk(0, chunkDiskMapper, &memChunkPool) require.NotNil(t, chk) require.NoError(t, err) s.truncateChunksBefore(2000, 0) require.Equal(t, int64(2000), s.mmappedChunks[0].minTime) - _, _, err = s.chunk(0, chunkDiskMapper, &memChunkPool) + _, _, _, err = s.chunk(0, chunkDiskMapper, &memChunkPool) require.Equal(t, storage.ErrNotFound, err, "first chunks not gone") require.Equal(t, countBefore/2, len(s.mmappedChunks)+1) // +1 for the head chunk. 
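All of the tests above move to the four-value series.chunk() signature. The caller-side contract that the headChunk and isOpen booleans encode (recycle pooled copies only for non-head chunks, copy bytes only for the still-open chunk, as headChunkReader.chunk does earlier in this patch) can be summarised with a simplified stand-in sketch; chunkHandle and readChunk are illustrative, not real tsdb types:

    package main

    import (
        "fmt"
        "sync"
    )

    // chunkHandle is a simplified stand-in for tsdb's memChunk: it only carries
    // the raw chunk bytes and the link used by the head-chunk list.
    type chunkHandle struct {
        data []byte
        prev *chunkHandle
    }

    // readChunk sketches how a caller treats the two booleans: handles that did
    // not come from the head list are recycled via the pool once read, and only
    // the open (still appended-to) head chunk has its bytes copied to avoid
    // racing with concurrent appends.
    func readChunk(c *chunkHandle, headChunk, isOpen bool, pool *sync.Pool) []byte {
        b := c.data
        if headChunk && isOpen {
            b = append([]byte(nil), b...) // work on a private copy
        }
        if !headChunk {
            c.data, c.prev = nil, nil // let the GC reclaim the payload
            pool.Put(c)
        }
        return b
    }

    func main() {
        pool := &sync.Pool{New: func() interface{} { return &chunkHandle{} }}
        open := &chunkHandle{data: []byte{1, 2, 3}}
        fmt.Println(readChunk(open, true, true, pool)) // copied: [1 2 3]
    }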
- chk, _, err = s.chunk(lastID, chunkDiskMapper, &memChunkPool) + chk, _, _, err = s.chunk(lastID, chunkDiskMapper, &memChunkPool) require.NoError(t, err) require.Equal(t, lastChunk, chk) } +func TestMemSeries_truncateChunks_scenarios(t *testing.T) { + const chunkRange = 100 + const chunkStep = 5 + + tests := []struct { + name string + headChunks int // the number of head chubks to create on memSeries by appending enough samples + mmappedChunks int // the number of mmapped chunks to create on memSeries by appending enough samples + truncateBefore int64 // the mint to pass to truncateChunksBefore() + expectedTruncated int // the number of chunks that we're expecting be truncated and returned by truncateChunksBefore() + expectedHead int // the expected number of head chunks after truncation + expectedMmap int // the expected number of mmapped chunks after truncation + expectedFirstChunkID chunks.HeadChunkID // the expected series.firstChunkID after truncation + }{ + { + name: "empty memSeries", + truncateBefore: chunkRange * 10, + }, + { + name: "single head chunk, not truncated", + headChunks: 1, + expectedHead: 1, + }, + { + name: "single head chunk, truncated", + headChunks: 1, + truncateBefore: chunkRange, + expectedTruncated: 1, + expectedHead: 0, + expectedFirstChunkID: 1, + }, + { + name: "2 head chunks, not truncated", + headChunks: 2, + expectedHead: 2, + }, + { + name: "2 head chunks, first truncated", + headChunks: 2, + truncateBefore: chunkRange, + expectedTruncated: 1, + expectedHead: 1, + expectedFirstChunkID: 1, + }, + { + name: "2 head chunks, everything truncated", + headChunks: 2, + truncateBefore: chunkRange * 2, + expectedTruncated: 2, + expectedHead: 0, + expectedFirstChunkID: 2, + }, + { + name: "no head chunks, 3 mmap chunks, second mmap truncated", + headChunks: 0, + mmappedChunks: 3, + truncateBefore: chunkRange * 2, + expectedTruncated: 2, + expectedHead: 0, + expectedMmap: 1, + expectedFirstChunkID: 2, + }, + { + name: "single head chunk, single mmap chunk, not truncated", + headChunks: 1, + mmappedChunks: 1, + expectedHead: 1, + expectedMmap: 1, + }, + { + name: "single head chunk, single mmap chunk, mmap truncated", + headChunks: 1, + mmappedChunks: 1, + truncateBefore: chunkRange, + expectedTruncated: 1, + expectedHead: 1, + expectedMmap: 0, + expectedFirstChunkID: 1, + }, + { + name: "5 head chunk, 5 mmap chunk, third head truncated", + headChunks: 5, + mmappedChunks: 5, + truncateBefore: chunkRange * 7, + expectedTruncated: 7, + expectedHead: 3, + expectedMmap: 0, + expectedFirstChunkID: 7, + }, + { + name: "2 head chunks, 3 mmap chunks, second mmap truncated", + headChunks: 2, + mmappedChunks: 3, + truncateBefore: chunkRange * 2, + expectedTruncated: 2, + expectedHead: 2, + expectedMmap: 1, + expectedFirstChunkID: 2, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + dir := t.TempDir() + chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize) + require.NoError(t, err) + defer func() { + require.NoError(t, chunkDiskMapper.Close()) + }() + + series := newMemSeries(labels.EmptyLabels(), 1, true) + + cOpts := chunkOpts{ + chunkDiskMapper: chunkDiskMapper, + chunkRange: chunkRange, + samplesPerChunk: DefaultSamplesPerChunk, + } + + var headStart int + if tc.mmappedChunks > 0 { + headStart = (tc.mmappedChunks + 1) * chunkRange + for i := 0; i < (tc.mmappedChunks+1)*chunkRange; i += chunkStep { + ok, _ := series.append(int64(i), float64(i), 0, cOpts) + 
require.True(t, ok, "sample append failed") + } + series.mmapChunks(chunkDiskMapper) + } + + if tc.headChunks == 0 { + series.headChunks = nil + } else { + for i := headStart; i < chunkRange*(tc.mmappedChunks+tc.headChunks); i += chunkStep { + ok, _ := series.append(int64(i), float64(i), 0, cOpts) + require.True(t, ok, "sample append failed: %d", i) + } + } + + if tc.headChunks > 0 { + require.NotNil(t, series.headChunks, "head chunk is missing") + require.Equal(t, tc.headChunks, series.headChunks.len(), "wrong number of head chunks") + } else { + require.Nil(t, series.headChunks, "head chunk is present") + } + require.Equal(t, tc.mmappedChunks, len(series.mmappedChunks), "wrong number of mmapped chunks") + + truncated := series.truncateChunksBefore(tc.truncateBefore, 0) + require.Equal(t, tc.expectedTruncated, truncated, "wrong number of truncated chunks returned") + + require.Equal(t, tc.expectedMmap, len(series.mmappedChunks), "wrong number of mmappedChunks after truncation") + + if tc.expectedHead > 0 { + require.NotNil(t, series.headChunks, "headChunks should is nil after truncation") + require.Equal(t, tc.expectedHead, series.headChunks.len(), "wrong number of head chunks after truncation") + require.Nil(t, series.headChunks.oldest().prev, "last head chunk cannot have any next chunk set") + } else { + require.Nil(t, series.headChunks, "headChunks should is non-nil after truncation") + } + + if series.headChunks != nil || len(series.mmappedChunks) > 0 { + require.GreaterOrEqual(t, series.maxTime(), tc.truncateBefore, "wrong value of series.maxTime() after truncation") + } else { + require.Equal(t, int64(math.MinInt64), series.maxTime(), "wrong value of series.maxTime() after truncation") + } + + require.Equal(t, tc.expectedFirstChunkID, series.firstChunkID, "wrong firstChunkID after truncation") + }) + } +} + func TestHeadDeleteSeriesWithoutSamples(t *testing.T) { for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { @@ -1363,6 +1533,7 @@ func TestMemSeries_append(t *testing.T) { ok, chunkCreated = s.append(999, 2, 0, cOpts) require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") + s.mmapChunks(chunkDiskMapper) ok, chunkCreated = s.append(1000, 3, 0, cOpts) require.True(t, ok, "append failed") @@ -1372,11 +1543,12 @@ func TestMemSeries_append(t *testing.T) { require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") + s.mmapChunks(chunkDiskMapper) require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk") require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range") require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range") - require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range") - require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range") + require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range") + require.Equal(t, int64(1001), s.headChunks.maxTime, "wrong chunk range") // Fill the range [1000,2000) with many samples. Intermediate chunks should be cut // at approximately 120 samples per chunk. 
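A recurring pattern in the updated tests is an explicit s.mmapChunks(chunkDiskMapper) (or head.mmapHeadChunks()) call between appending and asserting on mmappedChunks, since the tests now assume that cutting a new chunk no longer m-maps the full one on its own. Assuming the package-internal types and imports already used by the surrounding test files, the pattern could be factored into a helper along these lines (the helper itself is hypothetical, not part of the patch):

    // appendAndMmap appends samples in [from, to) at the given step and then
    // moves any closed head chunks to disk, mirroring the explicit m-mapping
    // step the tests in this patch perform before asserting on mmappedChunks.
    func appendAndMmap(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper, opts chunkOpts, from, to, step int64) {
        for ts := from; ts < to; ts += step {
            ok, _ := s.append(ts, float64(ts), 0, opts)
            require.True(t, ok, "sample append failed")
        }
        // Without this call the closed chunks would stay on s.headChunks and
        // len(s.mmappedChunks) would not grow.
        s.mmapChunks(cdm)
    }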
@@ -1384,6 +1556,7 @@ func TestMemSeries_append(t *testing.T) { ok, _ := s.append(1001+int64(i), float64(i), 0, cOpts) require.True(t, ok, "append failed") } + s.mmapChunks(chunkDiskMapper) require.Greater(t, len(s.mmappedChunks)+1, 7, "expected intermediate chunks") @@ -1437,21 +1610,23 @@ func TestMemSeries_appendHistogram(t *testing.T) { require.True(t, ok, "append failed") require.False(t, chunkCreated, "second sample should use same chunk") + s.mmapChunks(chunkDiskMapper) require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk") require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range") require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range") - require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range") - require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range") + require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range") + require.Equal(t, int64(1001), s.headChunks.maxTime, "wrong chunk range") ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, cOpts) require.True(t, ok, "append failed") require.False(t, chunkCreated, "third sample should trigger a re-encoded chunk") + s.mmapChunks(chunkDiskMapper) require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk") require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range") require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range") - require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range") - require.Equal(t, int64(1002), s.headChunk.maxTime, "wrong chunk range") + require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range") + require.Equal(t, int64(1002), s.headChunks.maxTime, "wrong chunk range") } func TestMemSeries_append_atVariableRate(t *testing.T) { @@ -1495,6 +1670,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { require.True(t, ok, "new chunk sample was not appended") require.True(t, chunkCreated, "sample at block duration timestamp should create a new chunk") + s.mmapChunks(chunkDiskMapper) var totalSamplesInChunks int for i, c := range s.mmappedChunks { totalSamplesInChunks += int(c.numSamples) @@ -1841,6 +2017,7 @@ func TestHeadReadWriterRepair(t *testing.T) { require.True(t, ok, "series append failed") require.False(t, chunkCreated, "chunk was created") h.chunkDiskMapper.CutNewFile() + s.mmapChunks(h.chunkDiskMapper) } require.NoError(t, h.Close()) @@ -1985,6 +2162,7 @@ func TestMemSeriesIsolation(t *testing.T) { _, err := app.Append(0, labels.FromStrings("foo", "bar"), int64(i), float64(i)) require.NoError(t, err) require.NoError(t, app.Commit()) + h.mmapHeadChunks() } return i } @@ -2666,7 +2844,7 @@ func TestIteratorSeekIntoBuffer(t *testing.T) { require.True(t, ok, "sample append failed") } - c, _, err := s.chunk(0, chunkDiskMapper, &sync.Pool{ + c, _, _, err := s.chunk(0, chunkDiskMapper, &sync.Pool{ New: func() interface{} { return &memChunk{} }, @@ -3092,6 +3270,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { } } require.NoError(t, app.Commit()) + head.mmapHeadChunks() } // There should be 11 mmap chunks in s1. @@ -3103,7 +3282,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { cpy := *mmap expMmapChunks = append(expMmapChunks, &cpy) } - expHeadChunkSamples := ms.headChunk.chunk.NumSamples() + expHeadChunkSamples := ms.headChunks.chunk.NumSamples() require.Greater(t, expHeadChunkSamples, 0) // Series with mix of histograms and float. 
@@ -3199,7 +3378,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { // Checking contents of s1. ms = head.series.getByHash(s1.Hash(), s1) require.Equal(t, expMmapChunks, ms.mmappedChunks) - require.Equal(t, expHeadChunkSamples, ms.headChunk.chunk.NumSamples()) + require.Equal(t, expHeadChunkSamples, ms.headChunks.chunk.NumSamples()) testQuery := func() { q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime()) @@ -3738,6 +3917,8 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { // Only 1 chunk in the memory, no m-mapped chunk. s := head.series.getByHash(l.Hash(), l) require.NotNil(t, s) + require.NotNil(t, s.headChunks) + require.Equal(t, s.headChunks.len(), 1) require.Equal(t, 0, len(s.mmappedChunks)) testQuery(1) @@ -3766,10 +3947,13 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { expHistograms = append(expHistograms, timedHistogram{t: 100*int64(len(expHistograms)) + 1, h: &histogram.Histogram{Sum: math.Float64frombits(value.StaleNaN)}}) } require.NoError(t, app.Commit()) + head.mmapHeadChunks() // Total 2 chunks, 1 m-mapped. s = head.series.getByHash(l.Hash(), l) require.NotNil(t, s) + require.NotNil(t, s.headChunks) + require.Equal(t, s.headChunks.len(), 1) require.Equal(t, 1, len(s.mmappedChunks)) testQuery(2) } @@ -3804,6 +3988,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { ms, _, err := head.getOrCreate(l.Hash(), l) require.NoError(t, err) + ms.mmapChunks(head.chunkDiskMapper) require.Len(t, ms.mmappedChunks, len(expHeaders)-1) // One is the head chunk. for i, mmapChunk := range ms.mmappedChunks { @@ -3816,9 +4001,9 @@ func TestHistogramCounterResetHeader(t *testing.T) { } } if floatHisto { - require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunk.chunk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader()) + require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunks.chunk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader()) } else { - require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunk.chunk.(*chunkenc.HistogramChunk).GetCounterResetHeader()) + require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunks.chunk.(*chunkenc.HistogramChunk).GetCounterResetHeader()) } } @@ -3909,7 +4094,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { require.NoError(t, err) require.False(t, created) require.NotNil(t, ms) - require.Len(t, ms.mmappedChunks, count-1) // One will be the head chunk. + require.Equal(t, count, ms.headChunks.len()) } appends := []struct { @@ -4350,6 +4535,7 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) { require.False(t, created, "should already exist") require.NotNil(t, series, "should return the series we created above") + series.mmapChunks(h.chunkDiskMapper) expChunks := make([]*mmappedChunk, len(series.mmappedChunks)) copy(expChunks, series.mmappedChunks) @@ -4507,6 +4693,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) { require.NoError(t, f.Close()) openHead() + h.mmapHeadChunks() // There should be less m-map files due to corruption. 
files, err = os.ReadDir(filepath.Join(dir, "chunks_head")) @@ -4697,7 +4884,7 @@ func TestOOOAppendWithNoSeries(t *testing.T) { require.False(t, created) require.NotNil(t, ms) - require.Nil(t, ms.headChunk) + require.Nil(t, ms.headChunks) require.NotNil(t, ms.ooo.oooHeadChunk) require.Equal(t, expSamples, ms.ooo.oooHeadChunk.chunk.NumSamples()) } @@ -4709,8 +4896,8 @@ func TestOOOAppendWithNoSeries(t *testing.T) { require.NotNil(t, ms) require.Nil(t, ms.ooo) - require.NotNil(t, ms.headChunk) - require.Equal(t, expSamples, ms.headChunk.chunk.NumSamples()) + require.NotNil(t, ms.headChunks) + require.Equal(t, expSamples, ms.headChunks.chunk.NumSamples()) } newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", fmt.Sprintf("%d", idx)) } @@ -4821,6 +5008,7 @@ func TestGaugeHistogramWALAndChunkHeader(t *testing.T) { appendHistogram(hists[4]) checkHeaders := func() { + head.mmapHeadChunks() ms, _, err := head.getOrCreate(l.Hash(), l) require.NoError(t, err) require.Len(t, ms.mmappedChunks, 3) @@ -4835,7 +5023,7 @@ func TestGaugeHistogramWALAndChunkHeader(t *testing.T) { require.NoError(t, err) require.Equal(t, expHeaders[i], chk.(*chunkenc.HistogramChunk).GetCounterResetHeader()) } - require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunk.chunk.(*chunkenc.HistogramChunk).GetCounterResetHeader()) + require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunks.chunk.(*chunkenc.HistogramChunk).GetCounterResetHeader()) } checkHeaders() @@ -4898,6 +5086,7 @@ func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) { checkHeaders := func() { ms, _, err := head.getOrCreate(l.Hash(), l) require.NoError(t, err) + head.mmapHeadChunks() require.Len(t, ms.mmappedChunks, 3) expHeaders := []chunkenc.CounterResetHeader{ chunkenc.UnknownCounterReset, @@ -4910,7 +5099,7 @@ func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) { require.NoError(t, err) require.Equal(t, expHeaders[i], chk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader()) } - require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunk.chunk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader()) + require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunks.chunk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader()) } checkHeaders() diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 2397a9ec9..3ed95887e 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -503,7 +503,7 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m // Any samples replayed till now would already be compacted. Resetting the head chunk. mSeries.nextAt = 0 - mSeries.headChunk = nil + mSeries.headChunks = nil mSeries.app = nil return } @@ -595,6 +595,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() + _ = ms.mmapChunks(h.chunkDiskMapper) } if s.T > maxt { maxt = s.T @@ -960,15 +961,15 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte { buf.PutBE64int64(0) // Backwards-compatibility; was chunkRange but now unused. 
s.Lock() - if s.headChunk == nil { + if s.headChunks == nil { buf.PutUvarint(0) } else { - enc := s.headChunk.chunk.Encoding() + enc := s.headChunks.chunk.Encoding() buf.PutUvarint(1) - buf.PutBE64int64(s.headChunk.minTime) - buf.PutBE64int64(s.headChunk.maxTime) + buf.PutBE64int64(s.headChunks.minTime) + buf.PutBE64int64(s.headChunks.maxTime) buf.PutByte(byte(enc)) - buf.PutUvarintBytes(s.headChunk.chunk.Bytes()) + buf.PutUvarintBytes(s.headChunks.chunk.Bytes()) switch enc { case chunkenc.EncXOR: @@ -1414,12 +1415,12 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie continue } series.nextAt = csr.mc.maxTime // This will create a new chunk on append. - series.headChunk = csr.mc + series.headChunks = csr.mc series.lastValue = csr.lastValue series.lastHistogramValue = csr.lastHistogramValue series.lastFloatHistogramValue = csr.lastFloatHistogramValue - app, err := series.headChunk.chunk.Appender() + app, err := series.headChunks.chunk.Appender() if err != nil { errChan <- err return From 98383fdc632d8d20cbc9a588a3b6097d012475eb Mon Sep 17 00:00:00 2001 From: Marc Tuduri Date: Mon, 31 Jul 2023 12:51:41 +0200 Subject: [PATCH 19/82] sd: change hetzner role type and constants to be exportable Signed-off-by: Marc Tuduri --- discovery/hetzner/hcloud.go | 2 +- discovery/hetzner/hetzner.go | 18 +++++++++--------- discovery/hetzner/robot.go | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index 4bcfde830..6d0599dfa 100644 --- a/discovery/hetzner/hcloud.go +++ b/discovery/hetzner/hcloud.go @@ -91,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er targets := make([]model.LabelSet, len(servers)) for i, server := range servers { labels := model.LabelSet{ - hetznerLabelRole: model.LabelValue(hetznerRoleHcloud), + hetznerLabelRole: model.LabelValue(HetznerRoleHcloud), hetznerLabelServerID: model.LabelValue(fmt.Sprintf("%d", server.ID)), hetznerLabelServerName: model.LabelValue(server.Name), hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name), diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index 40b28cc2c..c3f7ec39c 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -57,7 +57,7 @@ type SDConfig struct { RefreshInterval model.Duration `yaml:"refresh_interval"` Port int `yaml:"port"` - Role role `yaml:"role"` + Role Role `yaml:"role"` hcloudEndpoint string // For tests only. robotEndpoint string // For tests only. } @@ -74,26 +74,26 @@ type refresher interface { refresh(context.Context) ([]*targetgroup.Group, error) } -// role is the role of the target within the Hetzner Ecosystem. -type role string +// Role is the Role of the target within the Hetzner Ecosystem. +type Role string // The valid options for role. const ( // Hetzner Robot Role (Dedicated Server) // https://robot.hetzner.com - hetznerRoleRobot role = "robot" + HetznerRoleRobot Role = "robot" // Hetzner Cloud Role // https://console.hetzner.cloud - hetznerRoleHcloud role = "hcloud" + HetznerRoleHcloud Role = "hcloud" ) // UnmarshalYAML implements the yaml.Unmarshaler interface. 
-func (c *role) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { if err := unmarshal((*string)(c)); err != nil { return err } switch *c { - case hetznerRoleRobot, hetznerRoleHcloud: + case HetznerRoleRobot, HetznerRoleHcloud: return nil default: return fmt.Errorf("unknown role %q", *c) @@ -143,12 +143,12 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { switch conf.Role { - case hetznerRoleHcloud: + case HetznerRoleHcloud: if conf.hcloudEndpoint == "" { conf.hcloudEndpoint = hcloud.Endpoint } return newHcloudDiscovery(conf, l) - case hetznerRoleRobot: + case HetznerRoleRobot: if conf.robotEndpoint == "" { conf.robotEndpoint = "https://robot-ws.your-server.de" } diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 496088028..1d8aa9302 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -105,7 +105,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) targets := make([]model.LabelSet, len(servers)) for i, server := range servers { labels := model.LabelSet{ - hetznerLabelRole: model.LabelValue(hetznerRoleRobot), + hetznerLabelRole: model.LabelValue(HetznerRoleRobot), hetznerLabelServerID: model.LabelValue(strconv.Itoa(server.Server.ServerNumber)), hetznerLabelServerName: model.LabelValue(server.Server.ServerName), hetznerLabelDatacenter: model.LabelValue(strings.ToLower(server.Server.Dc)), From fb3935e8f96a6b97cbf7c8637655ca724b727021 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Tue, 1 Aug 2023 13:08:57 +1000 Subject: [PATCH 20/82] Address PR feedback: rename method Signed-off-by: Charles Korn --- promql/engine.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index b6c856ba7..ac8c0f566 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1387,7 +1387,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { unwrapParenExpr(&arg) vs, ok := arg.(*parser.VectorSelector) if ok { - return ev.evalTimestampFunctionOverVectorSelector(vs, call, e) + return ev.rangeEvalTimestampFunctionOverVectorSelector(vs, call, e) } } @@ -1825,7 +1825,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { panic(fmt.Errorf("unhandled expression of type: %T", expr)) } -func (ev *evaluator) evalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, storage.Warnings) { +func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, storage.Warnings) { ws, err := checkAndExpandSeriesSet(ev.ctx, vs) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) From 6087c555eda562ee2702540176bbd26e4287e6ef Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Tue, 1 Aug 2023 13:17:49 +1000 Subject: [PATCH 21/82] Address PR feedback: clarify comment Signed-off-by: Charles Korn --- promql/engine.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index ac8c0f566..37e9d425b 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1839,8 +1839,9 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, 
storage.Warnings) { if vs.Timestamp != nil { - // This is a special case only for "timestamp" since the offset - // needs to be adjusted for every point. + // This is a special case for "timestamp()" when the @ modifier is used, to ensure that + // we return a point for each time step in this case. + // See https://github.com/prometheus/prometheus/issues/8433. vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond } From 145d7457feb4a9b6d6c597e173c6a03e54f4ecfc Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Tue, 1 Aug 2023 13:27:46 +1000 Subject: [PATCH 22/82] Address PR feedback: use loop to create expected test result Signed-off-by: Charles Korn --- promql/engine_test.go | 78 ++++++++----------------------------------- 1 file changed, 14 insertions(+), 64 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go index 1b14e8a5d..54567d154 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -1993,72 +1993,22 @@ load 1m end := time.Unix(61, 0) interval := time.Second + // We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s. + expectedPoints := []FPoint{} + + for t := 0; t <= 59; t++ { + expectedPoints = append(expectedPoints, FPoint{F: 0, T: int64(t * 1000)}) + } + + expectedPoints = append( + expectedPoints, + FPoint{F: 60, T: 60_000}, + FPoint{F: 60, T: 61_000}, + ) + expectedResult := Matrix{ Series{ - Floats: []FPoint{ - {F: 0, T: 0}, - {F: 0, T: 1_000}, - {F: 0, T: 2_000}, - {F: 0, T: 3_000}, - {F: 0, T: 4_000}, - {F: 0, T: 5_000}, - {F: 0, T: 6_000}, - {F: 0, T: 7_000}, - {F: 0, T: 8_000}, - {F: 0, T: 9_000}, - {F: 0, T: 10_000}, - {F: 0, T: 11_000}, - {F: 0, T: 12_000}, - {F: 0, T: 13_000}, - {F: 0, T: 14_000}, - {F: 0, T: 15_000}, - {F: 0, T: 16_000}, - {F: 0, T: 17_000}, - {F: 0, T: 18_000}, - {F: 0, T: 19_000}, - {F: 0, T: 20_000}, - {F: 0, T: 21_000}, - {F: 0, T: 22_000}, - {F: 0, T: 23_000}, - {F: 0, T: 24_000}, - {F: 0, T: 25_000}, - {F: 0, T: 26_000}, - {F: 0, T: 27_000}, - {F: 0, T: 28_000}, - {F: 0, T: 29_000}, - {F: 0, T: 30_000}, - {F: 0, T: 31_000}, - {F: 0, T: 32_000}, - {F: 0, T: 33_000}, - {F: 0, T: 34_000}, - {F: 0, T: 35_000}, - {F: 0, T: 36_000}, - {F: 0, T: 37_000}, - {F: 0, T: 38_000}, - {F: 0, T: 39_000}, - {F: 0, T: 40_000}, - {F: 0, T: 41_000}, - {F: 0, T: 42_000}, - {F: 0, T: 43_000}, - {F: 0, T: 44_000}, - {F: 0, T: 45_000}, - {F: 0, T: 46_000}, - {F: 0, T: 47_000}, - {F: 0, T: 48_000}, - {F: 0, T: 49_000}, - {F: 0, T: 50_000}, - {F: 0, T: 51_000}, - {F: 0, T: 52_000}, - {F: 0, T: 53_000}, - {F: 0, T: 54_000}, - {F: 0, T: 55_000}, - {F: 0, T: 56_000}, - {F: 0, T: 57_000}, - {F: 0, T: 58_000}, - {F: 0, T: 59_000}, - {F: 60, T: 60_000}, - {F: 60, T: 61_000}, - }, + Floats: expectedPoints, Metric: labels.EmptyLabels(), }, } From f26dfc95e60dfd6ea8ef066e1c464ad54a52af2e Mon Sep 17 00:00:00 2001 From: cui fliter Date: Tue, 1 Aug 2023 18:24:42 +0800 Subject: [PATCH 23/82] fix struct name in comment (#12624) Signed-off-by: cui fliter --- tsdb/head.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/head.go b/tsdb/head.go index e18bd55a5..4435855e2 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -2161,7 +2161,7 @@ func overlapsClosedInterval(mint1, maxt1, mint2, maxt2 int64) bool { return mint1 <= maxt2 && mint2 <= maxt1 } -// mappedChunks describes a head chunk on disk that has been mmapped +// mmappedChunk describes a head chunk on disk that has been mmapped type mmappedChunk struct { ref chunks.ChunkDiskMapperRef numSamples uint16 From 
cd7d0b69a2344d1508d80c68640a0a5381de9cf6 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Tue, 1 Aug 2023 14:04:45 +0200 Subject: [PATCH 24/82] Check nil err first when committing (#12625) The most common case is to have a nil error when appending series, so let's check that first instead of checking the 3 error types first. Signed-off-by: Oleg Zaytsev --- tsdb/head_append.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index b98183164..f06aacba4 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -930,6 +930,8 @@ func (a *headAppender) Commit() (err error) { oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow) switch err { + case nil: + // Do nothing. case storage.ErrOutOfOrderSample: samplesAppended-- oooRejected++ @@ -939,8 +941,6 @@ func (a *headAppender) Commit() (err error) { case storage.ErrTooOldSample: samplesAppended-- tooOldRejected++ - case nil: - // Do nothing. default: samplesAppended-- } From 17ef7010111225b21be6db134fad5432313c8fe1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Aug 2023 23:49:40 +0000 Subject: [PATCH 25/82] build(deps): bump github.com/scaleway/scaleway-sdk-go Bumps [github.com/scaleway/scaleway-sdk-go](https://github.com/scaleway/scaleway-sdk-go) from 1.0.0-beta.19 to 1.0.0-beta.20. - [Release notes](https://github.com/scaleway/scaleway-sdk-go/releases) - [Changelog](https://github.com/scaleway/scaleway-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/scaleway/scaleway-sdk-go/compare/v1.0.0-beta.19...v1.0.0-beta.20) --- updated-dependencies: - dependency-name: github.com/scaleway/scaleway-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1ddf77184..c75244918 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.10.0 - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.19 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.8.4 github.com/vultr/govultr/v2 v2.17.2 diff --git a/go.sum b/go.sum index 63c60005a..26a93dbb8 100644 --- a/go.sum +++ b/go.sum @@ -706,8 +706,8 @@ github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDN github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.19 h1:+1H+N9QFl2Sfvia0FBYfMrHYHYhmpZxhSE0wpPL2lYs= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.19/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU= From d39628294195a9328bc02af815b1deb264a65576 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 2 Aug 2023 11:48:17 +1000 Subject: [PATCH 26/82] Address PR feedback: clarify comment Signed-off-by: Charles Korn --- promql/bench_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/promql/bench_test.go b/promql/bench_test.go index fb5f3a06d..c6a528f7b 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -186,7 +186,7 @@ func rangeQueryCases() []benchCase { expr: "count({__name__!=\"\",l=\"\"})", steps: 1, }, - // timestamp() function + // Functions which have special handling inside eval() { expr: "timestamp(a_X)", }, From 6bb57201417d7ca7af2b6cfa141dae86790b8f82 Mon Sep 17 00:00:00 2001 From: Goutham Date: Thu, 3 Aug 2023 00:11:36 +0200 Subject: [PATCH 27/82] Add initial OTLP ingestion docs We still need a guide that we can link users to in https://github.com/prometheus/docs/tree/main/content/docs/guides This guide should show sending metrics from application directly via the OTel SDKs and also sending through the Collector. Signed-off-by: Goutham --- docs/feature_flags.md | 8 ++++++++ docs/querying/api.md | 13 +++++++++++++ 2 files changed, 21 insertions(+) diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 58e49e3b4..1cf54c47f 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -126,3 +126,11 @@ still ingest those conventional histograms that do not come with a corresponding native histogram. However, if a native histogram is present, Prometheus will ignore the corresponding conventional histogram, with the notable exception of exemplars, which are always ingested. 
+ +## OTLP Receiver + +`--enable-feature=otlp-write-receiver` + +The OTLP receiver allows Prometheus to accept [OpenTelemetry](https://opentelemetry.io/) metrics writes. +Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features +won't work when you push OTLP metrics. \ No newline at end of file diff --git a/docs/querying/api.md b/docs/querying/api.md index ca7f64f62..da74f55cf 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -1294,3 +1294,16 @@ Enable the remote write receiver by setting endpoint is `/api/v1/write`. Find more details [here](../storage.md#overview). *New in v2.33* + +## OTLP Receiver + +Prometheus can be configured as a receiver for the OTLP Metrics protocol. This +is not considered an efficient way of ingesting samples. Use it +with caution for specific low-volume use cases. It is not yet suitable for +replacing the ingestion via scraping yet. + +Enable the OTLP receiver by the feature flag +`--enable-feature=otlp-write-receiver`. When enabled, the OTLP receiver +endpoint is `/otlp/v1/metrics`. + +*New in v2.47* \ No newline at end of file From 61daa30bb1251c391307f3c90943fda616090de7 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Thu, 3 Aug 2023 10:56:27 +0200 Subject: [PATCH 28/82] Pass ref to SeriesLifecycleCallback.PostDeletion (#12626) When a particular SeriesLifecycleCallback tries to optimize and run closer to the Head, keeping track of the HeadSeriesRef instead of the labelsets, it's impossible to handle the PostDeletion callback properly as there's no way to know which series refs were deleted from the head. This changes the callback to provide the series refs alongside the labelsets, so the implementation can choose what to do. Signed-off-by: Oleg Zaytsev --- tsdb/head.go | 25 +++++++++++++------------ tsdb/head_bench_test.go | 7 ++++--- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index 4435855e2..34a289a98 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -206,7 +206,7 @@ type SeriesLifecycleCallback interface { // PostCreation is called after creating a series to indicate a creation of series. PostCreation(labels.Labels) // PostDeletion is called after deletion of series. - PostDeletion(...labels.Labels) + PostDeletion(map[chunks.HeadSeriesRef]labels.Labels) } // NewHead opens the head block in dir. @@ -1762,16 +1762,17 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st // minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series. func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) { var ( - deleted = map[storage.SeriesRef]struct{}{} - deletedForCallback = []labels.Labels{} - rmChunks = 0 - actualMint int64 = math.MaxInt64 - minOOOTime int64 = math.MaxInt64 + deleted = map[storage.SeriesRef]struct{}{} + rmChunks = 0 + actualMint int64 = math.MaxInt64 + minOOOTime int64 = math.MaxInt64 + deletedFromPrevStripe = 0 ) minMmapFile = math.MaxInt32 // Run through all series and truncate old chunks. Mark those with no // chunks left as deleted and store their ID. 
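For implementers of SeriesLifecycleCallback outside this repository, the signature change above means PostDeletion now reports the head series refs of deleted series together with their labels. A minimal hypothetical implementation that keeps its own bookkeeping keyed by ref could look like this, assuming imports of sync, prometheus/model/labels and prometheus/tsdb/chunks (the type and field names are illustrative):

    // refTrackingCallback is a hypothetical SeriesLifecycleCallback that keys
    // its own bookkeeping by series ref, which the previous
    // PostDeletion(...labels.Labels) signature could not support.
    type refTrackingCallback struct {
        mtx  sync.Mutex
        live map[chunks.HeadSeriesRef]labels.Labels // populated by the caller once refs are known
    }

    func (c *refTrackingCallback) PreCreation(labels.Labels) error { return nil }

    func (c *refTrackingCallback) PostCreation(labels.Labels) {}

    func (c *refTrackingCallback) PostDeletion(deleted map[chunks.HeadSeriesRef]labels.Labels) {
        c.mtx.Lock()
        defer c.mtx.Unlock()
        for ref := range deleted {
            delete(c.live, ref)
        }
    }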
for i := 0; i < s.size; i++ { + deletedForCallback := make(map[chunks.HeadSeriesRef]labels.Labels, deletedFromPrevStripe) s.locks[i].Lock() for hash, all := range s.hashes[i] { @@ -1825,7 +1826,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( deleted[storage.SeriesRef(series.ref)] = struct{}{} s.hashes[i].del(hash, series.lset) delete(s.series[j], series.ref) - deletedForCallback = append(deletedForCallback, series.lset) + deletedForCallback[series.ref] = series.lset if i != j { s.locks[j].Unlock() @@ -1837,8 +1838,8 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( s.locks[i].Unlock() - s.seriesLifecycleCallback.PostDeletion(deletedForCallback...) - deletedForCallback = deletedForCallback[:0] + s.seriesLifecycleCallback.PostDeletion(deletedForCallback) + deletedFromPrevStripe = len(deletedForCallback) } if actualMint == math.MaxInt64 { @@ -2175,9 +2176,9 @@ func (mc *mmappedChunk) OverlapsClosedInterval(mint, maxt int64) bool { type noopSeriesLifecycleCallback struct{} -func (noopSeriesLifecycleCallback) PreCreation(labels.Labels) error { return nil } -func (noopSeriesLifecycleCallback) PostCreation(labels.Labels) {} -func (noopSeriesLifecycleCallback) PostDeletion(...labels.Labels) {} +func (noopSeriesLifecycleCallback) PreCreation(labels.Labels) error { return nil } +func (noopSeriesLifecycleCallback) PostCreation(labels.Labels) {} +func (noopSeriesLifecycleCallback) PostDeletion(map[chunks.HeadSeriesRef]labels.Labels) {} func (h *Head) Size() int64 { var walSize, wblSize int64 diff --git a/tsdb/head_bench_test.go b/tsdb/head_bench_test.go index 2f8e0ba37..8fdf94db0 100644 --- a/tsdb/head_bench_test.go +++ b/tsdb/head_bench_test.go @@ -22,6 +22,7 @@ import ( "go.uber.org/atomic" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/tsdb/chunks" ) func BenchmarkHeadStripeSeriesCreate(b *testing.B) { @@ -80,6 +81,6 @@ func BenchmarkHeadStripeSeriesCreate_PreCreationFailure(b *testing.B) { type failingSeriesLifecycleCallback struct{} -func (failingSeriesLifecycleCallback) PreCreation(labels.Labels) error { return errors.New("failed") } -func (failingSeriesLifecycleCallback) PostCreation(labels.Labels) {} -func (failingSeriesLifecycleCallback) PostDeletion(...labels.Labels) {} +func (failingSeriesLifecycleCallback) PreCreation(labels.Labels) error { return errors.New("failed") } +func (failingSeriesLifecycleCallback) PostCreation(labels.Labels) {} +func (failingSeriesLifecycleCallback) PostDeletion(map[chunks.HeadSeriesRef]labels.Labels) {} From 9df36c3e5e2c538fb12a7da251e8716cccb60fd5 Mon Sep 17 00:00:00 2001 From: Goutham Veeramachaneni Date: Thu, 3 Aug 2023 13:56:54 +0200 Subject: [PATCH 29/82] Apply suggestions from code review Co-authored-by: Julien Pivotto Signed-off-by: Goutham Veeramachaneni --- docs/querying/api.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index da74f55cf..8cb9c694b 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -1299,8 +1299,8 @@ endpoint is `/api/v1/write`. Find more details [here](../storage.md#overview). Prometheus can be configured as a receiver for the OTLP Metrics protocol. This is not considered an efficient way of ingesting samples. Use it -with caution for specific low-volume use cases. It is not yet suitable for -replacing the ingestion via scraping yet. +with caution for specific low-volume use cases. It is not suitable for +replacing the ingestion via scraping. 
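To make the receiver description concrete, a rough client-side sketch of pushing metrics to a Prometheus started with --enable-feature=otlp-write-receiver is shown below. It assumes the OpenTelemetry Go SDK's otlpmetrichttp exporter; the package paths and option names belong to that SDK (and may differ between SDK versions), not to this patch, and the URL path used is the one a later commit in this series settles on:

    package main

    import (
        "context"
        "time"

        "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
        sdkmetric "go.opentelemetry.io/otel/sdk/metric"
    )

    func main() {
        ctx := context.Background()

        // Push OTLP metrics over HTTP to a local Prometheus with the
        // otlp-write-receiver feature flag enabled.
        exp, err := otlpmetrichttp.New(ctx,
            otlpmetrichttp.WithEndpoint("localhost:9090"),
            otlpmetrichttp.WithURLPath("/api/v1/otlp/v1/metrics"),
            otlpmetrichttp.WithInsecure(),
        )
        if err != nil {
            panic(err)
        }

        provider := sdkmetric.NewMeterProvider(
            sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp, sdkmetric.WithInterval(15*time.Second))),
        )
        defer provider.Shutdown(ctx)

        counter, err := provider.Meter("example").Float64Counter("example_requests_total")
        if err != nil {
            panic(err)
        }
        counter.Add(ctx, 1)
    }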
Enable the OTLP receiver by the feature flag `--enable-feature=otlp-write-receiver`. When enabled, the OTLP receiver From 5e21b3b2c6813b03f267c03124509745b5f848dd Mon Sep 17 00:00:00 2001 From: Pablo Baeyens Date: Thu, 3 Aug 2023 13:59:37 +0200 Subject: [PATCH 30/82] [go.mod] Bump `go.opentelemetry.io/collector/pdata` to `v1.0.0-rcv0014` (#12623) This is the latest release candidate for the pdata module. Go will pick the latest released version by default with commands such as `go get` and ignore pre-released versions, but this version includes updates. Signed-off-by: Pablo Baeyens --- go.mod | 4 ++-- go.sum | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index c75244918..3e97cdda0 100644 --- a/go.mod +++ b/go.mod @@ -54,7 +54,7 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.8.4 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v0.66.0 + go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 go.opentelemetry.io/collector/semconv v0.81.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 go.opentelemetry.io/otel v1.16.0 @@ -66,7 +66,7 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.2.1 - go.uber.org/multierr v1.8.0 + go.uber.org/multierr v1.11.0 golang.org/x/net v0.12.0 golang.org/x/oauth2 v0.10.0 golang.org/x/sync v0.3.0 diff --git a/go.sum b/go.sum index 26a93dbb8..f5661fc9e 100644 --- a/go.sum +++ b/go.sum @@ -797,8 +797,8 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v0.66.0 h1:UdE5U6MsDNzuiWaXdjGx2lC3ElVqWmN/hiUE8vyvSuM= -go.opentelemetry.io/collector/pdata v0.66.0/go.mod h1:pqyaznLzk21m+1KL6fwOsRryRELL+zNM0qiVSn0MbVc= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4= go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw= go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= @@ -824,7 +824,6 @@ go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lI go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME= @@ -833,8 +832,8 @@ go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod 
h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= From acb1f8a0971becca55e4395bafc35b9a298a24d0 Mon Sep 17 00:00:00 2001 From: Goutham Date: Thu, 3 Aug 2023 14:01:43 +0200 Subject: [PATCH 31/82] Fix API endpoint Signed-off-by: Goutham --- docs/querying/api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index 8cb9c694b..8ddb834ef 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -1304,6 +1304,6 @@ replacing the ingestion via scraping. Enable the OTLP receiver by the feature flag `--enable-feature=otlp-write-receiver`. When enabled, the OTLP receiver -endpoint is `/otlp/v1/metrics`. +endpoint is `/api/v1/otlp/v1/metrics`. *New in v2.47* \ No newline at end of file From c810e7cae3868c97eb4ebe97306e41f2630c4b04 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Fri, 4 Aug 2023 10:21:16 +0200 Subject: [PATCH 32/82] Fix typo in Appender.AppendHistogram() arg name Signed-off-by: Oleg Zaytsev --- tsdb/chunkenc/chunk.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/chunkenc/chunk.go b/tsdb/chunkenc/chunk.go index 397c09ed8..d6a6f4614 100644 --- a/tsdb/chunkenc/chunk.go +++ b/tsdb/chunkenc/chunk.go @@ -94,7 +94,7 @@ type Appender interface { // The returned bool isRecoded can be used to distinguish between the new Chunk c being a completely new Chunk // or the current Chunk recoded to a new Chunk. // The Appender app that can be used for the next append is always returned. 
- AppendHistogram(prev *HistogramAppender, t int64, h *histogram.Histogram, appendOny bool) (c Chunk, isRecoded bool, app Appender, err error) + AppendHistogram(prev *HistogramAppender, t int64, h *histogram.Histogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error) AppendFloatHistogram(prev *FloatHistogramAppender, t int64, h *histogram.FloatHistogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error) } From 6ea6def0d3d31531696010e467bc2fb6c93edfef Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Fri, 4 Aug 2023 10:39:55 +0200 Subject: [PATCH 33/82] Use zeropool when replaying agent's DB WAL (#12651) Same as https://github.com/prometheus/prometheus/pull/12189 but for tsdb/agent/db.go Signed-off-by: Oleg Zaytsev --- tsdb/agent/db.go | 44 ++++++++++++-------------------------------- 1 file changed, 12 insertions(+), 32 deletions(-) diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 2ed51e4b8..77b77fc23 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -42,6 +42,7 @@ import ( "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/wlog" + "github.com/prometheus/prometheus/util/zeropool" ) const ( @@ -411,28 +412,13 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H dec record.Decoder lastRef = chunks.HeadSeriesRef(db.nextRef.Load()) - decoded = make(chan interface{}, 10) - errCh = make(chan error, 1) - seriesPool = sync.Pool{ - New: func() interface{} { - return []record.RefSeries{} - }, - } - samplesPool = sync.Pool{ - New: func() interface{} { - return []record.RefSample{} - }, - } - histogramsPool = sync.Pool{ - New: func() interface{} { - return []record.RefHistogramSample{} - }, - } - floatHistogramsPool = sync.Pool{ - New: func() interface{} { - return []record.RefFloatHistogramSample{} - }, - } + decoded = make(chan interface{}, 10) + errCh = make(chan error, 1) + + seriesPool zeropool.Pool[[]record.RefSeries] + samplesPool zeropool.Pool[[]record.RefSample] + histogramsPool zeropool.Pool[[]record.RefHistogramSample] + floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] ) go func() { @@ -442,7 +428,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H rec := r.Record() switch dec.Type(rec) { case record.Series: - series := seriesPool.Get().([]record.RefSeries)[:0] + series := seriesPool.Get()[:0] series, err = dec.Series(rec, series) if err != nil { errCh <- &wlog.CorruptionErr{ @@ -454,7 +440,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H } decoded <- series case record.Samples: - samples := samplesPool.Get().([]record.RefSample)[:0] + samples := samplesPool.Get()[:0] samples, err = dec.Samples(rec, samples) if err != nil { errCh <- &wlog.CorruptionErr{ @@ -466,7 +452,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H } decoded <- samples case record.HistogramSamples: - histograms := histogramsPool.Get().([]record.RefHistogramSample)[:0] + histograms := histogramsPool.Get()[:0] histograms, err = dec.HistogramSamples(rec, histograms) if err != nil { errCh <- &wlog.CorruptionErr{ @@ -478,7 +464,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H } decoded <- histograms case record.FloatHistogramSamples: - floatHistograms := floatHistogramsPool.Get().([]record.RefFloatHistogramSample)[:0] + floatHistograms := floatHistogramsPool.Get()[:0] floatHistograms, err = 
dec.FloatHistogramSamples(rec, floatHistograms) if err != nil { errCh <- &wlog.CorruptionErr{ @@ -523,8 +509,6 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H } } } - - //nolint:staticcheck seriesPool.Put(v) case []record.RefSample: for _, entry := range v { @@ -539,8 +523,6 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H series.lastTs = entry.T } } - - //nolint:staticcheck samplesPool.Put(v) case []record.RefHistogramSample: for _, entry := range v { @@ -555,7 +537,6 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H series.lastTs = entry.T } } - //nolint:staticcheck histogramsPool.Put(v) case []record.RefFloatHistogramSample: for _, entry := range v { @@ -570,7 +551,6 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H series.lastTs = entry.T } } - //nolint:staticcheck floatHistogramsPool.Put(v) default: panic(fmt.Errorf("unexpected decoded type: %T", d)) From 8d38d59fc58e7add6d57a2983516afb1b8d67922 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 8 Aug 2023 09:32:51 +0200 Subject: [PATCH 34/82] Cleanup temporary chunk snapshot dirs Simlar to cleanup of WAL files on startup, cleanup temporary chunk_snapshot dirs. This prevents storage space leaks due to terminated snapshots on shutdown. Signed-off-by: SuperQ --- tsdb/db.go | 6 +++++- tsdb/db_test.go | 5 +++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/tsdb/db.go b/tsdb/db.go index 2ca6034a0..e181a8f20 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -2090,7 +2090,8 @@ func isBlockDir(fi fs.DirEntry) bool { return err == nil } -// isTmpDir returns true if the given file-info contains a block ULID or checkpoint prefix and a tmp extension. +// isTmpDir returns true if the given file-info contains a block ULID, a checkpoint prefix, +// or a chunk snapshot prefix and a tmp extension. func isTmpDir(fi fs.DirEntry) bool { if !fi.IsDir() { return false @@ -2102,6 +2103,9 @@ func isTmpDir(fi fs.DirEntry) bool { if strings.HasPrefix(fn, "checkpoint.") { return true } + if strings.HasPrefix(fn, chunkSnapshotPrefix) { + return true + } if _, err := ulid.ParseStrict(fn[:len(fn)-len(ext)]); err == nil { return true } diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 772fcf9d1..6da774a35 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -3146,6 +3146,9 @@ func TestOpen_VariousBlockStates(t *testing.T) { tmpCheckpointDir := path.Join(tmpDir, "wal/checkpoint.00000001.tmp") err := os.MkdirAll(tmpCheckpointDir, 0o777) require.NoError(t, err) + tmpChunkSnapshotDir := path.Join(tmpDir, chunkSnapshotPrefix+"0000.00000001.tmp") + err = os.MkdirAll(tmpChunkSnapshotDir, 0o777) + require.NoError(t, err) opts := DefaultOptions() opts.RetentionDuration = 0 @@ -3179,6 +3182,8 @@ func TestOpen_VariousBlockStates(t *testing.T) { require.Equal(t, len(expectedIgnoredDirs), ignored) _, err = os.Stat(tmpCheckpointDir) require.True(t, os.IsNotExist(err)) + _, err = os.Stat(tmpChunkSnapshotDir) + require.True(t, os.IsNotExist(err)) } func TestOneCheckpointPerCompactCall(t *testing.T) { From d2ae8dc3cb849b165b02fcfec9fb015903d5e64b Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 11 Aug 2023 16:20:12 +0000 Subject: [PATCH 35/82] remote-write: add http.resend_count tracing attribute As recommended by the OpenTelemetry semantic conventions. 
https://opentelemetry.io/docs/specs/otel/trace/semantic_conventions/http/#http-client Signed-off-by: Bryan Boreham --- storage/remote/queue_manager.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 3edd31b91..1c834db77 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -29,6 +29,7 @@ import ( "github.com/prometheus/common/model" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.uber.org/atomic" "github.com/prometheus/prometheus/config" @@ -545,6 +546,10 @@ func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []p attribute.String("remote_name", t.storeClient.Name()), attribute.String("remote_url", t.storeClient.Endpoint()), ) + // Attributes defined by OpenTelemetry semantic conventions. + if try > 0 { + span.SetAttributes(semconv.HTTPResendCount(try)) + } begin := time.Now() err := t.storeClient.Store(ctx, req) From 33aab1b2cc9009e60d1ae0658ffe66db84ed6698 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 13 Aug 2023 14:55:50 +0100 Subject: [PATCH 36/82] labels: extend benchmark for Has() Signed-off-by: Bryan Boreham --- model/labels/labels_test.go | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index d91be27cb..a5401b924 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -472,16 +472,22 @@ func BenchmarkLabels_Get(b *testing.B) { for _, scenario := range []struct { desc, label string }{ - {"get first label", allLabels[0].Name}, - {"get middle label", allLabels[size/2].Name}, - {"get last label", allLabels[size-1].Name}, - {"get not-found label", "benchmark"}, + {"first label", allLabels[0].Name}, + {"middle label", allLabels[size/2].Name}, + {"last label", allLabels[size-1].Name}, + {"not-found label", "benchmark"}, } { b.Run(scenario.desc, func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = labels.Get(scenario.label) - } + b.Run("get", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = labels.Get(scenario.label) + } + }) + b.Run("has", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = labels.Has(scenario.label) + } + }) }) } }) From b5c6807fea172ffcc55359d3ea042bcc7b13bab2 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 13 Aug 2023 14:56:15 +0100 Subject: [PATCH 37/82] Labels.Has quick check on first character Exit early if we've gone past - labels are sorted in order. Signed-off-by: Bryan Boreham --- model/labels/labels_stringlabels.go | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index 223aa6ebf..bc4700eaa 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -300,13 +300,26 @@ func (ls Labels) Get(name string) string { // Has returns true if the label with the given name is present. func (ls Labels) Has(name string) bool { + if name == "" { // Avoid crash in loop if someone asks for "". + return false // Prometheus does not store blank label names. 
+ } for i := 0; i < len(ls.data); { - var lName string - lName, i = decodeString(ls.data, i) - _, i = decodeString(ls.data, i) - if lName == name { - return true + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + return true + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. + break + } + i += size } + size, i = decodeSize(ls.data, i) + i += size } return false } From ce260b1fe114c600cde83e1c9d426ae51519279e Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 13 Aug 2023 15:14:53 +0100 Subject: [PATCH 38/82] labels: remove some unused code Signed-off-by: Bryan Boreham --- model/labels/labels_stringlabels.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index 223aa6ebf..5e9afe8dc 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -49,12 +49,6 @@ type Labels struct { data string } -type labelSlice []Label - -func (ls labelSlice) Len() int { return len(ls) } -func (ls labelSlice) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } -func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name } - func decodeSize(data string, index int) (int, int) { // Fast-path for common case of a single byte, value 0..127. b := data[index] From 0670e4771a02792642257401d25ac24a486944e7 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 13 Aug 2023 18:09:10 +0100 Subject: [PATCH 39/82] promql engine: check unique labels using existing map `ContainsSameLabelset` constructs a map with the same hash key as the one used to compile the output of `rangeEval`, so we can use that one and save work. Need to hold the timestamp so we can be sure we saw the same series in the same evaluation. Signed-off-by: Bryan Boreham --- promql/engine.go | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index e2092a800..57a6e7b02 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1143,7 +1143,11 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } } enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)} - seriess := make(map[uint64]Series, biggestLen) // Output series by series hash. + type seriesAndTimestamp struct { + Series + ts int64 + } + seriess := make(map[uint64]seriesAndTimestamp, biggestLen) // Output series by series hash. tempNumSamples := ev.currentSamples var ( @@ -1228,9 +1232,6 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // Make the function call. enh.Ts = ts result, ws := funcCall(args, bufHelpers, enh) - if result.ContainsSameLabelset() { - ev.errorf("vector cannot contain metrics with the same labelset") - } enh.Out = result[:0] // Reuse result vector. warnings = append(warnings, ws...) @@ -1247,6 +1248,9 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // If this could be an instant query, shortcut so as not to change sort order. 
if ev.endTimestamp == ev.startTimestamp { + if result.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } mat := make(Matrix, len(result)) for i, s := range result { if s.H == nil { @@ -1264,8 +1268,13 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) for _, sample := range result { h := sample.Metric.Hash() ss, ok := seriess[h] - if !ok { - ss = Series{Metric: sample.Metric} + if ok { + if ss.ts == ts { // If we've seen this output series before at this timestamp, it's a duplicate. + ev.errorf("vector cannot contain metrics with the same labelset") + } + ss.ts = ts + } else { + ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts} } if sample.H == nil { if ss.Floats == nil { @@ -1292,7 +1301,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // Assemble the output matrix. By the time we get here we know we don't have too many samples. mat := make(Matrix, 0, len(seriess)) for _, ss := range seriess { - mat = append(mat, ss) + mat = append(mat, ss.Series) } ev.currentSamples = originalNumSamples + mat.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) From 1e3fef6ab0578834a91728ef4fdd1532fa39913d Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 14 Aug 2023 15:39:25 +0100 Subject: [PATCH 40/82] scraping: limit detail on dropped targets, to save memory (#12647) It's possible (quite common on Kubernetes) to have a service discovery return thousands of targets then drop most of them in relabel rules. The main place this data is used is to display in the web UI, where you don't want thousands of lines of display. The new limit is `keep_dropped_targets`, which defaults to 0 for backwards-compatibility. Signed-off-by: Bryan Boreham --- config/config.go | 9 +++++++++ docs/configuration/configuration.md | 8 ++++++++ docs/querying/api.md | 1 + .../examples/prometheus-kubernetes.yml | 5 +++++ scrape/manager.go | 13 ++++++++++++- scrape/scrape.go | 18 +++++++++++++++--- scrape/scrape_test.go | 1 + web/api/v1/api.go | 9 +++++++-- web/api/v1/api_test.go | 11 +++++++++++ web/api/v1/errors_test.go | 5 +++++ .../src/pages/serviceDiscovery/Services.tsx | 19 ++++++++++--------- 11 files changed, 84 insertions(+), 15 deletions(-) diff --git a/config/config.go b/config/config.go index d32fcc33c..7f7595dcd 100644 --- a/config/config.go +++ b/config/config.go @@ -409,6 +409,9 @@ type GlobalConfig struct { // More than this label value length post metric-relabeling will cause the // scrape to fail. 0 means no limit. LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` + // Keep no more than this many dropped targets per job. + // 0 means no limit. + KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -514,6 +517,9 @@ type ScrapeConfig struct { // More than this many buckets in a native histogram will cause the scrape to // fail. NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"` + // Keep no more than this many dropped targets per job. + // 0 means no limit. + KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. 
@@ -608,6 +614,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { if c.LabelValueLengthLimit == 0 { c.LabelValueLengthLimit = globalConfig.LabelValueLengthLimit } + if c.KeepDroppedTargets == 0 { + c.KeepDroppedTargets = globalConfig.KeepDroppedTargets + } return nil } diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 669190257..f15a9f914 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -106,6 +106,10 @@ global: # change in the future. [ target_limit: | default = 0 ] + # Limit per scrape config on the number of targets dropped by relabeling + # that will be kept in memory. 0 means no limit. + [ keep_dropped_targets: | default = 0 ] + # Rule files specifies a list of globs. Rules and alerts are read from # all matching files. rule_files: @@ -415,6 +419,10 @@ metric_relabel_configs: # change in the future. [ target_limit: | default = 0 ] +# Per-job limit on the number of targets dropped by relabeling +# that will be kept in memory. 0 means no limit. +[ keep_dropped_targets: | default = 0 ] + # Limit on total number of positive and negative buckets allowed in a single # native histogram. If this is exceeded, the entire scrape will be treated as # failed. 0 means no limit. diff --git a/docs/querying/api.md b/docs/querying/api.md index 8ddb834ef..408d32cda 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -543,6 +543,7 @@ GET /api/v1/targets ``` Both the active and dropped targets are part of the response by default. +Dropped targets are subject to `keep_dropped_targets` limit, if set. `labels` represents the label set after relabeling has occurred. `discoveredLabels` represent the unmodified labels retrieved during service discovery before relabeling has occurred. diff --git a/documentation/examples/prometheus-kubernetes.yml b/documentation/examples/prometheus-kubernetes.yml index 9a6228734..ad7451c2d 100644 --- a/documentation/examples/prometheus-kubernetes.yml +++ b/documentation/examples/prometheus-kubernetes.yml @@ -8,6 +8,11 @@ # If you are using Kubernetes 1.7.2 or earlier, please take note of the comments # for the kubernetes-cadvisor job; you will need to edit or remove this job. +# Keep at most 100 sets of details of targets dropped by relabeling. +# This information is used to display in the UI for troubleshooting. +global: + keep_dropped_targets: 100 + # Scrape config for API servers. # # Kubernetes exposes API servers as endpoints to the default/kubernetes diff --git a/scrape/manager.go b/scrape/manager.go index d7cf6792c..427b9f2be 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -357,7 +357,7 @@ func (m *Manager) TargetsActive() map[string][]*Target { return targets } -// TargetsDropped returns the dropped targets during relabelling. +// TargetsDropped returns the dropped targets during relabelling, subject to KeepDroppedTargets limit. 
func (m *Manager) TargetsDropped() map[string][]*Target { m.mtxScrape.Lock() defer m.mtxScrape.Unlock() @@ -368,3 +368,14 @@ func (m *Manager) TargetsDropped() map[string][]*Target { } return targets } + +func (m *Manager) TargetsDroppedCounts() map[string]int { + m.mtxScrape.Lock() + defer m.mtxScrape.Unlock() + + counts := make(map[string]int, len(m.scrapePools)) + for tset, sp := range m.scrapePools { + counts[tset] = sp.droppedTargetsCount + } + return counts +} diff --git a/scrape/scrape.go b/scrape/scrape.go index df729b448..40836afc2 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -242,8 +242,9 @@ type scrapePool struct { targetMtx sync.Mutex // activeTargets and loops must always be synchronized to have the same // set of hashes. - activeTargets map[uint64]*Target - droppedTargets []*Target + activeTargets map[uint64]*Target + droppedTargets []*Target // Subject to KeepDroppedTargets limit. + droppedTargetsCount int // Count of all dropped targets. // Constructor for new scrape loops. This is settable for testing convenience. newLoop func(scrapeLoopOptions) loop @@ -354,12 +355,19 @@ func (sp *scrapePool) ActiveTargets() []*Target { return tActive } +// Return dropped targets, subject to KeepDroppedTargets limit. func (sp *scrapePool) DroppedTargets() []*Target { sp.targetMtx.Lock() defer sp.targetMtx.Unlock() return sp.droppedTargets } +func (sp *scrapePool) DroppedTargetsCount() int { + sp.targetMtx.Lock() + defer sp.targetMtx.Unlock() + return sp.droppedTargetsCount +} + // stop terminates all scrape loops and returns after they all terminated. func (sp *scrapePool) stop() { sp.mtx.Lock() @@ -506,6 +514,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { var targets []*Target lb := labels.NewBuilder(labels.EmptyLabels()) sp.droppedTargets = []*Target{} + sp.droppedTargetsCount = 0 for _, tg := range tgs { targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort, targets, lb) for _, err := range failures { @@ -520,7 +529,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { case nonEmpty: all = append(all, t) case !t.discoveredLabels.IsEmpty(): - sp.droppedTargets = append(sp.droppedTargets, t) + if sp.config.KeepDroppedTargets != 0 && uint(len(sp.droppedTargets)) < sp.config.KeepDroppedTargets { + sp.droppedTargets = append(sp.droppedTargets, t) + } + sp.droppedTargetsCount++ } } } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 3f119b94d..8578f1bec 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -88,6 +88,7 @@ func TestDroppedTargetsList(t *testing.T) { SourceLabels: model.LabelNames{"job"}, }, }, + KeepDroppedTargets: 1, } tgs = []*targetgroup.Group{ { diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 99589ac46..227027e46 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -100,6 +100,7 @@ type ScrapePoolsRetriever interface { type TargetRetriever interface { TargetsActive() map[string][]*scrape.Target TargetsDropped() map[string][]*scrape.Target + TargetsDroppedCounts() map[string]int } // AlertmanagerRetriever provides a list of all/dropped AlertManager URLs. @@ -898,8 +899,9 @@ type DroppedTarget struct { // TargetDiscovery has all the active targets. 
type TargetDiscovery struct { - ActiveTargets []*Target `json:"activeTargets"` - DroppedTargets []*DroppedTarget `json:"droppedTargets"` + ActiveTargets []*Target `json:"activeTargets"` + DroppedTargets []*DroppedTarget `json:"droppedTargets"` + DroppedTargetCounts map[string]int `json:"droppedTargetCounts"` } // GlobalURLOptions contains fields used for deriving the global URL for local targets. @@ -1039,6 +1041,9 @@ func (api *API) targets(r *http.Request) apiFuncResult { } else { res.ActiveTargets = []*Target{} } + if showDropped { + res.DroppedTargetCounts = api.targetRetriever(r.Context()).TargetsDroppedCounts() + } if showDropped { targetsDropped := api.targetRetriever(r.Context()).TargetsDropped() droppedKeys, numTargets := sortKeys(targetsDropped) diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 99e3b292e..742ca09ba 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -137,6 +137,14 @@ func (t testTargetRetriever) TargetsDropped() map[string][]*scrape.Target { return t.droppedTargets } +func (t testTargetRetriever) TargetsDroppedCounts() map[string]int { + r := make(map[string]int) + for k, v := range t.droppedTargets { + r[k] = len(v) + } + return r +} + func (t *testTargetRetriever) SetMetadataStoreForTargets(identifier string, metadata scrape.MetricMetadataStore) error { targets, ok := t.activeTargets[identifier] @@ -1384,6 +1392,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, }, }, + DroppedTargetCounts: map[string]int{"blackbox": 1}, }, }, { @@ -1436,6 +1445,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, }, }, + DroppedTargetCounts: map[string]int{"blackbox": 1}, }, }, { @@ -1498,6 +1508,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, }, }, + DroppedTargetCounts: map[string]int{"blackbox": 1}, }, }, // With a matching metric. diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index afdd67337..8d194a058 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -229,6 +229,11 @@ func (DummyTargetRetriever) TargetsDropped() map[string][]*scrape.Target { return map[string][]*scrape.Target{} } +// TargetsDroppedCounts implements targetRetriever. +func (DummyTargetRetriever) TargetsDroppedCounts() map[string]int { + return nil +} + // DummyAlertmanagerRetriever implements AlertmanagerRetriever. 
type DummyAlertmanagerRetriever struct{} diff --git a/web/ui/react-app/src/pages/serviceDiscovery/Services.tsx b/web/ui/react-app/src/pages/serviceDiscovery/Services.tsx index 21bf2259b..79d88fbe4 100644 --- a/web/ui/react-app/src/pages/serviceDiscovery/Services.tsx +++ b/web/ui/react-app/src/pages/serviceDiscovery/Services.tsx @@ -14,6 +14,7 @@ import SearchBar from '../../components/SearchBar'; interface ServiceMap { activeTargets: Target[]; droppedTargets: DroppedTarget[]; + droppedTargetCounts: Record; } export interface TargetLabels { @@ -34,7 +35,7 @@ const droppedTargetKVSearch = new KVSearch({ export const processSummary = ( activeTargets: Target[], - droppedTargets: DroppedTarget[] + droppedTargetCounts: Record ): Record => { const targets: Record = {}; @@ -50,15 +51,15 @@ export const processSummary = ( targets[name].total++; targets[name].active++; } - for (const target of droppedTargets) { - const { job: name } = target.discoveredLabels; + for (const name in targets) { if (!targets[name]) { targets[name] = { - total: 0, + total: droppedTargetCounts[name], active: 0, }; + } else { + targets[name].total += droppedTargetCounts[name]; } - targets[name].total++; } return targets; @@ -94,10 +95,10 @@ export const processTargets = (activeTargets: Target[], droppedTargets: DroppedT return labels; }; -export const ServiceDiscoveryContent: FC = ({ activeTargets, droppedTargets }) => { +export const ServiceDiscoveryContent: FC = ({ activeTargets, droppedTargets, droppedTargetCounts }) => { const [activeTargetList, setActiveTargetList] = useState(activeTargets); const [droppedTargetList, setDroppedTargetList] = useState(droppedTargets); - const [targetList, setTargetList] = useState(processSummary(activeTargets, droppedTargets)); + const [targetList, setTargetList] = useState(processSummary(activeTargets, droppedTargetCounts)); const [labelList, setLabelList] = useState(processTargets(activeTargets, droppedTargets)); const handleSearchChange = useCallback( @@ -118,9 +119,9 @@ export const ServiceDiscoveryContent: FC = ({ activeTargets, dropped const defaultValue = useMemo(getQuerySearchFilter, []); useEffect(() => { - setTargetList(processSummary(activeTargetList, droppedTargetList)); + setTargetList(processSummary(activeTargetList, droppedTargetCounts)); setLabelList(processTargets(activeTargetList, droppedTargetList)); - }, [activeTargetList, droppedTargetList]); + }, [activeTargetList, droppedTargetList, droppedTargetCounts]); return ( <> From b6192be856650cc379abd406aa0f38097e73f1f1 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 14 Aug 2023 16:24:19 +0000 Subject: [PATCH 41/82] Release: volunteer Bryan Boreham to shepherd 2.47 Signed-off-by: Bryan Boreham --- RELEASE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 0d0918191..2ae07281b 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -51,7 +51,7 @@ Release cadence of first pre-releases being cut is 6 weeks. | v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) | | v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) | | v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.47 | 2023-08-23 | **searching for volunteer** | +| v2.47 | 2023-08-23 | Bryan Boreham (GitHub: @bboreham) | | v2.48 | 2023-10-04 | **searching for volunteer** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. 
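As a usage sketch for the `keep_dropped_targets` limit introduced in the scraping patch above: the limit can be set globally and overridden per scrape job, and only targets dropped by relabeling count against it. The job name, targets, and relabel rule below are hypothetical, invented purely for illustration; the option names and their placement come from the configuration docs added in that patch.

```yaml
# Minimal sketch, not taken from the patches themselves.
global:
  # Keep at most 100 sets of details of targets dropped by relabeling,
  # for display in the service discovery UI and the targets API.
  keep_dropped_targets: 100

scrape_configs:
  - job_name: "example"            # hypothetical job name
    keep_dropped_targets: 50       # per-job limit; overrides the global setting when non-zero
    static_configs:
      - targets: ["app-1:9100", "app-2:9100"]   # hypothetical targets
    relabel_configs:
      # Drop one target via relabeling; it is then counted and, up to the
      # configured limit, retained as a "dropped target".
      - source_labels: [__address__]
        regex: "app-2:9100"
        action: drop
```

Per the `Validate` logic shown in the patch, a per-job value of 0 simply inherits the global setting, so the global limit acts as the default for every job.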
From 009017a3fb0219b0da7e4e2823bee772de306ffc Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Mon, 14 Aug 2023 23:28:23 +0200 Subject: [PATCH 42/82] Revert "Remove deleted target from discovery manager" Signed-off-by: Julien Pivotto --- discovery/legacymanager/manager.go | 7 +------ discovery/legacymanager/manager_test.go | 10 +++++++--- discovery/manager.go | 7 +------ discovery/manager_test.go | 15 +++++++++++++-- 4 files changed, 22 insertions(+), 17 deletions(-) diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go index e7c79a8f8..87823f401 100644 --- a/discovery/legacymanager/manager.go +++ b/discovery/legacymanager/manager.go @@ -270,12 +270,7 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { } for _, tg := range tgs { if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. - // Remove the deleted target. - if len(tg.Targets) == 0 && len(tg.Labels) == 0 { - delete(m.targets[poolKey], tg.Source) - } else { - m.targets[poolKey][tg.Source] = tg - } + m.targets[poolKey][tg.Source] = tg } } } diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go index bc8a419ec..13b84e6e3 100644 --- a/discovery/legacymanager/manager_test.go +++ b/discovery/legacymanager/manager_test.go @@ -824,9 +824,13 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { if !ok { t.Fatalf("'%v' should be present in target groups", pkey) } - _, ok = targetGroups[""] - if ok { - t.Fatalf("Target groups should be empty, got %v", targetGroups) + group, ok := targetGroups[""] + if !ok { + t.Fatalf("missing '' key in target groups %v", targetGroups) + } + + if len(group.Targets) != 0 { + t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) } } diff --git a/discovery/manager.go b/discovery/manager.go index 7f06b423d..8b304a0fa 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -387,12 +387,7 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { } for _, tg := range tgs { if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. - // Remove the deleted target. 
- if len(tg.Targets) == 0 && len(tg.Labels) == 0 { - delete(m.targets[poolKey], tg.Source) - } else { - m.targets[poolKey][tg.Source] = tg - } + m.targets[poolKey][tg.Source] = tg } } } diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 67ccbcac7..537160811 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -1044,8 +1044,19 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { if !ok { t.Fatalf("'%v' should be present in target groups", p) } - require.Equal(t, 0, len(targetGroups)) - require.Equal(t, 0, len(syncedTargets)) + group, ok := targetGroups[""] + if !ok { + t.Fatalf("missing '' key in target groups %v", targetGroups) + } + + if len(group.Targets) != 0 { + t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) + } + require.Equal(t, 1, len(syncedTargets)) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil { + t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls) + } } func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { From 7e91a79c495d2d6352fc4779737caa9323a403af Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Tue, 15 Aug 2023 20:50:29 +0200 Subject: [PATCH 43/82] ci(deps): group k8s and opentelemetry dependencies Dependabot allows to group dependencies by a list of pattern. This allows it on k8s.io and opentelemetry dependencies separately Signed-off-by: Matthieu MOREL --- .github/dependabot.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4c4b4a5fb..dee914ece 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,6 +4,13 @@ updates: directory: "/" schedule: interval: "monthly" + groups: + k8s.io: + patterns: + - "k8s.io/*" + go.opentelemetry.io: + patterns: + - "go.opentelemetry.io/*" - package-ecosystem: "gomod" directory: "/documentation/examples/remote_storage" schedule: From bf880a6e77326ea1863d8ddc17f1e360bed88718 Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Wed, 16 Aug 2023 22:26:31 +0800 Subject: [PATCH 44/82] enhance floathistogram add and sub method Signed-off-by: Ziqi Zhao --- model/histogram/float_histogram.go | 158 ++++++++ model/histogram/float_histogram_test.go | 517 ++++++++++++++++++++++++ 2 files changed, 675 insertions(+) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index d3f013935..0a65dafbb 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -238,6 +238,52 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { return h } +func (h *FloatHistogram) AddNew(other *FloatHistogram) *FloatHistogram { + switch { + case other.CounterResetHint == h.CounterResetHint: + // Adding apples to apples, all good. No need to change anything. + case h.CounterResetHint == GaugeType: + // Adding something else to a gauge. That's probably OK. Outcome is a gauge. + // Nothing to do since the receiver is already marked as gauge. + case other.CounterResetHint == GaugeType: + // Similar to before, but this time the receiver is "something else" and we have to change it to gauge. + h.CounterResetHint = GaugeType + case h.CounterResetHint == UnknownCounterReset: + // With the receiver's CounterResetHint being "unknown", this could still be legitimate + // if the caller knows what they are doing. Outcome is then again "unknown". + // No need to do anything since the receiver's CounterResetHint is already "unknown". 
+ case other.CounterResetHint == UnknownCounterReset: + // Similar to before, but now we have to set the receiver's CounterResetHint to "unknown". + h.CounterResetHint = UnknownCounterReset + default: + // All other cases shouldn't actually happen. + // They are a direct collision of CounterReset and NotCounterReset. + // Conservatively set the CounterResetHint to "unknown" and isse a warning. + h.CounterResetHint = UnknownCounterReset + // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place + } + + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount += otherZeroCount + h.Count += other.Count + h.Sum += other.Sum + + otherPositiveSpans := other.PositiveSpans + otherPositiveBuckets := other.PositiveBuckets + otherNegativeSpans := other.NegativeSpans + otherNegativeBuckets := other.NegativeBuckets + if other.Schema != h.Schema { + otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema) + otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) + } + + // TODO(beorn7): If needed, this can be optimized by inspecting the + // spans in other and create missing buckets in h in batches. + h.PositiveSpans, h.PositiveBuckets = mergeTwoSpans(h.Schema, h.ZeroThreshold, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = mergeTwoSpans(h.Schema, h.ZeroThreshold, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + return h +} + // Sub works like Add but subtracts the other histogram. func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { otherZeroCount := h.reconcileZeroBuckets(other) @@ -1033,3 +1079,115 @@ func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, ta return targetSpans, targetBuckets } + +func mergeTwoSpans(schema int32, threshold float64, spansA []Span, bucketsA []float64, spansB []Span, bucketsB []float64) ([]Span, []float64) { + var ( + iSpan int = -1 + iBucket int = -1 + iInSpan int32 + indexA int32 + indexB int32 = 0 + bIdxB int = 0 + lowerThanThreshold = true + deltaIndex int32 + ) + + for _, spanB := range spansB { + indexB += spanB.Offset + for j := 0; j < int(spanB.Length); j++ { + if lowerThanThreshold && getBound(indexB, schema) <= threshold { + goto nextLoop + } + lowerThanThreshold = false + + if iSpan == -1 { + if len(spansA) == 0 || spansA[0].Offset > indexB { + // Add bucket before all others. + bucketsA = append(bucketsA, 0) + copy(bucketsA[1:], bucketsA) + bucketsA[0] = bucketsB[bIdxB] + if len(spansA) > 0 && spansA[0].Offset == indexB+1 { // bIndex just preceed spansA[0] by one step + spansA[0].Length++ + spansA[0].Offset-- + goto nextLoop + } else { // if not create new span + spansA = append(spansA, Span{}) + copy(spansA[1:], spansA) + spansA[0] = Span{Offset: indexB, Length: 1} + if len(spansA) > 1 { + // Convert the absolute offset in the formerly + // first span to a relative offset. + spansA[1].Offset -= indexB + 1 + } + goto nextLoop + } + } else if spansA[0].Offset == indexB { + // Just add to first bucket. + bucketsA[0] += bucketsB[bIdxB] + goto nextLoop + } + iSpan, iBucket, iInSpan = 0, 0, 0 + indexA = spansA[0].Offset + } + deltaIndex = indexB - indexA + for { + remainingInSpan := int32(spansA[iSpan].Length) - iInSpan + if deltaIndex < remainingInSpan { + // Bucket is in current span. 
+ iBucket += int(deltaIndex) + iInSpan += deltaIndex + bucketsA[iBucket] += bucketsB[bIdxB] + break + } else { + deltaIndex -= remainingInSpan + iBucket += int(remainingInSpan) + iSpan++ + if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset { + // Bucket is in gap behind previous span (or there are no further spans). + bucketsA = append(bucketsA, 0) + copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) + bucketsA[iBucket] = bucketsB[bIdxB] + if deltaIndex == 0 { + // Directly after previous span, extend previous span. + if iSpan < len(spansA) { + spansA[iSpan].Offset-- + } + iSpan-- + iInSpan = int32(spansA[iSpan].Length) + spansA[iSpan].Length++ + break + } else if iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1 { + // Directly before next span, extend next span. + iInSpan = 0 + spansA[iSpan].Offset-- + spansA[iSpan].Length++ + break + } else { + // No next span, or next span is not directly adjacent to new bucket. + // Add new span. + iInSpan = 0 + if iSpan < len(spansA) { + spansA[iSpan].Offset -= deltaIndex + 1 + } + spansA = append(spansA, Span{}) + copy(spansA[iSpan+1:], spansA[iSpan:]) + spansA[iSpan] = Span{Length: 1, Offset: deltaIndex} + break + } + } else { + // Try start of next span. + deltaIndex -= spansA[iSpan].Offset + iInSpan = 0 + } + } + } + + nextLoop: + indexA = indexB + indexB++ + bIdxB++ + } + } + + return spansA, bucketsA +} diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index dd3e30427..5d1e96931 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -2252,3 +2252,520 @@ func TestFloatBucketIteratorTargetSchema(t *testing.T) { } require.False(t, it.Next(), "negative iterator not exhausted") } + +func TestFloatHistogramAddNew(t *testing.T) { + cases := []struct { + name string + in1, in2, expected *FloatHistogram + }{ + { + "same bucket layout", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 5, 7, 13}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{4, 2, 9, 10}, + }, + }, + { + "same bucket layout, defined differently", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {1, 1}, {0, 2}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{-2, 2}, {1, 2}, {0, 1}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + NegativeSpans: []Span{{3, 7}}, + NegativeBuckets: []float64{1, 1, 0, 0, 0, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 2}, {1, 1}, {0, 2}}, + PositiveBuckets: []float64{1, 0, 5, 7, 13}, + NegativeSpans: []Span{{3, 5}, {0, 2}}, + NegativeBuckets: []float64{4, 2, 0, 0, 0, 
9, 10}, + }, + }, + { + "non-overlapping spans", + &FloatHistogram{ + ZeroThreshold: 0.001, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.001, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {3, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6}, + NegativeSpans: []Span{{-9, 2}, {3, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.001, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 4}, {0, 6}}, + PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, + NegativeSpans: []Span{{-9, 2}, {3, 2}, {5, 2}, {3, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, + }, + }, + { + "non-overlapping inverted order", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {3, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6}, + NegativeSpans: []Span{{-9, 2}, {3, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 2}, {0, 5}, {0, 3}}, + PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, + NegativeSpans: []Span{{-9, 2}, {3, 2}, {5, 2}, {3, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, + }, + }, + { + "overlapping spans", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{-1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 4}, {0, 4}}, + PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "overlapping spans inverted order", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{-1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "schema change", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 
1.234, + Schema: 0, + PositiveSpans: []Span{{-1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + Schema: 1, + PositiveSpans: []Span{{-4, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + NegativeSpans: []Span{{6, 3}, {6, 4}}, + NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "larger zero bucket in first histogram", + &FloatHistogram{ + ZeroThreshold: 1, + ZeroCount: 17, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{1, 2}, {0, 3}}, + PositiveBuckets: []float64{2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 1, + ZeroCount: 29, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{1, 2}, {0, 3}}, + PositiveBuckets: []float64{2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "larger zero bucket in second histogram", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 1, + ZeroCount: 17, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{1, 2}, {0, 3}}, + PositiveBuckets: []float64{2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 1, + ZeroCount: 29, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{1, 5}}, + PositiveBuckets: []float64{2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "larger zero threshold in first histogram ends up inside a populated bucket of second histogram", + &FloatHistogram{ + ZeroThreshold: 0.2, + ZeroCount: 17, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{1, 2}, {0, 3}}, + PositiveBuckets: []float64{2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.25, + ZeroCount: 29, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-1, 1}, {1, 5}}, + PositiveBuckets: []float64{0, 2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "larger zero threshold in second histogram ends up inside a populated bucket of first histogram", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: 
[]Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.2, + ZeroCount: 17, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{1, 2}, {0, 3}}, + PositiveBuckets: []float64{2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.25, + ZeroCount: 29, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{1, 5}}, + PositiveBuckets: []float64{2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "schema change combined with larger zero bucket in second histogram", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{2, 5, 4, 2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.25, + ZeroCount: 12, + Count: 30, + Sum: 2.345, + Schema: 1, + PositiveSpans: []Span{{-3, 2}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 3, 2, 2, 3, 4}, + NegativeSpans: []Span{{6, 3}, {6, 4}}, + NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.25, + ZeroCount: 22, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-1, 7}}, + PositiveBuckets: []float64{6, 4, 2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "schema change combined with larger zero bucket in first histogram", + &FloatHistogram{ + ZeroThreshold: 0.25, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + Schema: 1, + PositiveSpans: []Span{{-4, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + NegativeSpans: []Span{{6, 3}, {6, 4}}, + NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.25, + ZeroCount: 20, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.Equal(t, c.expected, c.in1.AddNew(c.in2)) + // Has it also happened in-place? 
+ require.Equal(t, c.expected, c.in1) + }) + } +} + +func BenchmarkAddOld(b *testing.B) { + // Benchmark the original Add implementation b.N times. + + f1 := &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + } + + f2 := &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + Schema: 1, + PositiveSpans: []Span{{-4, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + NegativeSpans: []Span{{6, 3}, {6, 4}}, + NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, + } + + for n := 0; n < b.N; n++ { + f1.Add(f2) + } + +} + +func BenchmarkAddNew(b *testing.B) { + // Benchmark the new AddNew implementation b.N times. + + f1 := &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + } + + f2 := &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + Schema: 1, + PositiveSpans: []Span{{-4, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + NegativeSpans: []Span{{6, 3}, {6, 4}}, + NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, + } + + for n := 0; n < b.N; n++ { + f1.AddNew(f2) + } + +} From e5e51bebef4a37a268e0ff37124a9e2c7c7a6e2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mustafa=20Ate=C5=9F=20Uzun?= Date: Thu, 17 Aug 2023 16:34:45 +0300 Subject: [PATCH 45/82] fix: error message typo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Mustafa Ateş Uzun --- tsdb/head_wal.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 3ed95887e..19520a7d2 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -1517,7 +1517,7 @@ Outer: default: // This is a record type we don't understand. It is either and old format from earlier versions, // or a new format and the code was rolled back to old version. 
- loopErr = errors.Errorf("unsuported snapshot record type 0b%b", rec[0]) + loopErr = errors.Errorf("unsupported snapshot record type 0b%b", rec[0]) break Outer } } From 4399959f791de5b1387808d79624f17411b1527e Mon Sep 17 00:00:00 2001 From: Sylvain Rabot Date: Fri, 18 Aug 2023 15:15:55 +0200 Subject: [PATCH 46/82] Remove native histograms / memory snapshot restriction Signed-off-by: Sylvain Rabot --- cmd/prometheus/main.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index d48898b94..cab65626a 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -214,11 +214,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { } } - if c.tsdb.EnableNativeHistograms && c.tsdb.EnableMemorySnapshotOnShutdown { - c.tsdb.EnableMemorySnapshotOnShutdown = false - level.Warn(logger).Log("msg", "memory-snapshot-on-shutdown has been disabled automatically because memory-snapshot-on-shutdown and native-histograms cannot be enabled at the same time.") - } - return nil } From 4d8e380269da5912265274469ff873142bbbabc3 Mon Sep 17 00:00:00 2001 From: Michael Hoffmann Date: Fri, 18 Aug 2023 20:48:59 +0200 Subject: [PATCH 47/82] promql: allow tests to be imported (#12050) Signed-off-by: Michael Hoffmann --- promql/engine_test.go | 249 ++++++++++++---------------- promql/promql_test.go | 27 +-- promql/test.go | 162 ++++++++---------- rules/alerting_test.go | 95 +++++------ rules/manager_test.go | 71 +++----- rules/recording_test.go | 32 ++-- storage/remote/read_handler_test.go | 37 ++--- web/api/v1/api_test.go | 81 +++++---- web/federate_test.go | 30 +--- 9 files changed, 334 insertions(+), 450 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go index 54567d154..0df969375 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -25,8 +25,6 @@ import ( "github.com/go-kit/log" - "github.com/prometheus/prometheus/tsdb/tsdbutil" - "github.com/stretchr/testify/require" "go.uber.org/goleak" @@ -35,7 +33,9 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/stats" + "github.com/prometheus/prometheus/util/teststorage" ) func TestMain(m *testing.M) { @@ -566,6 +566,7 @@ func TestSelectHintsSetCorrectly(t *testing.T) { err error ) ctx := context.Background() + if tc.end == 0 { query, err = engine.NewInstantQuery(ctx, hintsRecorder, nil, tc.query, timestamp.Time(tc.start)) } else { @@ -573,7 +574,7 @@ func TestSelectHintsSetCorrectly(t *testing.T) { } require.NoError(t, err) - res := query.Exec(ctx) + res := query.Exec(context.Background()) require.NoError(t, res.Err) require.Equal(t, tc.expected, hintsRecorder.hints) @@ -636,15 +637,11 @@ func TestEngineShutdown(t *testing.T) { } func TestEngineEvalStmtTimestamps(t *testing.T) { - test, err := NewTest(t, ` + storage := LoadedStorage(t, ` load 10s metric 1 2 `) - require.NoError(t, err) - defer test.Close() - - err = test.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) cases := []struct { Query string @@ -728,14 +725,15 @@ load 10s t.Run(fmt.Sprintf("%d query=%s", i, c.Query), func(t *testing.T) { var err error var qry Query + engine := newTestEngine() if c.Interval == 0 { - qry, err = test.QueryEngine().NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start) + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, 
c.Query, c.Start) } else { - qry, err = test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) + qry, err = engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval) } require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) if c.ShouldError { require.Error(t, res.Err, "expected error for the query %q", c.Query) return @@ -748,18 +746,14 @@ load 10s } func TestQueryStatistics(t *testing.T) { - test, err := NewTest(t, ` + storage := LoadedStorage(t, ` load 10s metricWith1SampleEvery10Seconds 1+1x100 metricWith3SampleEvery10Seconds{a="1",b="1"} 1+1x100 metricWith3SampleEvery10Seconds{a="2",b="2"} 1+1x100 metricWith3SampleEvery10Seconds{a="3",b="2"} 1+1x100 `) - require.NoError(t, err) - defer test.Close() - - err = test.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) cases := []struct { Query string @@ -1194,7 +1188,7 @@ load 10s }, } - engine := test.QueryEngine() + engine := newTestEngine() engine.enablePerStepStats = true origMaxSamples := engine.maxSamplesPerQuery for _, c := range cases { @@ -1206,13 +1200,13 @@ load 10s var err error var qry Query if c.Interval == 0 { - qry, err = engine.NewInstantQuery(test.context, test.Queryable(), opts, c.Query, c.Start) + qry, err = engine.NewInstantQuery(context.Background(), storage, opts, c.Query, c.Start) } else { - qry, err = engine.NewRangeQuery(test.context, test.Queryable(), opts, c.Query, c.Start, c.End, c.Interval) + qry, err = engine.NewRangeQuery(context.Background(), storage, opts, c.Query, c.Start, c.End, c.Interval) } require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.Equal(t, expErr, res.Err) return qry.Stats() @@ -1234,17 +1228,13 @@ load 10s } func TestMaxQuerySamples(t *testing.T) { - test, err := NewTest(t, ` + storage := LoadedStorage(t, ` load 10s metric 1+1x100 bigmetric{a="1"} 1+1x100 bigmetric{a="2"} 1+1x100 `) - require.NoError(t, err) - defer test.Close() - - err = test.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) // These test cases should be touching the limit exactly (hence no exceeding). // Exceeding the limit will be tested by doing -1 to the MaxSamples. 
@@ -1382,20 +1372,20 @@ load 10s }, } - engine := test.QueryEngine() for _, c := range cases { t.Run(c.Query, func(t *testing.T) { + engine := newTestEngine() testFunc := func(expError error) { var err error var qry Query if c.Interval == 0 { - qry, err = engine.NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start) + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start) } else { - qry, err = engine.NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) + qry, err = engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval) } require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) stats := qry.Stats() require.Equal(t, expError, res.Err) require.NotNil(t, stats) @@ -1416,7 +1406,8 @@ load 10s } func TestAtModifier(t *testing.T) { - test, err := NewTest(t, ` + engine := newTestEngine() + storage := LoadedStorage(t, ` load 10s metric{job="1"} 0+1x1000 metric{job="2"} 0+2x1000 @@ -1427,11 +1418,7 @@ load 10s load 1ms metric_ms 0+1x10000 `) - require.NoError(t, err) - defer test.Close() - - err = test.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) lbls1 := labels.FromStrings("__name__", "metric", "job", "1") lbls2 := labels.FromStrings("__name__", "metric", "job", "2") @@ -1441,7 +1428,7 @@ load 1ms lblsneg := labels.FromStrings("__name__", "metric_neg") // Add some samples with negative timestamp. - db := test.TSDB() + db := storage.DB app := db.Appender(context.Background()) ref, err := app.Append(0, lblsneg, -1000000, 1000) require.NoError(t, err) @@ -1630,13 +1617,13 @@ load 1ms var err error var qry Query if c.end == 0 { - qry, err = test.QueryEngine().NewInstantQuery(test.context, test.Queryable(), nil, c.query, start) + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.query, start) } else { - qry, err = test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.query, start, end, interval) + qry, err = engine.NewRangeQuery(context.Background(), storage, nil, c.query, start, end, interval) } require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) if expMat, ok := c.result.(Matrix); ok { sort.Sort(expMat) @@ -1955,18 +1942,16 @@ func TestSubquerySelector(t *testing.T) { }, } { t.Run("", func(t *testing.T) { - test, err := NewTest(t, tst.loadString) - require.NoError(t, err) - defer test.Close() + engine := newTestEngine() + storage := LoadedStorage(t, tst.loadString) + t.Cleanup(func() { storage.Close() }) - require.NoError(t, test.Run()) - engine := test.QueryEngine() for _, c := range tst.cases { t.Run(c.Query, func(t *testing.T) { - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.Equal(t, c.Result.Err, res.Err) mat := res.Value.(Matrix) sort.Sort(mat) @@ -1978,15 +1963,12 @@ func TestSubquerySelector(t *testing.T) { } func TestTimestampFunction_StepsMoreOftenThanSamples(t *testing.T) { - test, err := NewTest(t, ` + engine := newTestEngine() + storage := LoadedStorage(t, ` load 1m metric 0+1x1000 `) - require.NoError(t, err) - defer test.Close() - - err = test.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) query := "timestamp(metric)" start := 
time.Unix(0, 0) @@ -2013,10 +1995,10 @@ load 1m }, } - qry, err := test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, query, start, end, interval) + qry, err := engine.NewRangeQuery(context.Background(), storage, nil, query, start, end, interval) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) require.Equal(t, expectedResult, res.Value) } @@ -2955,7 +2937,6 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { } func TestEngineOptsValidation(t *testing.T) { - ctx := context.Background() cases := []struct { opts EngineOpts query string @@ -3015,8 +2996,8 @@ func TestEngineOptsValidation(t *testing.T) { for _, c := range cases { eng := NewEngine(c.opts) - _, err1 := eng.NewInstantQuery(ctx, nil, nil, c.query, time.Unix(10, 0)) - _, err2 := eng.NewRangeQuery(ctx, nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) + _, err1 := eng.NewInstantQuery(context.Background(), nil, nil, c.query, time.Unix(10, 0)) + _, err2 := eng.NewRangeQuery(context.Background(), nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) if c.fail { require.Equal(t, c.expError, err1) require.Equal(t, c.expError, err2) @@ -3156,17 +3137,14 @@ func TestRangeQuery(t *testing.T) { } for _, c := range cases { t.Run(c.Name, func(t *testing.T) { - test, err := NewTest(t, c.Load) - require.NoError(t, err) - defer test.Close() + engine := newTestEngine() + storage := LoadedStorage(t, c.Load) + t.Cleanup(func() { storage.Close() }) - err = test.Run() + qry, err := engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval) require.NoError(t, err) - qry, err := test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) - require.NoError(t, err) - - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) require.Equal(t, c.Result, res.Value) }) @@ -3176,27 +3154,24 @@ func TestRangeQuery(t *testing.T) { func TestNativeHistogramRate(t *testing.T) { // TODO(beorn7): Integrate histograms into the PromQL testing framework // and write more tests there. 
- test, err := NewTest(t, "") - require.NoError(t, err) - defer test.Close() + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) seriesName := "sparse_histogram_series" lbls := labels.FromStrings("__name__", seriesName) - app := test.Storage().Appender(context.TODO()) + app := storage.Appender(context.Background()) for i, h := range tsdbutil.GenerateTestHistograms(100) { _, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), h, nil) require.NoError(t, err) } require.NoError(t, app.Commit()) - require.NoError(t, test.Run()) - engine := test.QueryEngine() - queryString := fmt.Sprintf("rate(%s[1m])", seriesName) - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) vector, err := res.Vector() require.NoError(t, err) @@ -3220,27 +3195,24 @@ func TestNativeHistogramRate(t *testing.T) { func TestNativeFloatHistogramRate(t *testing.T) { // TODO(beorn7): Integrate histograms into the PromQL testing framework // and write more tests there. - test, err := NewTest(t, "") - require.NoError(t, err) - defer test.Close() + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) seriesName := "sparse_histogram_series" lbls := labels.FromStrings("__name__", seriesName) - app := test.Storage().Appender(context.TODO()) + app := storage.Appender(context.Background()) for i, fh := range tsdbutil.GenerateTestFloatHistograms(100) { _, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), nil, fh) require.NoError(t, err) } require.NoError(t, app.Commit()) - require.NoError(t, test.Run()) - engine := test.QueryEngine() - queryString := fmt.Sprintf("rate(%s[1m])", seriesName) - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) vector, err := res.Vector() require.NoError(t, err) @@ -3283,16 +3255,16 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { } for _, floatHisto := range []bool{true, false} { t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) seriesName := "sparse_histogram_series" lbls := labels.FromStrings("__name__", seriesName) - engine := test.QueryEngine() ts := int64(10 * time.Minute / time.Millisecond) - app := test.Storage().Appender(context.TODO()) + app := storage.Appender(context.Background()) + var err error if floatHisto { _, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat()) } else { @@ -3302,10 +3274,10 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { require.NoError(t, app.Commit()) queryString := fmt.Sprintf("histogram_count(%s)", seriesName) - qry, err := engine.NewInstantQuery(test.context, 
test.Queryable(), nil, queryString, timestamp.Time(ts)) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) vector, err := res.Vector() @@ -3320,10 +3292,10 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { } queryString = fmt.Sprintf("histogram_sum(%s)", seriesName) - qry, err = engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) require.NoError(t, err) - res = qry.Exec(test.Context()) + res = qry.Exec(context.Background()) require.NoError(t, res.Err) vector, err = res.Vector() @@ -3533,18 +3505,18 @@ func TestNativeHistogram_HistogramQuantile(t *testing.T) { }, } - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) idx := int64(0) for _, floatHisto := range []bool{true, false} { for _, c := range cases { t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { seriesName := "sparse_histogram_series" lbls := labels.FromStrings("__name__", seriesName) - engine := test.QueryEngine() ts := idx * int64(10*time.Minute/time.Millisecond) - app := test.Storage().Appender(context.TODO()) + app := storage.Appender(context.Background()) + var err error if floatHisto { _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) } else { @@ -3556,10 +3528,10 @@ func TestNativeHistogram_HistogramQuantile(t *testing.T) { for j, sc := range c.subCases { t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) { queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName) - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) vector, err := res.Vector() @@ -3966,16 +3938,16 @@ func TestNativeHistogram_HistogramFraction(t *testing.T) { for _, floatHisto := range []bool{true, false} { for _, c := range cases { t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) seriesName := "sparse_histogram_series" lbls := labels.FromStrings("__name__", seriesName) - engine := test.QueryEngine() ts := idx * int64(10*time.Minute/time.Millisecond) - app := test.Storage().Appender(context.TODO()) + app := storage.Appender(context.Background()) + var err error if floatHisto { _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) } else { @@ -3987,10 +3959,10 @@ func TestNativeHistogram_HistogramFraction(t *testing.T) { for j, sc := range c.subCases { t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) { queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName) - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, 
timestamp.Time(ts)) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) vector, err := res.Vector() @@ -4125,20 +4097,20 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { for _, c := range cases { for _, floatHisto := range []bool{true, false} { t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) seriesName := "sparse_histogram_series" seriesNameOverTime := "sparse_histogram_series_over_time" - engine := test.QueryEngine() + engine := newTestEngine() ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := test.Storage().Appender(context.TODO()) + app := storage.Appender(context.Background()) for idx1, h := range c.histograms { lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1)) // Since we mutate h later, we need to create a copy here. + var err error if floatHisto { _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) } else { @@ -4159,10 +4131,10 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { require.NoError(t, app.Commit()) queryAndCheck := func(queryString string, ts int64, exp Vector) { - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) vector, err := res.Vector() @@ -4385,19 +4357,18 @@ func TestNativeHistogram_SubOperator(t *testing.T) { for _, c := range cases { for _, floatHisto := range []bool{true, false} { t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) seriesName := "sparse_histogram_series" - engine := test.QueryEngine() - ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := test.Storage().Appender(context.TODO()) + app := storage.Appender(context.Background()) for idx1, h := range c.histograms { lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1)) // Since we mutate h later, we need to create a copy here. 
+ var err error if floatHisto { _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) } else { @@ -4408,10 +4379,10 @@ func TestNativeHistogram_SubOperator(t *testing.T) { require.NoError(t, app.Commit()) queryAndCheck := func(queryString string, exp Vector) { - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) vector, err := res.Vector() @@ -4531,20 +4502,20 @@ func TestNativeHistogram_MulDivOperator(t *testing.T) { for _, c := range cases { for _, floatHisto := range []bool{true, false} { t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) seriesName := "sparse_histogram_series" floatSeriesName := "float_series" - engine := test.QueryEngine() + engine := newTestEngine() ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := test.Storage().Appender(context.TODO()) + app := storage.Appender(context.Background()) h := c.histogram lbls := labels.FromStrings("__name__", seriesName) // Since we mutate h later, we need to create a copy here. + var err error if floatHisto { _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) } else { @@ -4556,10 +4527,10 @@ func TestNativeHistogram_MulDivOperator(t *testing.T) { require.NoError(t, app.Commit()) queryAndCheck := func(queryString string, exp Vector) { - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) vector, err := res.Vector() @@ -4660,22 +4631,18 @@ metric 0 1 2 for _, c := range cases { c := c t.Run(c.name, func(t *testing.T) { - test, err := NewTest(t, load) - require.NoError(t, err) - defer test.Close() + engine := newTestEngine() + storage := LoadedStorage(t, load) + t.Cleanup(func() { storage.Close() }) - err = test.Run() - require.NoError(t, err) - - eng := test.QueryEngine() if c.engineLookback != 0 { - eng.lookbackDelta = c.engineLookback + engine.lookbackDelta = c.engineLookback } opts := NewPrometheusQueryOpts(false, c.queryLookback) - qry, err := eng.NewInstantQuery(test.context, test.Queryable(), opts, query, c.ts) + qry, err := engine.NewInstantQuery(context.Background(), storage, opts, query, c.ts) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) vec, ok := res.Value.(Vector) require.True(t, ok) diff --git a/promql/promql_test.go b/promql/promql_test.go index a07a0f5cb..05821b1c1 100644 --- a/promql/promql_test.go +++ b/promql/promql_test.go @@ -15,7 +15,6 @@ package promql import ( "context" - "path/filepath" "strings" "testing" "time" @@ -26,19 +25,21 @@ import ( "github.com/prometheus/prometheus/util/teststorage" ) +func newTestEngine() *Engine { + return NewEngine(EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 10000, + Timeout: 100 * time.Second, + NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) }, + EnableAtModifier: true, + 
EnableNegativeOffset: true, + EnablePerStepStats: true, + }) +} + func TestEvaluations(t *testing.T) { - files, err := filepath.Glob("testdata/*.test") - require.NoError(t, err) - - for _, fn := range files { - t.Run(fn, func(t *testing.T) { - test, err := newTestFromFile(t, fn) - require.NoError(t, err) - require.NoError(t, test.Run()) - - test.Close() - }) - } + RunBuiltinTests(t, newTestEngine()) } // Run a lot of queries at the same time, to check for race conditions. diff --git a/promql/test.go b/promql/test.go index 64fa66d5d..3d88ca033 100644 --- a/promql/test.go +++ b/promql/test.go @@ -15,12 +15,14 @@ package promql import ( "context" + "embed" "errors" "fmt" + "io/fs" "math" - "os" "strconv" "strings" + "testing" "time" "github.com/grafana/regexp" @@ -32,7 +34,6 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" ) @@ -51,23 +52,74 @@ const ( var testStartTime = time.Unix(0, 0).UTC() -// Test is a sequence of read and write commands that are run +// LoadedStorage returns storage with generated data using the provided load statements. +// Non-load statements will cause test errors. +func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage { + test, err := newTest(t, input) + require.NoError(t, err) + + for _, cmd := range test.cmds { + switch cmd.(type) { + case *loadCmd: + require.NoError(t, test.exec(cmd, nil)) + default: + t.Errorf("only 'load' commands accepted, got '%s'", cmd) + } + } + return test.storage +} + +// RunBuiltinTests runs an acceptance test suite against the provided engine. +func RunBuiltinTests(t *testing.T, engine engineQuerier) { + files, err := fs.Glob(testsFs, "*/*.test") + require.NoError(t, err) + + for _, fn := range files { + t.Run(fn, func(t *testing.T) { + content, err := fs.ReadFile(testsFs, fn) + require.NoError(t, err) + RunTest(t, string(content), engine) + }) + } +} + +// RunTest parses and runs the test against the provided engine. +func RunTest(t testutil.T, input string, engine engineQuerier) { + test, err := newTest(t, input) + require.NoError(t, err) + + defer func() { + if test.storage != nil { + test.storage.Close() + } + if test.cancelCtx != nil { + test.cancelCtx() + } + }() + + for _, cmd := range test.cmds { + // TODO(fabxc): aggregate command errors, yield diffs for result + // comparison errors. + require.NoError(t, test.exec(cmd, engine)) + } +} + +// test is a sequence of read and write commands that are run // against a test storage. -type Test struct { +type test struct { testutil.T cmds []testCommand storage *teststorage.TestStorage - queryEngine *Engine - context context.Context - cancelCtx context.CancelFunc + context context.Context + cancelCtx context.CancelFunc } -// NewTest returns an initialized empty Test. -func NewTest(t testutil.T, input string) (*Test, error) { - test := &Test{ +// newTest returns an initialized empty Test. 
+func newTest(t testutil.T, input string) (*test, error) { + test := &test{ T: t, cmds: []testCommand{}, } @@ -77,46 +129,12 @@ func NewTest(t testutil.T, input string) (*Test, error) { return test, err } -func newTestFromFile(t testutil.T, filename string) (*Test, error) { - content, err := os.ReadFile(filename) - if err != nil { - return nil, err - } - return NewTest(t, string(content)) -} +//go:embed testdata +var testsFs embed.FS -// QueryEngine returns the test's query engine. -func (t *Test) QueryEngine() *Engine { - return t.queryEngine -} - -// Queryable allows querying the test data. -func (t *Test) Queryable() storage.Queryable { - return t.storage -} - -// Context returns the test's context. -func (t *Test) Context() context.Context { - return t.context -} - -// Storage returns the test's storage. -func (t *Test) Storage() storage.Storage { - return t.storage -} - -// TSDB returns test's TSDB. -func (t *Test) TSDB() *tsdb.DB { - return t.storage.DB -} - -// ExemplarStorage returns the test's exemplar storage. -func (t *Test) ExemplarStorage() storage.ExemplarStorage { - return t.storage -} - -func (t *Test) ExemplarQueryable() storage.ExemplarQueryable { - return t.storage.ExemplarQueryable() +type engineQuerier interface { + NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) + NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error) } func raise(line int, format string, v ...interface{}) error { @@ -157,7 +175,7 @@ func parseLoad(lines []string, i int) (int, *loadCmd, error) { return i, cmd, nil } -func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) { +func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { if !patEvalInstant.MatchString(lines[i]) { return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at ] ") } @@ -237,7 +255,7 @@ func getLines(input string) []string { } // parse the given command sequence and appends it to the test. -func (t *Test) parse(input string) error { +func (t *test) parse(input string) error { lines := getLines(input) var err error // Scan for steps line by line. @@ -433,19 +451,6 @@ func (cmd clearCmd) String() string { return "clear" } -// Run executes the command sequence of the test. Until the maximum error number -// is reached, evaluation errors do not terminate execution. -func (t *Test) Run() error { - for _, cmd := range t.cmds { - // TODO(fabxc): aggregate command errors, yield diffs for result - // comparison errors. - if err := t.exec(cmd); err != nil { - return err - } - } - return nil -} - type atModifierTestCase struct { expr string evalTime time.Time @@ -515,7 +520,7 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa } // exec processes a single step of the test. -func (t *Test) exec(tc testCommand) error { +func (t *test) exec(tc testCommand, engine engineQuerier) error { switch cmd := tc.(type) { case *clearCmd: t.clear() @@ -538,7 +543,7 @@ func (t *Test) exec(tc testCommand) error { } queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...) 
for _, iq := range queries { - q, err := t.QueryEngine().NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime) + q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime) if err != nil { return err } @@ -560,7 +565,7 @@ func (t *Test) exec(tc testCommand) error { // Check query returns same result in range mode, // by checking against the middle step. - q, err = t.queryEngine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute) + q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute) if err != nil { return err } @@ -601,7 +606,7 @@ func (t *Test) exec(tc testCommand) error { } // clear the current test storage of all inserted samples. -func (t *Test) clear() { +func (t *test) clear() { if t.storage != nil { err := t.storage.Close() require.NoError(t.T, err, "Unexpected error while closing test storage.") @@ -610,30 +615,9 @@ func (t *Test) clear() { t.cancelCtx() } t.storage = teststorage.New(t) - - opts := EngineOpts{ - Logger: nil, - Reg: nil, - MaxSamples: 10000, - Timeout: 100 * time.Second, - NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) }, - EnableAtModifier: true, - EnableNegativeOffset: true, - EnablePerStepStats: true, - } - - t.queryEngine = NewEngine(opts) t.context, t.cancelCtx = context.WithCancel(context.Background()) } -// Close closes resources associated with the Test. -func (t *Test) Close() { - t.cancelCtx() - - err := t.storage.Close() - require.NoError(t.T, err, "Unexpected error while closing test storage.") -} - // samplesAlmostEqual returns true if the two sample lines only differ by a // small relative error in their sample value. func almostEqual(a, b float64) bool { diff --git a/rules/alerting_test.go b/rules/alerting_test.go index 8cd0da281..f980c2a98 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -33,6 +33,17 @@ import ( "github.com/prometheus/prometheus/util/teststorage" ) +var testEngine = promql.NewEngine(promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 10000, + Timeout: 100 * time.Second, + NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 }, + EnableAtModifier: true, + EnableNegativeOffset: true, + EnablePerStepStats: true, +}) + func TestAlertingRuleState(t *testing.T) { tests := []struct { name string @@ -74,14 +85,11 @@ func TestAlertingRuleState(t *testing.T) { } func TestAlertingRuleLabelsUpdate(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 75 85 70 70 stale `) - require.NoError(t, err) - defer suite.Close() - - require.NoError(t, suite.Run()) + t.Cleanup(func() { storage.Close() }) expr, err := parser.ParseExpr(`http_requests < 100`) require.NoError(t, err) @@ -158,7 +166,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { t.Logf("case %d", i) evalTime := baseTime.Add(time.Duration(i) * time.Minute) result[0].T = timestamp.FromTime(evalTime) - res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) + res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. 
@@ -175,20 +183,17 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { require.Equal(t, result, filteredRes) } evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) - res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) + res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) require.Equal(t, 0, len(res)) } func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 75 85 70 70 `) - require.NoError(t, err) - defer suite.Close() - - require.NoError(t, suite.Run()) + t.Cleanup(func() { storage.Close() }) expr, err := parser.ParseExpr(`http_requests < 100`) require.NoError(t, err) @@ -246,7 +251,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. res, err := ruleWithoutExternalLabels.Eval( - suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0, + context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -260,7 +265,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { } res, err = ruleWithExternalLabels.Eval( - suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0, + context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -277,14 +282,11 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { } func TestAlertingRuleExternalURLInTemplate(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 75 85 70 70 `) - require.NoError(t, err) - defer suite.Close() - - require.NoError(t, suite.Run()) + t.Cleanup(func() { storage.Close() }) expr, err := parser.ParseExpr(`http_requests < 100`) require.NoError(t, err) @@ -342,7 +344,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. res, err := ruleWithoutExternalURL.Eval( - suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0, + context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -356,7 +358,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { } res, err = ruleWithExternalURL.Eval( - suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0, + context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -373,14 +375,11 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { } func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 75 85 70 70 `) - require.NoError(t, err) - defer suite.Close() - - require.NoError(t, suite.Run()) + t.Cleanup(func() { storage.Close() }) expr, err := parser.ParseExpr(`http_requests < 100`) require.NoError(t, err) @@ -414,7 +413,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) { var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. 
res, err := rule.Eval( - suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0, + context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -430,14 +429,11 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) { } func TestAlertingRuleQueryInTemplate(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 70 85 70 70 `) - require.NoError(t, err) - defer suite.Close() - - require.NoError(t, suite.Run()) + t.Cleanup(func() { storage.Close() }) expr, err := parser.ParseExpr(`sum(http_requests) < 100`) require.NoError(t, err) @@ -473,7 +469,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }}; require.Fail(t, "unexpected blocking when template expanding.") } } - return EngineQueryFunc(suite.QueryEngine(), suite.Storage())(ctx, q, ts) + return EngineQueryFunc(testEngine, storage)(ctx, q, ts) } go func() { <-startQueryCh @@ -484,7 +480,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }}; close(getDoneCh) }() _, err = ruleWithQueryInTemplate.Eval( - suite.Context(), evalTime, slowQueryFunc, nil, 0, + context.TODO(), evalTime, slowQueryFunc, nil, 0, ) require.NoError(t, err) } @@ -542,15 +538,12 @@ func TestAlertingRuleDuplicate(t *testing.T) { } func TestAlertingRuleLimit(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m metric{label="1"} 1 metric{label="2"} 1 `) - require.NoError(t, err) - defer suite.Close() - - require.NoError(t, suite.Run()) + t.Cleanup(func() { storage.Close() }) tests := []struct { limit int @@ -587,7 +580,7 @@ func TestAlertingRuleLimit(t *testing.T) { evalTime := time.Unix(0, 0) for _, test := range tests { - switch _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); { + switch _, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); { case err != nil: require.EqualError(t, err, test.err) case test.err != "": @@ -729,14 +722,11 @@ func TestSendAlertsDontAffectActiveAlerts(t *testing.T) { } func TestKeepFiringFor(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 75 85 70 70 10x5 `) - require.NoError(t, err) - defer suite.Close() - - require.NoError(t, suite.Run()) + t.Cleanup(func() { storage.Close() }) expr, err := parser.ParseExpr(`http_requests > 50`) require.NoError(t, err) @@ -819,7 +809,7 @@ func TestKeepFiringFor(t *testing.T) { t.Logf("case %d", i) evalTime := baseTime.Add(time.Duration(i) * time.Minute) result[0].T = timestamp.FromTime(evalTime) - res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) + res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. 
@@ -836,20 +826,17 @@ func TestKeepFiringFor(t *testing.T) { require.Equal(t, result, filteredRes) } evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) - res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) + res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) require.Equal(t, 0, len(res)) } func TestPendingAndKeepFiringFor(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 75 10x10 `) - require.NoError(t, err) - defer suite.Close() - - require.NoError(t, suite.Run()) + t.Cleanup(func() { storage.Close() }) expr, err := parser.ParseExpr(`http_requests > 50`) require.NoError(t, err) @@ -876,7 +863,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) { baseTime := time.Unix(0, 0) result.T = timestamp.FromTime(baseTime) - res, err := rule.Eval(suite.Context(), baseTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) + res, err := rule.Eval(context.TODO(), baseTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) require.Len(t, res, 2) @@ -891,7 +878,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) { } evalTime := baseTime.Add(time.Minute) - res, err = rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) + res, err = rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) require.Equal(t, 0, len(res)) } diff --git a/rules/manager_test.go b/rules/manager_test.go index 26a790964..f301aa010 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -47,16 +47,12 @@ func TestMain(m *testing.M) { } func TestAlertingRule(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 5m http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85 http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140 `) - require.NoError(t, err) - defer suite.Close() - - err = suite.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`) require.NoError(t, err) @@ -161,7 +157,7 @@ func TestAlertingRule(t *testing.T) { evalTime := baseTime.Add(test.time) - res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) + res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. 
@@ -191,16 +187,12 @@ func TestAlertingRule(t *testing.T) { } func TestForStateAddSamples(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 5m http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85 http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140 `) - require.NoError(t, err) - defer suite.Close() - - err = suite.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`) require.NoError(t, err) @@ -311,7 +303,7 @@ func TestForStateAddSamples(t *testing.T) { forState = float64(value.StaleNaN) } - res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) + res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) var filteredRes promql.Vector // After removing 'ALERTS' samples. @@ -353,24 +345,20 @@ func sortAlerts(items []*Alert) { } func TestForStateRestore(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 5m http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 50 0 0 25 0 0 40 0 120 http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 125 90 60 0 0 25 0 0 40 0 130 `) - require.NoError(t, err) - defer suite.Close() - - err = suite.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`) require.NoError(t, err) opts := &ManagerOptions{ - QueryFunc: EngineQueryFunc(suite.QueryEngine(), suite.Storage()), - Appendable: suite.Storage(), - Queryable: suite.Storage(), + QueryFunc: EngineQueryFunc(testEngine, storage), + Appendable: storage, + Queryable: storage, Context: context.Background(), Logger: log.NewNopLogger(), NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {}, @@ -404,7 +392,7 @@ func TestForStateRestore(t *testing.T) { baseTime := time.Unix(0, 0) for _, duration := range initialRuns { evalTime := baseTime.Add(duration) - group.Eval(suite.Context(), evalTime) + group.Eval(context.TODO(), evalTime) } exp := rule.ActiveAlerts() @@ -468,7 +456,7 @@ func TestForStateRestore(t *testing.T) { restoreTime := baseTime.Add(tst.restoreDuration) // First eval before restoration. - newGroup.Eval(suite.Context(), restoreTime) + newGroup.Eval(context.TODO(), restoreTime) // Restore happens here. newGroup.RestoreForState(restoreTime) @@ -515,7 +503,7 @@ func TestForStateRestore(t *testing.T) { // Testing the grace period. 
for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} { evalTime := baseTime.Add(duration) - group.Eval(suite.Context(), evalTime) + group.Eval(context.TODO(), evalTime) } testFunc(testInput{ restoreDuration: 25 * time.Minute, @@ -1239,16 +1227,11 @@ func TestRuleHealthUpdates(t *testing.T) { } func TestRuleGroupEvalIterationFunc(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 5m http_requests{instance="0"} 75 85 50 0 0 25 0 0 40 0 120 `) - - require.NoError(t, err) - defer suite.Close() - - err = suite.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`) require.NoError(t, err) @@ -1294,9 +1277,9 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) { testFunc := func(tst testInput) { opts := &ManagerOptions{ - QueryFunc: EngineQueryFunc(suite.QueryEngine(), suite.Storage()), - Appendable: suite.Storage(), - Queryable: suite.Storage(), + QueryFunc: EngineQueryFunc(testEngine, storage), + Appendable: storage, + Queryable: storage, Context: context.Background(), Logger: log.NewNopLogger(), NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {}, @@ -1361,15 +1344,11 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) { } func TestNativeHistogramsInRecordingRules(t *testing.T) { - suite, err := promql.NewTest(t, "") - require.NoError(t, err) - t.Cleanup(suite.Close) - - err = suite.Run() - require.NoError(t, err) + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) // Add some histograms. - db := suite.TSDB() + db := storage.DB hists := tsdbutil.GenerateTestHistograms(5) ts := time.Now() app := db.Appender(context.Background()) @@ -1381,9 +1360,9 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) { require.NoError(t, app.Commit()) opts := &ManagerOptions{ - QueryFunc: EngineQueryFunc(suite.QueryEngine(), suite.Storage()), - Appendable: suite.Storage(), - Queryable: suite.Storage(), + QueryFunc: EngineQueryFunc(testEngine, storage), + Appendable: storage, + Queryable: storage, Context: context.Background(), Logger: log.NewNopLogger(), } diff --git a/rules/recording_test.go b/rules/recording_test.go index 35a0b1a0b..960ff4bdb 100644 --- a/rules/recording_test.go +++ b/rules/recording_test.go @@ -109,27 +109,22 @@ var ruleEvalTestScenarios = []struct { }, } -func setUpRuleEvalTest(t require.TestingT) *promql.Test { - suite, err := promql.NewTest(t, ` +func setUpRuleEvalTest(t require.TestingT) *teststorage.TestStorage { + return promql.LoadedStorage(t, ` load 1m metric{label_a="1",label_b="3"} 1 metric{label_a="2",label_b="4"} 10 `) - require.NoError(t, err) - - return suite } func TestRuleEval(t *testing.T) { - suite := setUpRuleEvalTest(t) - defer suite.Close() - - require.NoError(t, suite.Run()) + storage := setUpRuleEvalTest(t) + t.Cleanup(func() { storage.Close() }) for _, scenario := range ruleEvalTestScenarios { t.Run(scenario.name, func(t *testing.T) { rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels) - result, err := rule.Eval(suite.Context(), ruleEvaluationTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) + result, err := rule.Eval(context.TODO(), ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) require.Equal(t, scenario.expected, result) }) @@ -137,10 +132,8 @@ func TestRuleEval(t *testing.T) { } func BenchmarkRuleEval(b *testing.B) { - suite := setUpRuleEvalTest(b) - 
defer suite.Close() - - require.NoError(b, suite.Run()) + storage := setUpRuleEvalTest(b) + b.Cleanup(func() { storage.Close() }) for _, scenario := range ruleEvalTestScenarios { b.Run(scenario.name, func(b *testing.B) { @@ -149,7 +142,7 @@ func BenchmarkRuleEval(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := rule.Eval(suite.Context(), ruleEvaluationTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0) + _, err := rule.Eval(context.TODO(), ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0) if err != nil { require.NoError(b, err) } @@ -184,15 +177,12 @@ func TestRuleEvalDuplicate(t *testing.T) { } func TestRecordingRuleLimit(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m metric{label="1"} 1 metric{label="2"} 1 `) - require.NoError(t, err) - defer suite.Close() - - require.NoError(t, suite.Run()) + t.Cleanup(func() { storage.Close() }) tests := []struct { limit int @@ -223,7 +213,7 @@ func TestRecordingRuleLimit(t *testing.T) { evalTime := time.Unix(0, 0) for _, test := range tests { - switch _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); { + switch _, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); { case err != nil: require.EqualError(t, err, test.err) case test.err != "": diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go index 261c28e21..3d9182640 100644 --- a/storage/remote/read_handler_test.go +++ b/storage/remote/read_handler_test.go @@ -33,22 +33,19 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/prometheus/prometheus/util/teststorage" ) func TestSampledReadEndpoint(t *testing.T) { - suite, err := promql.NewTest(t, ` + store := promql.LoadedStorage(t, ` load 1m test_metric1{foo="bar",baz="qux"} 1 `) - require.NoError(t, err) - defer suite.Close() + defer store.Close() - addNativeHistogramsToTestSuite(t, suite, 1) + addNativeHistogramsToTestSuite(t, store, 1) - err = suite.Run() - require.NoError(t, err) - - h := NewReadHandler(nil, nil, suite.Storage(), func() config.Config { + h := NewReadHandler(nil, nil, store, func() config.Config { return config.Config{ GlobalConfig: config.GlobalConfig{ // We expect external labels to be added, with the source labels honored. @@ -135,19 +132,16 @@ func TestSampledReadEndpoint(t *testing.T) { } func BenchmarkStreamReadEndpoint(b *testing.B) { - suite, err := promql.NewTest(b, ` + store := promql.LoadedStorage(b, ` load 1m test_metric1{foo="bar1",baz="qux"} 0+100x119 test_metric1{foo="bar2",baz="qux"} 0+100x120 test_metric1{foo="bar3",baz="qux"} 0+100x240 `) - require.NoError(b, err) - defer suite.Close() + b.Cleanup(func() { store.Close() }) - require.NoError(b, suite.Run()) - - api := NewReadHandler(nil, nil, suite.Storage(), func() config.Config { + api := NewReadHandler(nil, nil, store, func() config.Config { return config.Config{} }, 0, 1, 0, @@ -206,20 +200,17 @@ func TestStreamReadEndpoint(t *testing.T) { // Second with 121 samples, We expect 1 frame with 2 chunks. // Third with 241 samples. We expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit. // Fourth with 120 histogram samples. We expect 1 frame with 1 chunk. 
- suite, err := promql.NewTest(t, ` + store := promql.LoadedStorage(t, ` load 1m test_metric1{foo="bar1",baz="qux"} 0+100x119 test_metric1{foo="bar2",baz="qux"} 0+100x120 test_metric1{foo="bar3",baz="qux"} 0+100x240 `) - require.NoError(t, err) - defer suite.Close() + defer store.Close() - addNativeHistogramsToTestSuite(t, suite, 120) + addNativeHistogramsToTestSuite(t, store, 120) - require.NoError(t, suite.Run()) - - api := NewReadHandler(nil, nil, suite.Storage(), func() config.Config { + api := NewReadHandler(nil, nil, store, func() config.Config { return config.Config{ GlobalConfig: config.GlobalConfig{ // We expect external labels to be added, with the source labels honored. @@ -440,10 +431,10 @@ func TestStreamReadEndpoint(t *testing.T) { }, results) } -func addNativeHistogramsToTestSuite(t *testing.T, pqlTest *promql.Test, n int) { +func addNativeHistogramsToTestSuite(t *testing.T, storage *teststorage.TestStorage, n int) { lbls := labels.FromStrings("__name__", "test_histogram_metric1", "baz", "qux") - app := pqlTest.Storage().Appender(context.TODO()) + app := storage.Appender(context.TODO()) for i, fh := range tsdbutil.GenerateTestFloatHistograms(n) { _, err := app.AppendHistogram(0, lbls, int64(i)*int64(60*time.Second/time.Millisecond), nil, fh) require.NoError(t, err) diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 742ca09ba..c4710c69f 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -56,6 +56,17 @@ import ( "github.com/prometheus/prometheus/util/teststorage" ) +var testEngine = promql.NewEngine(promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 10000, + Timeout: 100 * time.Second, + NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 }, + EnableAtModifier: true, + EnableNegativeOffset: true, + EnablePerStepStats: true, +}) + // testMetaStore satisfies the scrape.MetricMetadataStore interface. // It is used to inject specific metadata as part of a test case. 
type testMetaStore struct { @@ -305,7 +316,7 @@ var sampleFlagMap = map[string]string{ } func TestEndpoints(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m test_metric1{foo="bar"} 0+100x100 test_metric1{foo="boo"} 1+0x100 @@ -316,6 +327,7 @@ func TestEndpoints(t *testing.T) { test_metric4{foo="boo", dup="1"} 1+0x100 test_metric4{foo="boo"} 1+0x100 `) + t.Cleanup(func() { storage.Close() }) start := time.Unix(0, 0) exemplars := []exemplar.QueryResult{ @@ -361,15 +373,10 @@ func TestEndpoints(t *testing.T) { }, } for _, ed := range exemplars { - suite.ExemplarStorage().AppendExemplar(0, ed.SeriesLabels, ed.Exemplars[0]) + _, err := storage.AppendExemplar(0, ed.SeriesLabels, ed.Exemplars[0]) require.NoError(t, err, "failed to add exemplar: %+v", ed.Exemplars[0]) } - require.NoError(t, err) - defer suite.Close() - - require.NoError(t, suite.Run()) - now := time.Now() t.Run("local", func(t *testing.T) { @@ -383,9 +390,9 @@ func TestEndpoints(t *testing.T) { testTargetRetriever := setupTestTargetRetriever(t) api := &API{ - Queryable: suite.Storage(), - QueryEngine: suite.QueryEngine(), - ExemplarQueryable: suite.ExemplarQueryable(), + Queryable: storage, + QueryEngine: testEngine, + ExemplarQueryable: storage.ExemplarQueryable(), targetRetriever: testTargetRetriever.toFactory(), alertmanagerRetriever: testAlertmanagerRetriever{}.toFactory(), flagsMap: sampleFlagMap, @@ -394,14 +401,14 @@ func TestEndpoints(t *testing.T) { ready: func(f http.HandlerFunc) http.HandlerFunc { return f }, rulesRetriever: algr.toFactory(), } - testEndpoints(t, api, testTargetRetriever, suite.ExemplarStorage(), true) + testEndpoints(t, api, testTargetRetriever, storage, true) }) // Run all the API tests against a API that is wired to forward queries via // the remote read client to a test server, which in turn sends them to the - // data from the test suite. + // data from the test storage. t.Run("remote", func(t *testing.T) { - server := setupRemote(suite.Storage()) + server := setupRemote(storage) defer server.Close() u, err := url.Parse(server.URL) @@ -446,8 +453,8 @@ func TestEndpoints(t *testing.T) { api := &API{ Queryable: remote, - QueryEngine: suite.QueryEngine(), - ExemplarQueryable: suite.ExemplarQueryable(), + QueryEngine: testEngine, + ExemplarQueryable: storage.ExemplarQueryable(), targetRetriever: testTargetRetriever.toFactory(), alertmanagerRetriever: testAlertmanagerRetriever{}.toFactory(), flagsMap: sampleFlagMap, @@ -456,8 +463,7 @@ func TestEndpoints(t *testing.T) { ready: func(f http.HandlerFunc) http.HandlerFunc { return f }, rulesRetriever: algr.toFactory(), } - - testEndpoints(t, api, testTargetRetriever, suite.ExemplarStorage(), false) + testEndpoints(t, api, testTargetRetriever, storage, false) }) } @@ -470,7 +476,7 @@ func (b byLabels) Less(i, j int) bool { return labels.Compare(b[i], b[j]) < 0 } func TestGetSeries(t *testing.T) { // TestEndpoints doesn't have enough label names to test api.labelNames // endpoint properly. Hence we test it separately. 
- suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m test_metric1{foo1="bar", baz="abc"} 0+100x100 test_metric1{foo2="boo"} 1+0x100 @@ -478,11 +484,9 @@ func TestGetSeries(t *testing.T) { test_metric2{foo="boo", xyz="qwerty"} 1+0x100 test_metric2{foo="baz", abc="qwerty"} 1+0x100 `) - require.NoError(t, err) - defer suite.Close() - require.NoError(t, suite.Run()) + t.Cleanup(func() { storage.Close() }) api := &API{ - Queryable: suite.Storage(), + Queryable: storage, } request := func(method string, matchers ...string) (*http.Request, error) { u, err := url.Parse("http://example.com") @@ -576,7 +580,7 @@ func TestGetSeries(t *testing.T) { func TestQueryExemplars(t *testing.T) { start := time.Unix(0, 0) - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m test_metric1{foo="bar"} 0+100x100 test_metric1{foo="boo"} 1+0x100 @@ -587,15 +591,12 @@ func TestQueryExemplars(t *testing.T) { test_metric4{foo="boo", dup="1"} 1+0x100 test_metric4{foo="boo"} 1+0x100 `) - - require.NoError(t, err) - defer suite.Close() - require.NoError(t, suite.Run()) + t.Cleanup(func() { storage.Close() }) api := &API{ - Queryable: suite.Storage(), - QueryEngine: suite.QueryEngine(), - ExemplarQueryable: suite.ExemplarQueryable(), + Queryable: storage, + QueryEngine: testEngine, + ExemplarQueryable: storage.ExemplarQueryable(), } request := func(method string, qs url.Values) (*http.Request, error) { @@ -673,7 +674,7 @@ func TestQueryExemplars(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - es := suite.ExemplarStorage() + es := storage ctx := context.Background() for _, te := range tc.exemplars { @@ -700,7 +701,7 @@ func TestQueryExemplars(t *testing.T) { func TestLabelNames(t *testing.T) { // TestEndpoints doesn't have enough label names to test api.labelNames // endpoint properly. Hence we test it separately. 
- suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m test_metric1{foo1="bar", baz="abc"} 0+100x100 test_metric1{foo2="boo"} 1+0x100 @@ -708,11 +709,9 @@ func TestLabelNames(t *testing.T) { test_metric2{foo="boo", xyz="qwerty"} 1+0x100 test_metric2{foo="baz", abc="qwerty"} 1+0x100 `) - require.NoError(t, err) - defer suite.Close() - require.NoError(t, suite.Run()) + t.Cleanup(func() { storage.Close() }) api := &API{ - Queryable: suite.Storage(), + Queryable: storage, } request := func(method string, matchers ...string) (*http.Request, error) { u, err := url.Parse("http://example.com") @@ -801,14 +800,12 @@ func (testStats) Builtin() (_ stats.BuiltinStats) { } func TestStats(t *testing.T) { - suite, err := promql.NewTest(t, ``) - require.NoError(t, err) - defer suite.Close() - require.NoError(t, suite.Run()) + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) api := &API{ - Queryable: suite.Storage(), - QueryEngine: suite.QueryEngine(), + Queryable: storage, + QueryEngine: testEngine, now: func() time.Time { return time.Unix(123, 0) }, diff --git a/web/federate_test.go b/web/federate_test.go index 4fdcf8daa..2d3542ddc 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -36,6 +36,7 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/util/teststorage" ) var scenarios = map[string]struct { @@ -199,7 +200,7 @@ test_metric_without_labels{instance="baz"} 1001 6000000 } func TestFederation(t *testing.T) { - suite, err := promql.NewTest(t, ` + storage := promql.LoadedStorage(t, ` load 1m test_metric1{foo="bar",instance="i"} 0+100x100 test_metric1{foo="boo",instance="i"} 1+0x100 @@ -208,17 +209,10 @@ func TestFederation(t *testing.T) { test_metric_stale 1+10x99 stale test_metric_old 1+10x98 `) - if err != nil { - t.Fatal(err) - } - defer suite.Close() - - if err := suite.Run(); err != nil { - t.Fatal(err) - } + t.Cleanup(func() { storage.Close() }) h := &Handler{ - localStorage: &dbAdapter{suite.TSDB()}, + localStorage: &dbAdapter{storage.DB}, lookbackDelta: 5 * time.Minute, now: func() model.Time { return 101 * 60 * 1000 }, // 101min after epoch. config: &config.Config{ @@ -305,19 +299,12 @@ func normalizeBody(body *bytes.Buffer) string { } func TestFederationWithNativeHistograms(t *testing.T) { - suite, err := promql.NewTest(t, "") - if err != nil { - t.Fatal(err) - } - defer suite.Close() - - if err := suite.Run(); err != nil { - t.Fatal(err) - } + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) var expVec promql.Vector - db := suite.TSDB() + db := storage.DB hist := &histogram.Histogram{ Count: 10, ZeroCount: 2, @@ -354,6 +341,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { for i := 0; i < 6; i++ { l := labels.FromStrings("__name__", "test_metric", "foo", fmt.Sprintf("%d", i)) expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", fmt.Sprintf("%d", i)) + var err error switch i { case 0, 3: _, err = app.Append(0, l, 100*60*1000, float64(i*100)) @@ -383,7 +371,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { require.NoError(t, app.Commit()) h := &Handler{ - localStorage: &dbAdapter{suite.TSDB()}, + localStorage: &dbAdapter{db}, lookbackDelta: 5 * time.Minute, now: func() model.Time { return 101 * 60 * 1000 }, // 101min after epoch. 
config: &config.Config{ From fd96996b75e0a3142c5daee046d06816fb537762 Mon Sep 17 00:00:00 2001 From: Salar Nosrati-Ershad Date: Sat, 19 Aug 2023 17:03:26 +0330 Subject: [PATCH 48/82] docs: fix: correct reference to native histograms feature flag Signed-off-by: Salar Nosrati-Ershad --- docs/querying/basics.md | 2 +- docs/querying/functions.md | 2 +- docs/querying/operators.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index a4e17c5c6..fa0d44a69 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -35,7 +35,7 @@ vector is the only type that can be directly graphed. _Notes about the experimental native histograms:_ * Ingesting native histograms has to be enabled via a [feature - flag](../feature_flags.md#native-histograms). + flag](../../feature_flags.md#native-histograms). * Once native histograms have been ingested into the TSDB (and even after disabling the feature flag again), both instant vectors and range vectors may now contain samples that aren't simple floating point numbers (float samples) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 151a1c10b..1da70c603 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -14,7 +14,7 @@ vector, which if not provided it will default to the value of the expression _Notes about the experimental native histograms:_ * Ingesting native histograms has to be enabled via a [feature - flag](../feature_flags.md#native-histograms). As long as no native histograms + flag](../../feature_flags.md#native-histograms). As long as no native histograms have been ingested into the TSDB, all functions will behave as usual. * Functions that do not explicitly mention native histograms in their documentation (see below) will ignore histogram samples. diff --git a/docs/querying/operators.md b/docs/querying/operators.md index 2a9376d77..b92bdd94a 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -310,7 +310,7 @@ so `2 ^ 3 ^ 2` is equivalent to `2 ^ (3 ^ 2)`. ## Operators for native histograms Native histograms are an experimental feature. Ingesting native histograms has -to be enabled via a [feature flag](../feature_flags.md#native-histograms). Once +to be enabled via a [feature flag](../../feature_flags.md#native-histograms). Once native histograms have been ingested, they can be queried (even after the feature flag has been disabled again). However, the operator support for native histograms is still very limited. 
From 627c99424bd91c9cd57631e4a1a9e929c2685573 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 20 Aug 2023 14:25:32 +0100 Subject: [PATCH 49/82] scrape: extend TestDroppedTargetsList to check counts Signed-off-by: Bryan Boreham --- scrape/scrape_test.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 8578f1bec..a1b49ce80 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -88,27 +88,30 @@ func TestDroppedTargetsList(t *testing.T) { SourceLabels: model.LabelNames{"job"}, }, }, - KeepDroppedTargets: 1, } tgs = []*targetgroup.Group{ { Targets: []model.LabelSet{ {model.AddressLabel: "127.0.0.1:9090"}, + {model.AddressLabel: "127.0.0.1:9091"}, }, }, } sp, _ = newScrapePool(cfg, app, 0, nil, &Options{}) expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}" - expectedLength = 1 + expectedLength = 2 ) sp.Sync(tgs) sp.Sync(tgs) - if len(sp.droppedTargets) != expectedLength { - t.Fatalf("Length of dropped targets exceeded expected length, expected %v, got %v", expectedLength, len(sp.droppedTargets)) - } - if sp.droppedTargets[0].DiscoveredLabels().String() != expectedLabelSetString { - t.Fatalf("Got %v, expected %v", sp.droppedTargets[0].DiscoveredLabels().String(), expectedLabelSetString) - } + require.Equal(t, expectedLength, len(sp.droppedTargets)) + require.Equal(t, expectedLength, sp.droppedTargetsCount) + require.Equal(t, expectedLabelSetString, sp.droppedTargets[0].DiscoveredLabels().String()) + + // Check that count is still correct when we don't retain all dropped targets. + sp.config.KeepDroppedTargets = 1 + sp.Sync(tgs) + require.Equal(t, 1, len(sp.droppedTargets)) + require.Equal(t, expectedLength, sp.droppedTargetsCount) } // TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated From 611f50bb3d362e07f5a98a6f806e7826b21e3f47 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 20 Aug 2023 14:30:36 +0100 Subject: [PATCH 50/82] scrape: retain all dropped targets when KeepDroppedTargets is zero This was a bug. 
Signed-off-by: Bryan Boreham --- scrape/scrape.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scrape/scrape.go b/scrape/scrape.go index 40836afc2..b52616a01 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -529,7 +529,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { case nonEmpty: all = append(all, t) case !t.discoveredLabels.IsEmpty(): - if sp.config.KeepDroppedTargets != 0 && uint(len(sp.droppedTargets)) < sp.config.KeepDroppedTargets { + if sp.config.KeepDroppedTargets == 0 || uint(len(sp.droppedTargets)) < sp.config.KeepDroppedTargets { sp.droppedTargets = append(sp.droppedTargets, t) } sp.droppedTargetsCount++ From 4787c879bce2b44a8a5634d94f1c8b847dc07875 Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Mon, 21 Aug 2023 13:28:06 +0800 Subject: [PATCH 51/82] add more elaborate benchmark test Signed-off-by: Ziqi Zhao --- model/histogram/float_histogram.go | 6 +- model/histogram/float_histogram_test.go | 82 +++++++++---------------- 2 files changed, 32 insertions(+), 56 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 0a65dafbb..ffd991d86 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -279,8 +279,8 @@ func (h *FloatHistogram) AddNew(other *FloatHistogram) *FloatHistogram { // TODO(beorn7): If needed, this can be optimized by inspecting the // spans in other and create missing buckets in h in batches. - h.PositiveSpans, h.PositiveBuckets = mergeTwoSpans(h.Schema, h.ZeroThreshold, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = mergeTwoSpans(h.Schema, h.ZeroThreshold, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -1080,7 +1080,7 @@ func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, ta return targetSpans, targetBuckets } -func mergeTwoSpans(schema int32, threshold float64, spansA []Span, bucketsA []float64, spansB []Span, bucketsB []float64) ([]Span, []float64) { +func addBuckets(schema int32, threshold float64, spansA []Span, bucketsA []float64, spansB []Span, bucketsB []float64) ([]Span, []float64) { var ( iSpan int = -1 iBucket int = -1 diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 5d1e96931..bef9a1d6c 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -16,6 +16,7 @@ package histogram import ( "fmt" "math" + "math/rand" "testing" "github.com/stretchr/testify/require" @@ -2705,67 +2706,42 @@ func TestFloatHistogramAddNew(t *testing.T) { } func BenchmarkAddOld(b *testing.B) { - // run the Fib function b.N times - - f1 := &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 8, - Count: 21, - Sum: 1.234, - Schema: 0, - PositiveSpans: []Span{{-1, 4}, {0, 3}}, - PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, - NegativeSpans: []Span{{4, 2}, {1, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - } - - f2 := &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - Schema: 1, - PositiveSpans: []Span{{-4, 3}, {5, 5}}, - PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, - NegativeSpans: 
[]Span{{6, 3}, {6, 4}}, - NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, - } - for n := 0; n < b.N; n++ { + b.StopTimer() + f1 := createRandomFloatHistogram(50) + f2 := createRandomFloatHistogram(50) + b.StartTimer() f1.Add(f2) } } func BenchmarkAddNew(b *testing.B) { - // run the Fib function b.N times - - f1 := &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 8, - Count: 21, - Sum: 1.234, - Schema: 0, - PositiveSpans: []Span{{-1, 4}, {0, 3}}, - PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, - NegativeSpans: []Span{{4, 2}, {1, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - } - - f2 := &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - Schema: 1, - PositiveSpans: []Span{{-4, 3}, {5, 5}}, - PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, - NegativeSpans: []Span{{6, 3}, {6, 4}}, - NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, - } - for n := 0; n < b.N; n++ { + b.StopTimer() + f1 := createRandomFloatHistogram(50) + f2 := createRandomFloatHistogram(50) + b.StartTimer() f1.AddNew(f2) } - +} + +func createRandomFloatHistogram(spanNum int32) *FloatHistogram { + f := &FloatHistogram{} + f.PositiveSpans, f.PositiveBuckets = createRandomSpans(spanNum) + f.NegativeSpans, f.NegativeBuckets = createRandomSpans(spanNum) + return f +} + +func createRandomSpans(spanNum int32) ([]Span, []float64) { + Spans := make([]Span, spanNum) + Buckets := make([]float64, 0) + for i := 0; i < int(spanNum); i++ { + Spans[i].Offset = rand.Int31n(spanNum) + 1 + Spans[i].Length = uint32(rand.Int31n(spanNum) + 1) + for j := 0; j < int(Spans[i].Length); j++ { + Buckets = append(Buckets, float64(rand.Int31n(spanNum)+1)) + } + } + return Spans, Buckets } From 2a781ec5ac5643f14605ed37078a6daa5189caba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Aug 2023 13:12:45 +0200 Subject: [PATCH 52/82] Replicate infinite loop in native-classic histogram scrape MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Enable scraping a native histogram with exemplars that leads to infinite loop. Signed-off-by: György Krajcsovits --- scrape/scrape_test.go | 116 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 108 insertions(+), 8 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 8578f1bec..1a3d83755 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1982,13 +1982,14 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { func TestScrapeLoopAppendExemplar(t *testing.T) { tests := []struct { - title string - scrapeText string - contentType string - discoveryLabels []string - floats []floatSample - histograms []histogramSample - exemplars []exemplar.Exemplar + title string + scrapeClassicHistograms bool + scrapeText string + contentType string + discoveryLabels []string + floats []floatSample + histograms []histogramSample + exemplars []exemplar.Exemplar }{ { title: "Metric without exemplars", @@ -2142,6 +2143,105 @@ metric: < {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}, }, }, + { + title: "Native histogram with two exemplars scraped as classic histogram", + scrapeText: `name: "test_histogram" +help: "Test histogram with many buckets removed to keep it manageable in size." 
+type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +> + +`, + scrapeClassicHistograms: true, + contentType: "application/vnd.google.protobuf", + histograms: []histogramSample{{ + t: 1234568, + h: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + }}, + exemplars: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}, + }, + }, } for _, test := range tests { @@ -2168,7 +2268,7 @@ metric: < nil, 0, 0, - false, + test.scrapeClassicHistograms, false, false, nil, From 2ae8c2bd3d91519b0b342897d1f1e6dc64496348 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Aug 2023 13:55:13 +0200 Subject: [PATCH 53/82] Set expected values in test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The parsing doesn't seem to be perfect as I don't get all classic buckets possibly another bug found? 
Signed-off-by: György Krajcsovits --- scrape/scrape_test.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 1a3d83755..3a5d3df06 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -2217,6 +2217,14 @@ metric: < `, scrapeClassicHistograms: true, contentType: "application/vnd.google.protobuf", + floats: []floatSample{ + {metric: labels.FromStrings("__name__", "test_histogram_count"), t: 1234568, f: 175}, + {metric: labels.FromStrings("__name__", "test_histogram_sum"), t: 1234568, f: 0.0008280461746287094}, + {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), t: 1234568, f: 2}, + // {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), t: 1234568, f: 4}, + // {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), t: 1234568, f: 16}, + {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175}, + }, histograms: []histogramSample{{ t: 1234568, h: &histogram.Histogram{ @@ -2240,6 +2248,8 @@ metric: < exemplars: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}, {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}, + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}, }, }, } @@ -2278,6 +2288,9 @@ metric: < now := time.Now() for i := range test.floats { + if test.floats[i].t != 0 { + continue + } test.floats[i].t = timestamp.FromTime(now) } From e846736134b53ec982a6f93c4381dc7046c5dd32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Aug 2023 14:13:49 +0200 Subject: [PATCH 54/82] Fix typo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- model/textparse/protobufparse.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index c111bb065..a9c040d33 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -293,7 +293,7 @@ func (p *ProtobufParser) Metric(l *labels.Labels) string { // Exemplar writes the exemplar of the current sample into the passed // exemplar. It returns if an exemplar exists or not. In case of a native // histogram, the legacy bucket section is still used for exemplars. To ingest -// all examplars, call the Exemplar method repeatedly until it returns false. +// all exemplars, call the Exemplar method repeatedly until it returns false. func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { m := p.mf.GetMetric()[p.metricPos] var exProto *dto.Exemplar From 983c0c5e9d2b726c96c58414b24942f3ee1b54a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Aug 2023 14:44:53 +0200 Subject: [PATCH 55/82] Add missing buckets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit My previous proposal for a fix was wrong and also missed these. 
Signed-off-by: György Krajcsovits --- scrape/scrape_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 3a5d3df06..b680bb337 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -2221,8 +2221,8 @@ metric: < {metric: labels.FromStrings("__name__", "test_histogram_count"), t: 1234568, f: 175}, {metric: labels.FromStrings("__name__", "test_histogram_sum"), t: 1234568, f: 0.0008280461746287094}, {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), t: 1234568, f: 2}, - // {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), t: 1234568, f: 4}, - // {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), t: 1234568, f: 16}, + {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), t: 1234568, f: 4}, + {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), t: 1234568, f: 16}, {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175}, }, histograms: []histogramSample{{ From 3d9a830f2f545ed64d1dc55438a5b9141a01acdf Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 22 Aug 2023 20:48:52 +0200 Subject: [PATCH 56/82] textparse: Expose #12731 in protobufparse_test.go Signed-off-by: beorn7 --- model/textparse/protobufparse_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index 53523a5dd..5436d7f3e 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -1779,6 +1779,7 @@ func TestProtobufParse(t *testing.T) { } else { require.Equal(t, true, found, "i: %d", i) require.Equal(t, exp[i].e[0], e, "i: %d", i) + require.False(t, p.Exemplar(&e), "too many exemplars returned, i: %d", i) } case EntryHistogram: From 65ccf4460afc3f83adb231b3ec15073ecc924f14 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 22 Aug 2023 21:03:54 +0200 Subject: [PATCH 57/82] textparse: Fix endless loop #12731 PR #12557 introduced the possibility of parsing multiple exemplars per native histograms. It did so by requiring the `Exemplar` method of the parser to be called repeatedly until it returns false. However, the protobuf parser code wasn't correctly updated for the old case of a single exemplar for a classic bucket (if actually parsed as a classic bucket) and a single exemplar on a counter. In those cases, the method would return `true` forever, yielding the same exemplar again and again, leading to an endless loop. With this fix, the state is now tracked and the single exemplar is only returned once. Signed-off-by: beorn7 --- model/textparse/protobufparse.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index a9c040d33..fbb84a2bd 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -56,6 +56,10 @@ type ProtobufParser struct { fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram. + // exemplarReturned is set to true each time an exemplar has been + // returned, and set back to false upon each Next() call. + exemplarReturned bool + // state is marked by the entry we are processing. 
EntryInvalid implies // that we have to decode the next MetricFamily. state Entry @@ -295,6 +299,10 @@ func (p *ProtobufParser) Metric(l *labels.Labels) string { // histogram, the legacy bucket section is still used for exemplars. To ingest // all exemplars, call the Exemplar method repeatedly until it returns false. func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { + if p.exemplarReturned && p.state == EntrySeries { + // We only ever return one exemplar per (non-native-histogram) series. + return false + } m := p.mf.GetMetric()[p.metricPos] var exProto *dto.Exemplar switch p.mf.GetType() { @@ -335,6 +343,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { } p.builder.Sort() ex.Labels = p.builder.Labels() + p.exemplarReturned = true return true } @@ -342,6 +351,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { // text format parser). It returns (EntryInvalid, io.EOF) if no samples were // read. func (p *ProtobufParser) Next() (Entry, error) { + p.exemplarReturned = false switch p.state { case EntryInvalid: p.metricPos = 0 From aa82fe198f66f4e2a41647fd0845834acf365e0e Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 22 Aug 2023 21:51:56 +0200 Subject: [PATCH 58/82] tsdb: Fix histogram validation So far, `ValidateHistogram` would not detect if the count did not include the count in the zero bucket. This commit fixes the problem and updates all the tests that have been undetected offenders so far. Note that this problem would only ever create false negatives, so we never falsely rejected to store a histogram because of it. On the other hand, `ValidateFloatHistogram` has been to strict with the count being at least as large as the sum of the counts in all the buckets. Float precision issues could create false positives here, see products of PromQL evaluations, it's actually quite hard to put an upper limit no the floating point imprecision. Users could produce the weirdest expressions, maxing out float precision problems. Therefore, this commit simply removes that particular check from `ValidateFloatHistogram`. Signed-off-by: beorn7 --- promql/engine_test.go | 26 +++++++------- storage/remote/read_handler_test.go | 2 +- tsdb/block_test.go | 4 +-- tsdb/compact_test.go | 2 +- tsdb/db_test.go | 4 +-- tsdb/head_append.go | 11 +++--- tsdb/head_test.go | 53 +++++++++++++++++------------ tsdb/tsdbutil/histogram.go | 4 +-- web/federate_test.go | 3 +- 9 files changed, 59 insertions(+), 50 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go index 0df969375..154a45514 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3182,7 +3182,7 @@ func TestNativeHistogramRate(t *testing.T) { Schema: 1, ZeroThreshold: 0.001, ZeroCount: 1. / 15., - Count: 8. / 15., + Count: 9. / 15., Sum: 1.226666666666667, PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, @@ -3223,7 +3223,7 @@ func TestNativeFloatHistogramRate(t *testing.T) { Schema: 1, ZeroThreshold: 0.001, ZeroCount: 1. / 15., - Count: 8. / 15., + Count: 9. / 15., Sum: 1.226666666666667, PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. 
/ 15.}, @@ -3996,7 +3996,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { { CounterResetHint: histogram.GaugeType, Schema: 0, - Count: 21, + Count: 25, Sum: 1234.5, ZeroThreshold: 0.001, ZeroCount: 4, @@ -4014,7 +4014,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { { CounterResetHint: histogram.GaugeType, Schema: 0, - Count: 36, + Count: 41, Sum: 2345.6, ZeroThreshold: 0.001, ZeroCount: 5, @@ -4034,7 +4034,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { { CounterResetHint: histogram.GaugeType, Schema: 0, - Count: 36, + Count: 41, Sum: 1111.1, ZeroThreshold: 0.001, ZeroCount: 5, @@ -4061,7 +4061,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { Schema: 0, ZeroThreshold: 0.001, ZeroCount: 14, - Count: 93, + Count: 107, Sum: 4691.2, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 7}, @@ -4078,7 +4078,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { Schema: 0, ZeroThreshold: 0.001, ZeroCount: 3.5, - Count: 23.25, + Count: 26.75, Sum: 1172.8, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 7}, @@ -4189,7 +4189,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { histograms: []histogram.Histogram{ { Schema: 0, - Count: 36, + Count: 41, Sum: 2345.6, ZeroThreshold: 0.001, ZeroCount: 5, @@ -4224,7 +4224,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { }, expected: histogram.FloatHistogram{ Schema: 0, - Count: 25, + Count: 30, Sum: 1111.1, ZeroThreshold: 0.001, ZeroCount: 2, @@ -4245,7 +4245,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { histograms: []histogram.Histogram{ { Schema: 0, - Count: 36, + Count: 41, Sum: 2345.6, ZeroThreshold: 0.001, ZeroCount: 5, @@ -4280,7 +4280,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { }, expected: histogram.FloatHistogram{ Schema: 0, - Count: 25, + Count: 30, Sum: 1111.1, ZeroThreshold: 0.001, ZeroCount: 2, @@ -4315,7 +4315,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { }, { Schema: 0, - Count: 36, + Count: 41, Sum: 2345.6, ZeroThreshold: 0.001, ZeroCount: 5, @@ -4335,7 +4335,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { }, expected: histogram.FloatHistogram{ Schema: 0, - Count: -25, + Count: -30, Sum: -1111.1, ZeroThreshold: 0.001, ZeroCount: -2, diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go index 3d9182640..7e8618615 100644 --- a/storage/remote/read_handler_test.go +++ b/storage/remote/read_handler_test.go @@ -421,7 +421,7 @@ func TestStreamReadEndpoint(t *testing.T) { { Type: prompb.Chunk_FLOAT_HISTOGRAM, MaxTimeMs: 7140000, - Data: 
[]byte("\x00x\x00\xff?PbM\xd2\xf1\xa9\xfc\x8c\xa4\x94e$\xa2@$\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00@2ffffff?\xf0\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00\xf8\xea`\xd6%\xec\a\xa4?\x84\xbf\xff\xb0\x1e\x12\xff\xfe\x12\xff\xfe\x12\xff\xfe\xc0xK\xff\xf8K\xff\xe95\x85\xec\xd2\x7f\xff\xff\xff\xff\xff\xf6\x03\xd6\x17\xb0\x1e\xc0{\x01\xeb\v\xd8\x0f`6\x91\xfd\xed\a\xaf\\\xff\xff\xff\xff\xff\xff\xeb\v\xda\x0fX^\xb0\xbda{A\xeb\v\xd6\x16\x82l\v\x8a\xcc\xcc\xcc\xcc\xccʹ\x1e\xc0\xbd\xa0\xf6\x83\xda\x0f`^\xd0{A\xa1\x932fffffg`\\\xec\v\xd8\x17\xb0.v\x05\xec\vA5\t\xfa\x87\xef:\x84\xf9\x99\xd4'̵\x8d\xde\xe0{\xb2\x9f\xff\xff\xff\xff\xff\xf5\t\xfb\x81\xea\x13\xf5\t\xfa\x84\xfd\xc0\xf5\t\xfa\x84\xf4&н\xb9\xedUUUUU]\xc0\xf6\x85\xee\a\xb8\x1e\xe0{B\xf7\x03\xdc\r\x193\xb333333\xda\x17;B\xf6\x85\xed\v\x9d\xa1{BЛ\x03\xfb2\xe4\xcc\xcc\xcc\xcc\xcc\xe7`~fv\a\xe6S\x91ݕ\xaa\xaa\xaa\xaa\xaa\xab\xb0?\x1d\x81\xfd\x81\xfd\x81\xf8\xec\x0f\xec\x0f\xa1'\xb7<\xff\xff\xff\xff\xff\xff\x19\xc61\x9cb\x8c\x8e\xbd{\xff\xff\xff\xff\xff\xff8\xces\x8c\xe6\x84\xd6'\xc1Y\x99\x99\x99\x99\x99\x8e\xb1>1\x8e\xb1>1j#\xefx8d\xcc\xcc\xcc\xcc\xcc\xda\xc4\xfd\xe0\xf5\x89\xfa\xc4\xfdb~\xf0z\xc4\xfdbz\x04\xdc\x17\a\xaa\xaa\xaa\xaa\xaa\xabx=\xc1{\xc1\xef\a\xbc\x1e\xe0\xbd\xe0\xf7\x83A\x93\x1c\xff\xff\xff\xff\xff\xffp\\\xee\v\xdc\x17\xb8.w\x05\xee\v@\x9bC\xf0Z\xaa\xaa\xaa\xaa\xaa\xa7h~fv\x87\xe6P\xe4al\xcc\xcc\xcc\xcc\xcc\xed\x0f\xc7h\x7fh\x7fh~;C\xfbC\xe8\x12sə\x99\x99\x99\x99\xa38\xc63\x8cPd`\xb5UUUUUN3\x9c\xe39\xa0M\x82|3\xff\xff\xff\xff\xff\xf8\xec\x13\xe3\x18\xec\x13\xe3\x14y\f\x1e\xaa\xaa\xaa\xaa\xaa\xad\x82|;\x04\xfd\x82~\xc1>\x1d\x82~\xc1=\x02G\x1c\x99\x99\x99\x99\x99\x9a\x18\xe1\x86\x18\xe1\x85\x06C\x05ffffff8c\x8e8c\x8d\x02O\v\xaa\xaa\xaa\xaa\xaa\xaa\x19\xe1\x86\x19\xe1\x85\x0eC\xa3\x8f\xf1UUUUUY\xe1\x9ey\xe1\x9et\t\x1c\x01j\xaa\xaa\xaa\xaa\xab\fp\xc3\fp\u0083!\x80{33333#\x868\xe3\x868\xd0&\x91\xff\xc0\x12fffffp\xe9\x1f\xfc0ä\x7f\xf0\xc2\xd6G\xdf\x00p\x1d\xaa\xaa\xaa\xaa\xaa\xae\x91\xff\xf0\a\xa4\x7f\xfaG\xff\xa4\x7f\xfc\x01\xe9\x1f\xfe\x91\xff\xa0M\xe1p\x04\xff\xff\xff\xff\xff\xff\x00{\xc2\xf8\x03\xe0\x0f\x80=\xe1|\x01\xf0\x06\x83&\x01uUUUUU\xde\x17;\xc2\xf7\x85\xef\v\x9d\xe1{\xc2\xd0&\xe0\xfc\x0fY\x99\x99\x99\x99\x99;\x83\xf33\xb8?2\x87#\x00I\x99\x99\x99\x99\x99\xee\x0f\xc7p\x7fp\x7fp~;\x83\xfb\x83\xe8\x12p\x0f\xaa\xaa\xaa\xaa\xaa\xacg\x18\xc6q\x8a\f\x8c\x01?\xff\xff\xff\xff\xff8\xces\x8c\xe6\x816\x89\xf0\x1d\xaa\xaa\xaa\xaa\xaa\xacv\x89\xf1\x8cv\x89\xf1\x8a<\x86\x01l\xcc\xcc\xcc\xcc\xcc\xda'ôO\xda'\xed\x13\xe1\xda'\xed\x13\xd0$p\x04\x99\x99\x99\x99\x99\x9c1\xc3\f1\xc3\n\f\x86\x0f\xb5UUUUU\x8e\x18\xe3\x8e\x18\xe3@\x93\xc0\x13\xff\xff\xff\xff\xff\xf0\xcf\f0\xcf\f(r\x18\a\xd5UUUUVxg\x9exg\x9d\x02G\x00I\x99\x99\x99\x99\x99\xc3\x1c0\xc3\x1c0\xa0\xc8`:\xcc\xcc\xcc\xcc\xcc\xc8\xe1\x8e8\xe1\x8e4\t\xb0_\xc0.\xaa\xaa\xaa\xaa\xaa\xb0\xec\x17\xf0\xc3\x0e\xc1\x7f\f)\xf2\f\x01?\xff\xff\xff\xff\xff\xb0_\xc1\xd8/\xf6\v\xfd\x82\xfe\x0e\xc1\x7f\xb0_\xa0Hp=\xaa\xaa\xaa\xaa\xaa\xac\x18p`\xc1\x87\x06\n\f\x83\x00I\x99\x99\x99\x99\x99Ã\x0e\x1c80\xe1\xa0H\xf0\x0ffffffd\x18\xf0`\xc1\x8f\x06\n\x1c\x83\x00Z\xaa\xaa\xaa\xaa\xaaǃ\x1e|\xf83\xe7\xa0Hp\x03\xd5UUUUT\x18p`\xc1\x87\x06\n\f\x83\x00g\xff\xff\xff\xff\xffÃ\x0e\x1c80\xe1\xa0H\xf0\x02\xd5UUUUT\x18\xf0`\xc1\x8f\x06\n\x1c\x83\x00\xdb33333G\x83\x1e 
\xf8\x83\xe0\x17\xc4\x1f\x10h\x03&\x00I\x99\x99\x99\x99\x99\xe0\x17<\x02\xf8\x05\xf0\v\x9e\x01|\x02\xd0\x02o\x0f\xc07UUUUUS\xbc?3;\xc3\xf3(\a#\x00g\xff\xff\xff\xff\xff\xef\x0f\xc7x\x7fx\x7fx~;\xc3\xfb\xc3\xe8\x01'\x00-UUUUUFq\x8cg\x18\xa0\f\x8c\x0f\xec\xcc\xcc\xcc\xcc\xcd8\xces\x8c\xe6\x80\x13p\x9f\x00$\xcc\xcc\xcc\xcc\xcc\xc7p\x9f\x18\xc7p\x9f\x18\xa0<\x86\x00ڪ\xaa\xaa\xaa\xaa\xdc'øO\xdc'\xee\x13\xe1\xdc'\xee\x13\xd0\x02G\x00'\xff\xff\xff\xff\xff\xc3\x1c0\xc3\x1c0\xa0\f\x86\x01\xba\xaa\xaa\xaa\xaa\xaa\x8e\x18\xe3\x8e\x18\xe3@\t<\x01\xac\xcc\xcc\xcc\xcc\xcd\f\xf0\xc3\f\xf0\u0080r\x18\x01&fffffxg\x9exg\x9d\x00$p\x1f\xd5UUUUT1\xc3\f1\xc3\n\x00\xc8`\x04\xff\xff\xff\xff\xff\xf8\xe1\x8e8\xe1\x8e4\x00\x9bE\xfc\x01\xb5UUUUU\x0e\xd1\x7f\f0\xed\x17\xf0\u0081\xf2\f\x03l\xcc\xcc\xcc\xccʹ_\xc1\xda/\xf6\x8b\xfd\xa2\xfe\x0e\xd1\x7f\xb4_\xa0\x04\x87\x00$\xcc\xcc\xcc\xcc\xcc\xc1\x87\x06\f\x18p`\xa0\f\x83\x00mUUUUUC\x83\x0e\x1c80\xe1\xa0\x04\x8f\x00'\xff\xff\xff\xff\xff\xc1\x8f\x06\f\x18\xf0`\xa0\x1c\x83\a\xfdUUUUUG\x83\x1e|\xf83\xe7\xa0\x04\x87\x00mUUUUUA\x87\x06\f\x18p`\xa0\f\x83\x00$\xcc\xcc\xcc\xcc\xccÃ\x0e\x1c80\xe1\xa0\x04\x8f\x01\xfb33333A\x8f\x06\f\x18\xf0`\xa0\x1c\x83\x00-UUUUUG\x83\x1e|\3703\347\264\031\3770\340\007\252\252\252\252\252\2500\340\301\203\016\014\027\021&\014\001\237\377\377\377\377\377\016\0148p\340\303\206\340.\343\300\013UUUUUPc\301\203\006<\030)0`\033fffffh\360c\307\217\006\367\t\360\002L\314\314\314\314\314w\t\361\214w\t\361\212\t\206\000\332\252\252\252\252\252\334'\303\270O\334'\356\023\341\334'\356\023\320\374p\002\177\377\377\377\377\3741\303\0141\303\n\t\206\001\272\252\252\252\252\252\216\030\343\216\030\343Gs\300\032\314\314\314\314\314\320\317\0140\317\014(&\030\001&fffffxg\236xg\235\013\307\001\375UUUUUC\0340\303\0340\247\230`\004\377\377\377\377\377\370\341\2168\341\2164\027\264_\300\033UUUUUP\355\027\360\303\016\321\177\014(f\014\003l\314\314\314\314\315\264_\301\332/\366\213\375\242\376\016\321\177\264_\240\370p\002L\314\314\314\314\314\030p`\301\207\006\n9\203\000mUUUUUC\203\016\03480\341\240\270\360\002\177\377\377\377\377\374\030\360`\301\217\006\n\031\203\007\375UUUUUG\203\036\201\360p\001?\377\377\377\377\374\014\034\014\014\014\034\014\013Y\177\366\006\000]UUUUU\203\201\203\203\203\201\203\203@.\036\000\35333333\201\207\201\201\201\207\201\201@f\006\000$\314\314\314\314\314\207\201\207\207\207\201\207\207@\336\016\000}UUUUU\201\203\201\201\201\203\201\201@&\006\000'\377\377\377\377\377\203\201\203\203\203\201\203\203@n>\001\355UUUUU\201\217\201\201\201\217\201\201@&\006\000[33333\217\201\217\217\217\201\217\217"), }, }, }, diff --git a/tsdb/block_test.go b/tsdb/block_test.go index e9dc1a9d0..e0d928aca 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -621,7 +621,7 @@ func genSeries(totalSeries, labelCount int, mint, maxt int64) []storage.Series { func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, floatHistogram bool) []storage.Series { return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) tsdbutil.Sample { h := &histogram.Histogram{ - Count: 5 + uint64(ts*4), + Count: 7 + uint64(ts*5), ZeroCount: 2 + uint64(ts), ZeroThreshold: 0.001, Sum: 18.4 * rand.Float64(), @@ -660,7 +660,7 @@ func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step in s = sample{t: ts, f: rand.Float64()} } else { h := &histogram.Histogram{ - Count: 5 + uint64(ts*4), + Count: 7 + uint64(ts*5), ZeroCount: 2 + uint64(ts), ZeroThreshold: 0.001, Sum: 18.4 * rand.Float64(), diff --git 
a/tsdb/compact_test.go b/tsdb/compact_test.go index d20918268..512a6ecfb 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -1364,7 +1364,7 @@ func TestHeadCompactionWithHistograms(t *testing.T) { exp1, exp2, exp3, exp4 []tsdbutil.Sample ) h := &histogram.Histogram{ - Count: 11, + Count: 15, ZeroCount: 4, ZeroThreshold: 0.001, Sum: 35.5, diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 5f36e4333..e8b6d7c3b 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -6133,7 +6133,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { } baseH := &histogram.Histogram{ - Count: 11, + Count: 15, ZeroCount: 4, ZeroThreshold: 0.001, Sum: 35.5, @@ -6506,7 +6506,7 @@ func TestNativeHistogramFlag(t *testing.T) { require.NoError(t, db.Close()) }) h := &histogram.Histogram{ - Count: 6, + Count: 10, ZeroCount: 4, ZeroThreshold: 0.001, Sum: 35.5, diff --git a/tsdb/head_append.go b/tsdb/head_append.go index f06aacba4..8c548fcd9 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -659,7 +659,7 @@ func ValidateHistogram(h *histogram.Histogram) error { return errors.Wrap(err, "positive side") } - if c := nCount + pCount; c > h.Count { + if c := nCount + pCount + h.ZeroCount; c > h.Count { return errors.Wrap( storage.ErrHistogramCountNotBigEnough, fmt.Sprintf("%d observations found in buckets, but the Count field is %d", c, h.Count), @@ -686,12 +686,9 @@ func ValidateFloatHistogram(h *histogram.FloatHistogram) error { return errors.Wrap(err, "positive side") } - if c := nCount + pCount; c > h.Count { - return errors.Wrap( - storage.ErrHistogramCountNotBigEnough, - fmt.Sprintf("%f observations found in buckets, but the Count field is %f", c, h.Count), - ) - } + // We do not check for h.Count being at least as large as the sum of the + // counts in the buckets because floating point precision issues can + // create false positives here. return nil } diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 9b49aca03..c7fea3f9a 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -4710,42 +4710,42 @@ func TestReplayAfterMmapReplayError(t *testing.T) { func TestHistogramValidation(t *testing.T) { tests := map[string]struct { - h *histogram.Histogram - errMsg string - errMsgFloat string // To be considered for float histogram only if it is non-empty. 
+ h *histogram.Histogram + errMsg string + skipFloat bool }{ "valid histogram": { h: tsdbutil.GenerateTestHistograms(1)[0], }, - "rejects histogram who has too few negative buckets": { + "rejects histogram that has too few negative buckets": { h: &histogram.Histogram{ NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, NegativeBuckets: []int64{}, }, errMsg: `negative side: spans need 1 buckets, have 0 buckets`, }, - "rejects histogram who has too few positive buckets": { + "rejects histogram that has too few positive buckets": { h: &histogram.Histogram{ PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, PositiveBuckets: []int64{}, }, errMsg: `positive side: spans need 1 buckets, have 0 buckets`, }, - "rejects histogram who has too many negative buckets": { + "rejects histogram that has too many negative buckets": { h: &histogram.Histogram{ NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, NegativeBuckets: []int64{1, 2}, }, errMsg: `negative side: spans need 1 buckets, have 2 buckets`, }, - "rejects histogram who has too many positive buckets": { + "rejects histogram that has too many positive buckets": { h: &histogram.Histogram{ PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, PositiveBuckets: []int64{1, 2}, }, errMsg: `positive side: spans need 1 buckets, have 2 buckets`, }, - "rejects a histogram which has a negative span with a negative offset": { + "rejects a histogram that has a negative span with a negative offset": { h: &histogram.Histogram{ NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, NegativeBuckets: []int64{1, 2}, @@ -4759,21 +4759,21 @@ func TestHistogramValidation(t *testing.T) { }, errMsg: `positive side: span number 2 with offset -1`, }, - "rejects a histogram which has a negative bucket with a negative count": { + "rejects a histogram that has a negative bucket with a negative count": { h: &histogram.Histogram{ NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}}, NegativeBuckets: []int64{-1}, }, errMsg: `negative side: bucket number 1 has observation count of -1`, }, - "rejects a histogram which has a positive bucket with a negative count": { + "rejects a histogram that has a positive bucket with a negative count": { h: &histogram.Histogram{ PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}}, PositiveBuckets: []int64{-1}, }, errMsg: `positive side: bucket number 1 has observation count of -1`, }, - "rejects a histogram which which has a lower count than count in buckets": { + "rejects a histogram that has a lower count than count in buckets": { h: &histogram.Histogram{ Count: 0, NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}}, @@ -4781,25 +4781,36 @@ func TestHistogramValidation(t *testing.T) { NegativeBuckets: []int64{1}, PositiveBuckets: []int64{1}, }, - errMsg: `2 observations found in buckets, but the Count field is 0`, - errMsgFloat: `2.000000 observations found in buckets, but the Count field is 0.000000`, + errMsg: `2 observations found in buckets, but the Count field is 0`, + skipFloat: true, + }, + "rejects a histogram that doesn't count the zero bucket in its count": { + h: &histogram.Histogram{ + Count: 2, + ZeroCount: 1, + NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}}, + PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{1}, + PositiveBuckets: []int64{1}, + }, + errMsg: `3 observations found in buckets, but the Count field is 2`, + skipFloat: true, }, } for testName, tc := range tests { t.Run(testName, func(t *testing.T) { - switch err 
:= ValidateHistogram(tc.h); { - case tc.errMsg != "": + if err := ValidateHistogram(tc.h); tc.errMsg != "" { require.ErrorContains(t, err, tc.errMsg) - default: + } else { require.NoError(t, err) } - switch err := ValidateFloatHistogram(tc.h.ToFloat()); { - case tc.errMsgFloat != "": - require.ErrorContains(t, err, tc.errMsgFloat) - case tc.errMsg != "": + if tc.skipFloat { + return + } + if err := ValidateFloatHistogram(tc.h.ToFloat()); tc.errMsg != "" { require.ErrorContains(t, err, tc.errMsg) - default: + } else { require.NoError(t, err) } }) diff --git a/tsdb/tsdbutil/histogram.go b/tsdb/tsdbutil/histogram.go index 2145034e1..8270da686 100644 --- a/tsdb/tsdbutil/histogram.go +++ b/tsdb/tsdbutil/histogram.go @@ -33,7 +33,7 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) { // GenerateTestHistogram but it is up to the user to set any known counter reset hint. func GenerateTestHistogram(i int) *histogram.Histogram { return &histogram.Histogram{ - Count: 10 + uint64(i*8), + Count: 12 + uint64(i*9), ZeroCount: 2 + uint64(i), ZeroThreshold: 0.001, Sum: 18.4 * float64(i+1), @@ -78,7 +78,7 @@ func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) { // GenerateTestFloatHistogram but it is up to the user to set any known counter reset hint. func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { return &histogram.FloatHistogram{ - Count: 10 + float64(i*8), + Count: 12 + float64(i*9), ZeroCount: 2 + float64(i), ZeroThreshold: 0.001, Sum: 18.4 * float64(i+1), diff --git a/web/federate_test.go b/web/federate_test.go index 2d3542ddc..30db0d640 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -306,7 +306,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { db := storage.DB hist := &histogram.Histogram{ - Count: 10, + Count: 12, ZeroCount: 2, ZeroThreshold: 0.001, Sum: 39.4, @@ -359,6 +359,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { }) default: hist.ZeroCount++ + hist.Count++ _, err = app.AppendHistogram(0, l, 100*60*1000, hist.Copy(), nil) expVec = append(expVec, promql.Sample{ T: 100 * 60 * 1000, From eab3c93e80e4d098b4478d4d2687c5ca593b70b7 Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Wed, 23 Aug 2023 12:52:24 +0800 Subject: [PATCH 59/82] make code ready for review Signed-off-by: Ziqi Zhao --- model/histogram/float_histogram.go | 112 ++---- model/histogram/float_histogram_test.go | 493 ------------------------ 2 files changed, 30 insertions(+), 575 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index ffd991d86..7c0ea8672 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -218,56 +218,6 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { h.Count += other.Count h.Sum += other.Sum - // TODO(beorn7): If needed, this can be optimized by inspecting the - // spans in other and create missing buckets in h in batches. 
- var iInSpan, index int32 - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - return h -} - -func (h *FloatHistogram) AddNew(other *FloatHistogram) *FloatHistogram { - switch { - case other.CounterResetHint == h.CounterResetHint: - // Adding apples to apples, all good. No need to change anything. - case h.CounterResetHint == GaugeType: - // Adding something else to a gauge. That's probably OK. Outcome is a gauge. - // Nothing to do since the receiver is already marked as gauge. - case other.CounterResetHint == GaugeType: - // Similar to before, but this time the receiver is "something else" and we have to change it to gauge. - h.CounterResetHint = GaugeType - case h.CounterResetHint == UnknownCounterReset: - // With the receiver's CounterResetHint being "unknown", this could still be legitimate - // if the caller knows what they are doing. Outcome is then again "unknown". - // No need to do anything since the receiver's CounterResetHint is already "unknown". - case other.CounterResetHint == UnknownCounterReset: - // Similar to before, but now we have to set the receiver's CounterResetHint to "unknown". - h.CounterResetHint = UnknownCounterReset - default: - // All other cases shouldn't actually happen. - // They are a direct collision of CounterReset and NotCounterReset. - // Conservatively set the CounterResetHint to "unknown" and isse a warning. - h.CounterResetHint = UnknownCounterReset - // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place - } - - otherZeroCount := h.reconcileZeroBuckets(other) - h.ZeroCount += otherZeroCount - h.Count += other.Count - h.Sum += other.Sum - otherPositiveSpans := other.PositiveSpans otherPositiveBuckets := other.PositiveBuckets otherNegativeSpans := other.NegativeSpans @@ -277,10 +227,8 @@ func (h *FloatHistogram) AddNew(other *FloatHistogram) *FloatHistogram { otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) } - // TODO(beorn7): If needed, this can be optimized by inspecting the - // spans in other and create missing buckets in h in batches. 
- h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -291,25 +239,17 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { h.Count -= other.Count h.Sum -= other.Sum - // TODO(beorn7): If needed, this can be optimized by inspecting the - // spans in other and create missing buckets in h in batches. - var iInSpan, index int32 - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - b.Count *= -1 - h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - b.Count *= -1 - h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index + otherPositiveSpans := other.PositiveSpans + otherPositiveBuckets := other.PositiveBuckets + otherNegativeSpans := other.NegativeSpans + otherNegativeBuckets := other.NegativeBuckets + if other.Schema != h.Schema { + otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema) + otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -1080,16 +1020,19 @@ func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, ta return targetSpans, targetBuckets } -func addBuckets(schema int32, threshold float64, spansA []Span, bucketsA []float64, spansB []Span, bucketsB []float64) ([]Span, []float64) { +// addBuckets add two groups of Spans by inspecting the +// spans in other and create missing buckets in origin in batches. +func addBuckets(schema int32, threshold float64, negative bool, spansA []Span, bucketsA []float64, spansB []Span, bucketsB []float64) ([]Span, []float64) { var ( iSpan int = -1 iBucket int = -1 iInSpan int32 indexA int32 - indexB int32 = 0 - bIdxB int = 0 - lowerThanThreshold = true + indexB int32 + bIdxB int + bucketB float64 deltaIndex int32 + lowerThanThreshold = true ) for _, spanB := range spansB { @@ -1100,17 +1043,22 @@ func addBuckets(schema int32, threshold float64, spansA []Span, bucketsA []float } lowerThanThreshold = false + bucketB = bucketsB[bIdxB] + if negative { + bucketB *= -1 + } + if iSpan == -1 { if len(spansA) == 0 || spansA[0].Offset > indexB { // Add bucket before all others. 
bucketsA = append(bucketsA, 0) copy(bucketsA[1:], bucketsA) - bucketsA[0] = bucketsB[bIdxB] - if len(spansA) > 0 && spansA[0].Offset == indexB+1 { // bIndex just preceed spansA[0] by one step + bucketsA[0] = bucketB + if len(spansA) > 0 && spansA[0].Offset == indexB+1 { spansA[0].Length++ spansA[0].Offset-- goto nextLoop - } else { // if not create new span + } else { spansA = append(spansA, Span{}) copy(spansA[1:], spansA) spansA[0] = Span{Offset: indexB, Length: 1} @@ -1123,7 +1071,7 @@ func addBuckets(schema int32, threshold float64, spansA []Span, bucketsA []float } } else if spansA[0].Offset == indexB { // Just add to first bucket. - bucketsA[0] += bucketsB[bIdxB] + bucketsA[0] += bucketB goto nextLoop } iSpan, iBucket, iInSpan = 0, 0, 0 @@ -1136,7 +1084,7 @@ func addBuckets(schema int32, threshold float64, spansA []Span, bucketsA []float // Bucket is in current span. iBucket += int(deltaIndex) iInSpan += deltaIndex - bucketsA[iBucket] += bucketsB[bIdxB] + bucketsA[iBucket] += bucketB break } else { deltaIndex -= remainingInSpan @@ -1146,7 +1094,7 @@ func addBuckets(schema int32, threshold float64, spansA []Span, bucketsA []float // Bucket is in gap behind previous span (or there are no further spans). bucketsA = append(bucketsA, 0) copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) - bucketsA[iBucket] = bucketsB[bIdxB] + bucketsA[iBucket] = bucketB if deltaIndex == 0 { // Directly after previous span, extend previous span. if iSpan < len(spansA) { diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index bef9a1d6c..dd3e30427 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -16,7 +16,6 @@ package histogram import ( "fmt" "math" - "math/rand" "testing" "github.com/stretchr/testify/require" @@ -2253,495 +2252,3 @@ func TestFloatBucketIteratorTargetSchema(t *testing.T) { } require.False(t, it.Next(), "negative iterator not exhausted") } - -func TestFloatHistogramAddNew(t *testing.T) { - cases := []struct { - name string - in1, in2, expected *FloatHistogram - }{ - { - "same bucket layout", - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - PositiveSpans: []Span{{-2, 2}, {1, 3}}, - PositiveBuckets: []float64{1, 0, 3, 4, 7}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{3, 1, 5, 6}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 8, - Count: 21, - Sum: 1.234, - PositiveSpans: []Span{{-2, 2}, {1, 3}}, - PositiveBuckets: []float64{0, 0, 2, 3, 6}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 19, - Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{-2, 2}, {1, 3}}, - PositiveBuckets: []float64{1, 0, 5, 7, 13}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{4, 2, 9, 10}, - }, - }, - { - "same bucket layout, defined differently", - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - PositiveSpans: []Span{{-2, 2}, {1, 1}, {0, 2}}, - PositiveBuckets: []float64{1, 0, 3, 4, 7}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{3, 1, 5, 6}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 8, - Count: 21, - Sum: 1.234, - PositiveSpans: []Span{{-2, 2}, {1, 2}, {0, 1}}, - PositiveBuckets: []float64{0, 0, 2, 3, 6}, - NegativeSpans: []Span{{3, 7}}, - NegativeBuckets: []float64{1, 1, 0, 0, 0, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 19, - 
Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{-2, 2}, {1, 1}, {0, 2}}, - PositiveBuckets: []float64{1, 0, 5, 7, 13}, - NegativeSpans: []Span{{3, 5}, {0, 2}}, - NegativeBuckets: []float64{4, 2, 0, 0, 0, 9, 10}, - }, - }, - { - "non-overlapping spans", - &FloatHistogram{ - ZeroThreshold: 0.001, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - PositiveSpans: []Span{{-2, 2}, {2, 3}}, - PositiveBuckets: []float64{1, 0, 3, 4, 7}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{3, 1, 5, 6}, - }, - &FloatHistogram{ - ZeroThreshold: 0.001, - ZeroCount: 8, - Count: 21, - Sum: 1.234, - PositiveSpans: []Span{{0, 2}, {3, 3}}, - PositiveBuckets: []float64{5, 4, 2, 3, 6}, - NegativeSpans: []Span{{-9, 2}, {3, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.001, - ZeroCount: 19, - Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{-2, 4}, {0, 6}}, - PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, - NegativeSpans: []Span{{-9, 2}, {3, 2}, {5, 2}, {3, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, - }, - }, - { - "non-overlapping inverted order", - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 8, - Count: 21, - Sum: 1.234, - PositiveSpans: []Span{{0, 2}, {3, 3}}, - PositiveBuckets: []float64{5, 4, 2, 3, 6}, - NegativeSpans: []Span{{-9, 2}, {3, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - PositiveSpans: []Span{{-2, 2}, {2, 3}}, - PositiveBuckets: []float64{1, 0, 3, 4, 7}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{3, 1, 5, 6}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 19, - Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{-2, 2}, {0, 5}, {0, 3}}, - PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, - NegativeSpans: []Span{{-9, 2}, {3, 2}, {5, 2}, {3, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, - }, - }, - { - "overlapping spans", - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - PositiveSpans: []Span{{-2, 2}, {2, 3}}, - PositiveBuckets: []float64{1, 0, 3, 4, 7}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{3, 1, 5, 6}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 8, - Count: 21, - Sum: 1.234, - PositiveSpans: []Span{{-1, 4}, {0, 3}}, - PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, - NegativeSpans: []Span{{4, 2}, {1, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 19, - Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{-2, 4}, {0, 4}}, - PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, - NegativeSpans: []Span{{3, 3}, {1, 3}}, - NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, - }, - }, - { - "overlapping spans inverted order", - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 8, - Count: 21, - Sum: 1.234, - PositiveSpans: []Span{{-1, 4}, {0, 3}}, - PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, - NegativeSpans: []Span{{4, 2}, {1, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - PositiveSpans: []Span{{-2, 2}, {2, 3}}, - PositiveBuckets: []float64{1, 0, 3, 4, 7}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{3, 1, 5, 6}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 19, - Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{-2, 5}, {0, 3}}, - PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 
5}, - NegativeSpans: []Span{{3, 3}, {1, 3}}, - NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, - }, - }, - { - "schema change", - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 8, - Count: 21, - Sum: 1.234, - Schema: 0, - PositiveSpans: []Span{{-1, 4}, {0, 3}}, - PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, - NegativeSpans: []Span{{4, 2}, {1, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - Schema: 1, - PositiveSpans: []Span{{-4, 3}, {5, 5}}, - PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, - NegativeSpans: []Span{{6, 3}, {6, 4}}, - NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 19, - Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{-2, 5}, {0, 3}}, - PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, - NegativeSpans: []Span{{3, 3}, {1, 3}}, - NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, - }, - }, - { - "larger zero bucket in first histogram", - &FloatHistogram{ - ZeroThreshold: 1, - ZeroCount: 17, - Count: 21, - Sum: 1.234, - PositiveSpans: []Span{{1, 2}, {0, 3}}, - PositiveBuckets: []float64{2, 3, 6, 2, 5}, - NegativeSpans: []Span{{4, 2}, {1, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - PositiveSpans: []Span{{-2, 2}, {2, 3}}, - PositiveBuckets: []float64{1, 0, 3, 4, 7}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{3, 1, 5, 6}, - }, - &FloatHistogram{ - ZeroThreshold: 1, - ZeroCount: 29, - Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{1, 2}, {0, 3}}, - PositiveBuckets: []float64{2, 6, 10, 9, 5}, - NegativeSpans: []Span{{3, 3}, {1, 3}}, - NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, - }, - }, - { - "larger zero bucket in second histogram", - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - PositiveSpans: []Span{{-2, 2}, {2, 3}}, - PositiveBuckets: []float64{1, 0, 3, 4, 7}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{3, 1, 5, 6}, - }, - &FloatHistogram{ - ZeroThreshold: 1, - ZeroCount: 17, - Count: 21, - Sum: 1.234, - PositiveSpans: []Span{{1, 2}, {0, 3}}, - PositiveBuckets: []float64{2, 3, 6, 2, 5}, - NegativeSpans: []Span{{4, 2}, {1, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 1, - ZeroCount: 29, - Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{1, 5}}, - PositiveBuckets: []float64{2, 6, 10, 9, 5}, - NegativeSpans: []Span{{3, 3}, {1, 3}}, - NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, - }, - }, - { - "larger zero threshold in first histogram ends up inside a populated bucket of second histogram", - &FloatHistogram{ - ZeroThreshold: 0.2, - ZeroCount: 17, - Count: 21, - Sum: 1.234, - PositiveSpans: []Span{{1, 2}, {0, 3}}, - PositiveBuckets: []float64{2, 3, 6, 2, 5}, - NegativeSpans: []Span{{4, 2}, {1, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - PositiveSpans: []Span{{-2, 2}, {2, 3}}, - PositiveBuckets: []float64{1, 0, 3, 4, 7}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{3, 1, 5, 6}, - }, - &FloatHistogram{ - ZeroThreshold: 0.25, - ZeroCount: 29, - Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{-1, 1}, {1, 5}}, - PositiveBuckets: []float64{0, 2, 6, 10, 9, 5}, - NegativeSpans: []Span{{3, 3}, {1, 3}}, - NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, - }, - }, - { - 
"larger zero threshold in second histogram ends up inside a populated bucket of first histogram", - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - PositiveSpans: []Span{{-2, 2}, {2, 3}}, - PositiveBuckets: []float64{1, 0, 3, 4, 7}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{3, 1, 5, 6}, - }, - &FloatHistogram{ - ZeroThreshold: 0.2, - ZeroCount: 17, - Count: 21, - Sum: 1.234, - PositiveSpans: []Span{{1, 2}, {0, 3}}, - PositiveBuckets: []float64{2, 3, 6, 2, 5}, - NegativeSpans: []Span{{4, 2}, {1, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.25, - ZeroCount: 29, - Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{1, 5}}, - PositiveBuckets: []float64{2, 6, 10, 9, 5}, - NegativeSpans: []Span{{3, 3}, {1, 3}}, - NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, - }, - }, - { - "schema change combined with larger zero bucket in second histogram", - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 8, - Count: 21, - Sum: 1.234, - Schema: 0, - PositiveSpans: []Span{{-2, 5}, {0, 3}}, - PositiveBuckets: []float64{2, 5, 4, 2, 3, 6, 2, 5}, - NegativeSpans: []Span{{4, 2}, {1, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.25, - ZeroCount: 12, - Count: 30, - Sum: 2.345, - Schema: 1, - PositiveSpans: []Span{{-3, 2}, {5, 5}}, - PositiveBuckets: []float64{1, 0, 3, 2, 2, 3, 4}, - NegativeSpans: []Span{{6, 3}, {6, 4}}, - NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.25, - ZeroCount: 22, - Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{-1, 7}}, - PositiveBuckets: []float64{6, 4, 2, 6, 10, 9, 5}, - NegativeSpans: []Span{{3, 3}, {1, 3}}, - NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, - }, - }, - { - "schema change combined with larger zero bucket in first histogram", - &FloatHistogram{ - ZeroThreshold: 0.25, - ZeroCount: 8, - Count: 21, - Sum: 1.234, - Schema: 0, - PositiveSpans: []Span{{-1, 4}, {0, 3}}, - PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, - NegativeSpans: []Span{{4, 2}, {1, 2}}, - NegativeBuckets: []float64{1, 1, 4, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: 11, - Count: 30, - Sum: 2.345, - Schema: 1, - PositiveSpans: []Span{{-4, 3}, {5, 5}}, - PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, - NegativeSpans: []Span{{6, 3}, {6, 4}}, - NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, - }, - &FloatHistogram{ - ZeroThreshold: 0.25, - ZeroCount: 20, - Count: 51, - Sum: 3.579, - PositiveSpans: []Span{{-1, 4}, {0, 3}}, - PositiveBuckets: []float64{5, 4, 2, 6, 10, 9, 5}, - NegativeSpans: []Span{{3, 3}, {1, 3}}, - NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, - }, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - require.Equal(t, c.expected, c.in1.AddNew(c.in2)) - // Has it also happened in-place? 
- require.Equal(t, c.expected, c.in1) - }) - } -} - -func BenchmarkAddOld(b *testing.B) { - for n := 0; n < b.N; n++ { - b.StopTimer() - f1 := createRandomFloatHistogram(50) - f2 := createRandomFloatHistogram(50) - b.StartTimer() - f1.Add(f2) - } - -} - -func BenchmarkAddNew(b *testing.B) { - for n := 0; n < b.N; n++ { - b.StopTimer() - f1 := createRandomFloatHistogram(50) - f2 := createRandomFloatHistogram(50) - b.StartTimer() - f1.AddNew(f2) - } -} - -func createRandomFloatHistogram(spanNum int32) *FloatHistogram { - f := &FloatHistogram{} - f.PositiveSpans, f.PositiveBuckets = createRandomSpans(spanNum) - f.NegativeSpans, f.NegativeBuckets = createRandomSpans(spanNum) - return f -} - -func createRandomSpans(spanNum int32) ([]Span, []float64) { - Spans := make([]Span, spanNum) - Buckets := make([]float64, 0) - for i := 0; i < int(spanNum); i++ { - Spans[i].Offset = rand.Int31n(spanNum) + 1 - Spans[i].Length = uint32(rand.Int31n(spanNum) + 1) - for j := 0; j < int(Spans[i].Length); j++ { - Buckets = append(Buckets, float64(rand.Int31n(spanNum)+1)) - } - } - return Spans, Buckets -} From 788061e50933ea140e84b36b3852acb0dc54c76f Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Wed, 23 Aug 2023 12:55:59 +0800 Subject: [PATCH 60/82] remove unused addBucket function Signed-off-by: Ziqi Zhao --- model/histogram/float_histogram.go | 97 ------------------------------ 1 file changed, 97 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 7c0ea8672..539ee73b9 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -284,103 +284,6 @@ func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { return true } -// addBucket takes the "coordinates" of the last bucket that was handled and -// adds the provided bucket after it. If a corresponding bucket exists, the -// count is added. If not, the bucket is inserted. The updated slices and the -// coordinates of the inserted or added-to bucket are returned. -func addBucket( - b Bucket[float64], - spans []Span, buckets []float64, - iSpan, iBucket int, - iInSpan, index int32, -) ( - newSpans []Span, newBuckets []float64, - newISpan, newIBucket int, newIInSpan int32, -) { - if iSpan == -1 { - // First add, check if it is before all spans. - if len(spans) == 0 || spans[0].Offset > b.Index { - // Add bucket before all others. - buckets = append(buckets, 0) - copy(buckets[1:], buckets) - buckets[0] = b.Count - if len(spans) > 0 && spans[0].Offset == b.Index+1 { - spans[0].Length++ - spans[0].Offset-- - return spans, buckets, 0, 0, 0 - } - spans = append(spans, Span{}) - copy(spans[1:], spans) - spans[0] = Span{Offset: b.Index, Length: 1} - if len(spans) > 1 { - // Convert the absolute offset in the formerly - // first span to a relative offset. - spans[1].Offset -= b.Index + 1 - } - return spans, buckets, 0, 0, 0 - } - if spans[0].Offset == b.Index { - // Just add to first bucket. - buckets[0] += b.Count - return spans, buckets, 0, 0, 0 - } - // We are behind the first bucket, so set everything to the - // first bucket and continue normally. - iSpan, iBucket, iInSpan = 0, 0, 0 - index = spans[0].Offset - } - deltaIndex := b.Index - index - for { - remainingInSpan := int32(spans[iSpan].Length) - iInSpan - if deltaIndex < remainingInSpan { - // Bucket is in current span. 
- iBucket += int(deltaIndex) - iInSpan += deltaIndex - buckets[iBucket] += b.Count - return spans, buckets, iSpan, iBucket, iInSpan - } - deltaIndex -= remainingInSpan - iBucket += int(remainingInSpan) - iSpan++ - if iSpan == len(spans) || deltaIndex < spans[iSpan].Offset { - // Bucket is in gap behind previous span (or there are no further spans). - buckets = append(buckets, 0) - copy(buckets[iBucket+1:], buckets[iBucket:]) - buckets[iBucket] = b.Count - if deltaIndex == 0 { - // Directly after previous span, extend previous span. - if iSpan < len(spans) { - spans[iSpan].Offset-- - } - iSpan-- - iInSpan = int32(spans[iSpan].Length) - spans[iSpan].Length++ - return spans, buckets, iSpan, iBucket, iInSpan - } - if iSpan < len(spans) && deltaIndex == spans[iSpan].Offset-1 { - // Directly before next span, extend next span. - iInSpan = 0 - spans[iSpan].Offset-- - spans[iSpan].Length++ - return spans, buckets, iSpan, iBucket, iInSpan - } - // No next span, or next span is not directly adjacent to new bucket. - // Add new span. - iInSpan = 0 - if iSpan < len(spans) { - spans[iSpan].Offset -= deltaIndex + 1 - } - spans = append(spans, Span{}) - copy(spans[iSpan+1:], spans[iSpan:]) - spans[iSpan] = Span{Length: 1, Offset: deltaIndex} - return spans, buckets, iSpan, iBucket, iInSpan - } - // Try start of next span. - deltaIndex -= spans[iSpan].Offset - iInSpan = 0 - } -} - // Compact eliminates empty buckets at the beginning and end of each span, then // merges spans that are consecutive or at most maxEmptyBuckets apart, and // finally splits spans that contain more consecutive empty buckets than From 893f97556f7648821121180fb8e8fb0f2aa58053 Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Wed, 23 Aug 2023 13:13:25 +0800 Subject: [PATCH 61/82] use switch instead of if-else to fix lint error Signed-off-by: Ziqi Zhao --- model/histogram/float_histogram.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 539ee73b9..360fa6823 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -998,7 +998,8 @@ func addBuckets(schema int32, threshold float64, negative bool, spansA []Span, b bucketsA = append(bucketsA, 0) copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) bucketsA[iBucket] = bucketB - if deltaIndex == 0 { + switch { + case deltaIndex == 0: // Directly after previous span, extend previous span. if iSpan < len(spansA) { spansA[iSpan].Offset-- @@ -1006,14 +1007,14 @@ func addBuckets(schema int32, threshold float64, negative bool, spansA []Span, b iSpan-- iInSpan = int32(spansA[iSpan].Length) spansA[iSpan].Length++ - break - } else if iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1 { + goto nextLoop + case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1: // Directly before next span, extend next span. iInSpan = 0 spansA[iSpan].Offset-- spansA[iSpan].Length++ - break - } else { + goto nextLoop + default: // No next span, or next span is not directly adjacent to new bucket. // Add new span. iInSpan = 0 @@ -1023,7 +1024,7 @@ func addBuckets(schema int32, threshold float64, negative bool, spansA []Span, b spansA = append(spansA, Span{}) copy(spansA[iSpan+1:], spansA[iSpan:]) spansA[iSpan] = Span{Length: 1, Offset: deltaIndex} - break + goto nextLoop } } else { // Try start of next span. 
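For readers following the span arithmetic in the addBuckets patches above, here is a minimal standalone Go sketch (not part of any patch in this series) of how a span/bucket pair encodes bucket indexes: a Span's Offset is the gap to the previous span (or the absolute starting index for the first span) and Length is the number of consecutive buckets it covers. The expand helper is hypothetical, written only for illustration.

package main

import "fmt"

// Span mirrors the histogram model's layout: Offset is the gap to the
// previous span (absolute starting index for the first span), Length is
// the number of consecutive buckets covered by the span.
type Span struct {
	Offset int32
	Length uint32
}

// expand is a hypothetical helper that maps each covered bucket index to
// its count, to make the encoding concrete.
func expand(spans []Span, buckets []float64) map[int32]float64 {
	out := map[int32]float64{}
	idx, i := int32(0), 0
	for _, s := range spans {
		idx += s.Offset
		for j := uint32(0); j < s.Length; j++ {
			out[idx] = buckets[i]
			idx++
			i++
		}
	}
	return out
}

func main() {
	// Same layout as the "same bucket layout" test case above:
	// spans {{-2, 2}, {1, 3}} with buckets {1, 0, 3, 4, 7} cover
	// bucket indexes -2, -1 and 1, 2, 3.
	fmt.Println(expand([]Span{{-2, 2}, {1, 3}}, []float64{1, 0, 3, 4, 7}))
	// Output: map[-2:1 -1:0 1:3 2:4 3:7]
}

Because offsets after the first span are stored relative to the previous span, inserting or merging buckets requires the careful index bookkeeping (iSpan, iBucket, iInSpan, deltaIndex) visible in addBuckets above.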
From d3633d4e764a71a0aff925c699ba0f30d40fd60d Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Thu, 24 Aug 2023 07:17:23 +0800 Subject: [PATCH 62/82] Update model/histogram/float_histogram.go MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Björn Rabenstein Signed-off-by: Ziqi Zhao --- model/histogram/float_histogram.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 360fa6823..d7de64abf 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -923,9 +923,18 @@ func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, ta return targetSpans, targetBuckets } -// addBuckets add two groups of Spans by inspecting the -// spans in other and create missing buckets in origin in batches. -func addBuckets(schema int32, threshold float64, negative bool, spansA []Span, bucketsA []float64, spansB []Span, bucketsB []float64) ([]Span, []float64) { +// addBuckets adds the buckets described by spansB/bucketsB to the buckets described by spansA/bucketsA, +// creating missing buckets in spansA/bucketsA as needed. +// It returns the resulting spans/buckets (which must be used instead of the original spansA/bucketsA, +// although spansA/bucketsA might get modified by this function). +// All buckets must use the same provided schema. +// Buckets in spansB/bucketsB with an absolute upper limit ≤ threshold are ignored. +// If negative is true, the buckets in spansB/bucketsB are subtracted rather than added. +func addBuckets( + schema int32, threshold float64, negative bool, + spansA []Span, bucketsA []float64, + spansB []Span, bucketsB []float64, +) ([]Span, []float64) { var ( iSpan int = -1 iBucket int = -1 From de172049abc9b795e226c2153fcfaea48804bbf9 Mon Sep 17 00:00:00 2001 From: Ziqi Zhao Date: Thu, 24 Aug 2023 07:27:33 +0800 Subject: [PATCH 63/82] fix lint error Signed-off-by: Ziqi Zhao --- model/histogram/float_histogram.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index d7de64abf..bfb3c3d19 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -931,8 +931,8 @@ func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, ta // Buckets in spansB/bucketsB with an absolute upper limit ≤ threshold are ignored. // If negative is true, the buckets in spansB/bucketsB are subtracted rather than added. func addBuckets( - schema int32, threshold float64, negative bool, - spansA []Span, bucketsA []float64, + schema int32, threshold float64, negative bool, + spansA []Span, bucketsA []float64, spansB []Span, bucketsB []float64, ) ([]Span, []float64) { var ( From daca364b7045bf6c0bcba78618baf83f7c11a6d3 Mon Sep 17 00:00:00 2001 From: Wasim Nihal Date: Wed, 23 Aug 2023 14:46:42 +0530 Subject: [PATCH 64/82] Correcting indentation of basic_auth configuration for uyuni_sd_config Signed-off-by: Wasim Nihal --- docs/configuration/configuration.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index f15a9f914..b9373498a 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -2993,8 +2993,8 @@ password: # Optional HTTP basic authentication information, currently not supported by Uyuni. 
basic_auth: [ username: ] - [ password: ] - [ password_file: ] + [ password: ] + [ password_file: ] # Optional `Authorization` header configuration, currently not supported by Uyuni. authorization: From 8ef7dfdeebf0a7491973303c7fb6b68ec5cc065b Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Thu, 24 Aug 2023 06:21:17 -0700 Subject: [PATCH 65/82] Add a chunk size limit in bytes (#12054) Add a chunk size limit in bytes This creates a hard cap for XOR chunks of 1024 bytes. The limit for histogram chunk is also 1024 bytes, but it is a soft limit as a histogram has a dynamic size, and even a single one could be larger than 1024 bytes. This also avoids cutting new histogram chunks if the existing chunk has fewer than 10 histograms yet. In that way, we are accepting "jumbo chunks" in order to have at least 10 histograms in a chunk, allowing compression to kick in. Signed-off-by: Justin Lei --- storage/buffer.go | 30 +-- storage/merge_test.go | 391 ++++++++++++++-------------- storage/remote/read_handler_test.go | 14 +- storage/series.go | 23 +- storage/series_test.go | 51 ++-- tsdb/agent/db_test.go | 5 +- tsdb/block_test.go | 33 ++- tsdb/blockwriter_test.go | 8 +- tsdb/chunkenc/chunk.go | 14 + tsdb/chunks/chunks.go | 67 +++++ tsdb/chunks/samples.go | 89 +++++++ tsdb/compact_test.go | 9 +- tsdb/db_test.go | 226 ++++++++-------- tsdb/head.go | 6 +- tsdb/head_append.go | 103 +++++++- tsdb/head_test.go | 374 ++++++++++++++++++++------ tsdb/ooo_head_read_test.go | 55 ++-- tsdb/querier_test.go | 244 ++++++++--------- tsdb/tsdbutil/chunks.go | 159 ----------- tsdb/tsdbutil/histogram.go | 8 +- 20 files changed, 1104 insertions(+), 805 deletions(-) create mode 100644 tsdb/chunks/samples.go delete mode 100644 tsdb/tsdbutil/chunks.go diff --git a/storage/buffer.go b/storage/buffer.go index 38f559103..b1b5f8148 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -19,7 +19,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/prometheus/prometheus/tsdb/chunks" ) // BufferedSeriesIterator wraps an iterator with a look-back buffer. @@ -69,7 +69,7 @@ func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool { // PeekBack returns the nth previous element of the iterator. If there is none buffered, // ok is false. -func (b *BufferedSeriesIterator) PeekBack(n int) (sample tsdbutil.Sample, ok bool) { +func (b *BufferedSeriesIterator) PeekBack(n int) (sample chunks.Sample, ok bool) { return b.buf.nthLast(n) } @@ -247,7 +247,7 @@ type sampleRing struct { // allowed to be populated!) This avoids the overhead of the interface // wrapper for the happy (and by far most common) case of homogenous // samples. - iBuf []tsdbutil.Sample + iBuf []chunks.Sample fBuf []fSample hBuf []hSample fhBuf []fhSample @@ -289,7 +289,7 @@ func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing { case chunkenc.ValFloatHistogram: r.fhBuf = make([]fhSample, size) default: - r.iBuf = make([]tsdbutil.Sample, size) + r.iBuf = make([]chunks.Sample, size) } return r } @@ -383,7 +383,7 @@ func (it *sampleRingIterator) AtT() int64 { return it.t } -func (r *sampleRing) at(i int) tsdbutil.Sample { +func (r *sampleRing) at(i int) chunks.Sample { j := (r.f + i) % len(r.iBuf) return r.iBuf[j] } @@ -408,7 +408,7 @@ func (r *sampleRing) atFH(i int) fhSample { // implementation. 
If you know you are dealing with one of the implementations // from this package (fSample, hSample, fhSample), call one of the specialized // methods addF, addH, or addFH for better performance. -func (r *sampleRing) add(s tsdbutil.Sample) { +func (r *sampleRing) add(s chunks.Sample) { if r.bufInUse == noBuf { // First sample. switch s := s.(type) { @@ -519,7 +519,7 @@ func (r *sampleRing) addFH(s fhSample) { } } -// genericAdd is a generic implementation of adding a tsdbutil.Sample +// genericAdd is a generic implementation of adding a chunks.Sample // implementation to a buffer of a sample ring. However, the Go compiler // currently (go1.20) decides to not expand the code during compile time, but // creates dynamic code to handle the different types. That has a significant @@ -529,7 +529,7 @@ func (r *sampleRing) addFH(s fhSample) { // Therefore, genericAdd has been manually implemented for all the types // (addSample, addF, addH, addFH) below. // -// func genericAdd[T tsdbutil.Sample](s T, buf []T, r *sampleRing) []T { +// func genericAdd[T chunks.Sample](s T, buf []T, r *sampleRing) []T { // l := len(buf) // // Grow the ring buffer if it fits no more elements. // if l == 0 { @@ -568,15 +568,15 @@ func (r *sampleRing) addFH(s fhSample) { // } // addSample is a handcoded specialization of genericAdd (see above). -func addSample(s tsdbutil.Sample, buf []tsdbutil.Sample, r *sampleRing) []tsdbutil.Sample { +func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sample { l := len(buf) // Grow the ring buffer if it fits no more elements. if l == 0 { - buf = make([]tsdbutil.Sample, 16) + buf = make([]chunks.Sample, 16) l = 16 } if l == r.l { - newBuf := make([]tsdbutil.Sample, 2*l) + newBuf := make([]chunks.Sample, 2*l) copy(newBuf[l+r.f:], buf[r.f:]) copy(newBuf, buf[:r.f]) @@ -748,7 +748,7 @@ func (r *sampleRing) reduceDelta(delta int64) bool { return true } -func genericReduceDelta[T tsdbutil.Sample](buf []T, r *sampleRing) { +func genericReduceDelta[T chunks.Sample](buf []T, r *sampleRing) { // Free head of the buffer of samples that just fell out of the range. l := len(buf) tmin := buf[r.i].T() - r.delta @@ -762,7 +762,7 @@ func genericReduceDelta[T tsdbutil.Sample](buf []T, r *sampleRing) { } // nthLast returns the nth most recent element added to the ring. 
-func (r *sampleRing) nthLast(n int) (tsdbutil.Sample, bool) { +func (r *sampleRing) nthLast(n int) (chunks.Sample, bool) { if n > r.l { return fSample{}, false } @@ -779,8 +779,8 @@ func (r *sampleRing) nthLast(n int) (tsdbutil.Sample, bool) { } } -func (r *sampleRing) samples() []tsdbutil.Sample { - res := make([]tsdbutil.Sample, r.l) +func (r *sampleRing) samples() []chunks.Sample { + res := make([]chunks.Sample, r.l) k := r.f + r.l var j int diff --git a/storage/merge_test.go b/storage/merge_test.go index b0544c2d8..e0b938be7 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -25,6 +25,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/tsdbutil" ) @@ -62,116 +63,116 @@ func TestMergeQuerierWithChainMerger(t *testing.T) { { name: "one querier, two series", querierSeries: [][]Series{{ - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), + NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }}, expected: NewMockSeriesSet( - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), + NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), ), }, { name: "two queriers, one different series each", querierSeries: [][]Series{{ - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), }, { - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }}, expected: NewMockSeriesSet( - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), + NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), ), }, { name: "two time unsorted queriers, two series each", querierSeries: [][]Series{{ - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}, fSample{6, 6}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), + NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}, fSample{6, 6}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 
2}, fSample{3, 3}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}, fSample{4, 4}}), + NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}), }}, expected: NewMockSeriesSet( NewListSeries( labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}}, + []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}}, ), NewListSeries( labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, + []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, ), ), }, { name: "five queriers, only two queriers have two time unsorted series each", querierSeries: [][]Series{{}, {}, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}, fSample{6, 6}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), + NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}, fSample{6, 6}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}, fSample{4, 4}}), + NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}), }, {}}, expected: NewMockSeriesSet( NewListSeries( labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}}, + []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}}, ), NewListSeries( labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, + []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, ), ), }, { name: "two queriers, only two queriers have two time unsorted series each, with 3 noop and one nil querier together", querierSeries: [][]Series{{}, {}, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}, fSample{6, 6}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), + NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}, fSample{6, 6}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}, fSample{4, 4}}), + NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}), }, {}}, extraQueriers: []Querier{NoopQuerier(), NoopQuerier(), nil, NoopQuerier()}, expected: NewMockSeriesSet( NewListSeries( labels.FromStrings("bar", "baz"), - 
[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}}, + []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}}, ), NewListSeries( labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, + []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, ), ), }, { name: "two queriers, with two series, one is overlapping", querierSeries: [][]Series{{}, {}, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), + NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, { - NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 22}, fSample{3, 32}}), - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}, fSample{4, 4}}), + NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 22}, fSample{3, 32}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}), }, {}}, expected: NewMockSeriesSet( NewListSeries( labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{fSample{1, 1}, fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}}, + []chunks.Sample{fSample{1, 1}, fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}}, ), NewListSeries( labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, + []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}}, ), ), }, { name: "two queries, one with NaN samples series", querierSeries: [][]Series{{ - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, math.NaN()}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}}), }, { - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{1, 1}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{1, 1}}), }}, expected: NewMockSeriesSet( - NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, math.NaN()}, fSample{1, 1}}), + NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}, fSample{1, 1}}), ), }, } { @@ -245,108 +246,108 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) { { name: "one querier, two series", chkQuerierSeries: [][]ChunkSeries{{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), }}, expected: NewMockChunkSeriesSet( - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), 
[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), ), }, { name: "two secondaries, one different series each", chkQuerierSeries: [][]ChunkSeries{{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), }, { - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), }}, expected: NewMockChunkSeriesSet( - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), ), }, { name: "two secondaries, two not in time order series each", chkQuerierSeries: [][]ChunkSeries{{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}}, []tsdbutil.Sample{fSample{6, 6}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), }, { - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}}, []tsdbutil.Sample{fSample{4, 4}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}), }}, expected: NewMockChunkSeriesSet( NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, - []tsdbutil.Sample{fSample{3, 3}}, - []tsdbutil.Sample{fSample{5, 5}}, - []tsdbutil.Sample{fSample{6, 6}}, + []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, + []chunks.Sample{fSample{3, 3}}, + []chunks.Sample{fSample{5, 5}}, + []chunks.Sample{fSample{6, 6}}, ), NewListChunkSeriesFromSamples(labels.FromStrings("foo", 
"bar"), - []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, - []tsdbutil.Sample{fSample{2, 2}}, - []tsdbutil.Sample{fSample{3, 3}}, - []tsdbutil.Sample{fSample{4, 4}}, + []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, + []chunks.Sample{fSample{2, 2}}, + []chunks.Sample{fSample{3, 3}}, + []chunks.Sample{fSample{4, 4}}, ), ), }, { name: "five secondaries, only two have two not in time order series each", chkQuerierSeries: [][]ChunkSeries{{}, {}, { - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}}, []tsdbutil.Sample{fSample{6, 6}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), }, { - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}}, []tsdbutil.Sample{fSample{4, 4}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}), }, {}}, expected: NewMockChunkSeriesSet( NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, - []tsdbutil.Sample{fSample{3, 3}}, - []tsdbutil.Sample{fSample{5, 5}}, - []tsdbutil.Sample{fSample{6, 6}}, + []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, + []chunks.Sample{fSample{3, 3}}, + []chunks.Sample{fSample{5, 5}}, + []chunks.Sample{fSample{6, 6}}, ), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, - []tsdbutil.Sample{fSample{2, 2}}, - []tsdbutil.Sample{fSample{3, 3}}, - []tsdbutil.Sample{fSample{4, 4}}, + []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, + []chunks.Sample{fSample{2, 2}}, + []chunks.Sample{fSample{3, 3}}, + []chunks.Sample{fSample{4, 4}}, ), ), }, { name: "two secondaries, with two not in time order series each, with 3 noop queries and one nil together", chkQuerierSeries: [][]ChunkSeries{{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}}, []tsdbutil.Sample{fSample{6, 6}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}), }, { - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}}, []tsdbutil.Sample{fSample{4, 4}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), + 
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}), }}, extraQueriers: []ChunkQuerier{NoopChunkedQuerier(), NoopChunkedQuerier(), nil, NoopChunkedQuerier()}, expected: NewMockChunkSeriesSet( NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, - []tsdbutil.Sample{fSample{3, 3}}, - []tsdbutil.Sample{fSample{5, 5}}, - []tsdbutil.Sample{fSample{6, 6}}, + []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, + []chunks.Sample{fSample{3, 3}}, + []chunks.Sample{fSample{5, 5}}, + []chunks.Sample{fSample{6, 6}}, ), NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), - []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, - []tsdbutil.Sample{fSample{2, 2}}, - []tsdbutil.Sample{fSample{3, 3}}, - []tsdbutil.Sample{fSample{4, 4}}, + []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, + []chunks.Sample{fSample{2, 2}}, + []chunks.Sample{fSample{3, 3}}, + []chunks.Sample{fSample{4, 4}}, ), ), }, { name: "two queries, one with NaN samples series", chkQuerierSeries: [][]ChunkSeries{{ - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, math.NaN()}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}}), }, { - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{1, 1}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{1, 1}}), }}, expected: NewMockChunkSeriesSet( - NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, math.NaN()}}, []tsdbutil.Sample{fSample{1, 1}}), + NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}}, []chunks.Sample{fSample{1, 1}}), ), }, } { @@ -408,9 +409,9 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { { name: "single series", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), }, { name: "two empty series", @@ -423,142 +424,142 @@ func TestCompactingChunkSeriesMerger(t *testing.T) { { name: "two non overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, 
fSample{5, 5}}, []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}, []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}), }, { name: "two overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{8, 8}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{8, 8}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{7, 7}, fSample{8, 8}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{7, 7}, fSample{8, 8}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}), }, { name: "two duplicated", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), }, { name: "three overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{4, 4}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{4, 4}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 6}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), 
[]chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 6}}), }, { name: "three in chained overlap", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{4, 4}, fSample{6, 66}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{6, 6}, fSample{10, 10}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{4, 4}, fSample{6, 66}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{6, 6}, fSample{10, 10}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 66}, fSample{10, 10}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 66}, fSample{10, 10}}), }, { name: "three in chained overlap complex", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{5, 5}}, []tsdbutil.Sample{fSample{10, 10}, fSample{15, 15}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{20, 20}}, []tsdbutil.Sample{fSample{25, 25}, fSample{30, 30}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{18, 18}, fSample{26, 26}}, []tsdbutil.Sample{fSample{31, 31}, fSample{35, 35}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{5, 5}}, []chunks.Sample{fSample{10, 10}, fSample{15, 15}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{20, 20}}, []chunks.Sample{fSample{25, 25}, fSample{30, 30}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{18, 18}, fSample{26, 26}}, []chunks.Sample{fSample{31, 31}, fSample{35, 35}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{fSample{0, 0}, fSample{2, 2}, fSample{5, 5}, fSample{10, 10}, fSample{15, 15}, fSample{18, 18}, fSample{20, 20}, fSample{25, 25}, fSample{26, 26}, fSample{30, 30}}, - []tsdbutil.Sample{fSample{31, 31}, fSample{35, 35}}, + []chunks.Sample{fSample{0, 0}, fSample{2, 2}, fSample{5, 5}, fSample{10, 10}, fSample{15, 15}, fSample{18, 18}, fSample{20, 20}, fSample{25, 25}, fSample{26, 26}, fSample{30, 30}}, + []chunks.Sample{fSample{31, 31}, fSample{35, 35}}, ), }, { name: "110 overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 110)), // [0 - 110) - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(60, 50)), // [60 - 110) + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(0, 110)), // [0 - 110) + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(60, 50)), // [60 - 110) }, expected: 
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - tsdbutil.GenerateSamples(0, 110), + chunks.GenerateSamples(0, 110), ), }, { name: "150 overlapping samples, split chunk", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 90)), // [0 - 90) - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(60, 90)), // [90 - 150) + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(0, 90)), // [0 - 90) + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(60, 90)), // [90 - 150) }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - tsdbutil.GenerateSamples(0, 120), - tsdbutil.GenerateSamples(120, 30), + chunks.GenerateSamples(0, 120), + chunks.GenerateSamples(120, 30), ), }, { name: "histogram chunks overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(0), histogramSample(5)}, []tsdbutil.Sample{histogramSample(10), histogramSample(15)}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(2), histogramSample(20)}, []tsdbutil.Sample{histogramSample(25), histogramSample(30)}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(18), histogramSample(26)}, []tsdbutil.Sample{histogramSample(31), histogramSample(35)}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(0), histogramSample(5)}, []chunks.Sample{histogramSample(10), histogramSample(15)}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(2), histogramSample(20)}, []chunks.Sample{histogramSample(25), histogramSample(30)}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(18), histogramSample(26)}, []chunks.Sample{histogramSample(31), histogramSample(35)}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{histogramSample(0), histogramSample(2), histogramSample(5), histogramSample(10), histogramSample(15), histogramSample(18), histogramSample(20), histogramSample(25), histogramSample(26), histogramSample(30)}, - []tsdbutil.Sample{histogramSample(31), histogramSample(35)}, + []chunks.Sample{histogramSample(0), histogramSample(2), histogramSample(5), histogramSample(10), histogramSample(15), histogramSample(18), histogramSample(20), histogramSample(25), histogramSample(26), histogramSample(30)}, + []chunks.Sample{histogramSample(31), histogramSample(35)}, ), }, { name: "histogram chunks overlapping with float chunks", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(0), histogramSample(5)}, []tsdbutil.Sample{histogramSample(10), histogramSample(15)}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{12, 12}}, []tsdbutil.Sample{fSample{14, 14}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(0), histogramSample(5)}, []chunks.Sample{histogramSample(10), histogramSample(15)}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{12, 12}}, []chunks.Sample{fSample{14, 14}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), 
- []tsdbutil.Sample{histogramSample(0)}, - []tsdbutil.Sample{fSample{1, 1}}, - []tsdbutil.Sample{histogramSample(5), histogramSample(10)}, - []tsdbutil.Sample{fSample{12, 12}, fSample{14, 14}}, - []tsdbutil.Sample{histogramSample(15)}, + []chunks.Sample{histogramSample(0)}, + []chunks.Sample{fSample{1, 1}}, + []chunks.Sample{histogramSample(5), histogramSample(10)}, + []chunks.Sample{fSample{12, 12}, fSample{14, 14}}, + []chunks.Sample{histogramSample(15)}, ), }, { name: "float histogram chunks overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []tsdbutil.Sample{floatHistogramSample(10), floatHistogramSample(15)}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(2), floatHistogramSample(20)}, []tsdbutil.Sample{floatHistogramSample(25), floatHistogramSample(30)}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(18), floatHistogramSample(26)}, []tsdbutil.Sample{floatHistogramSample(31), floatHistogramSample(35)}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []chunks.Sample{floatHistogramSample(10), floatHistogramSample(15)}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(2), floatHistogramSample(20)}, []chunks.Sample{floatHistogramSample(25), floatHistogramSample(30)}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(18), floatHistogramSample(26)}, []chunks.Sample{floatHistogramSample(31), floatHistogramSample(35)}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(2), floatHistogramSample(5), floatHistogramSample(10), floatHistogramSample(15), floatHistogramSample(18), floatHistogramSample(20), floatHistogramSample(25), floatHistogramSample(26), floatHistogramSample(30)}, - []tsdbutil.Sample{floatHistogramSample(31), floatHistogramSample(35)}, + []chunks.Sample{floatHistogramSample(0), floatHistogramSample(2), floatHistogramSample(5), floatHistogramSample(10), floatHistogramSample(15), floatHistogramSample(18), floatHistogramSample(20), floatHistogramSample(25), floatHistogramSample(26), floatHistogramSample(30)}, + []chunks.Sample{floatHistogramSample(31), floatHistogramSample(35)}, ), }, { name: "float histogram chunks overlapping with float chunks", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []tsdbutil.Sample{floatHistogramSample(10), floatHistogramSample(15)}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{12, 12}}, []tsdbutil.Sample{fSample{14, 14}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []chunks.Sample{floatHistogramSample(10), floatHistogramSample(15)}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{12, 12}}, []chunks.Sample{fSample{14, 14}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{floatHistogramSample(0)}, - []tsdbutil.Sample{fSample{1, 1}}, - 
[]tsdbutil.Sample{floatHistogramSample(5), floatHistogramSample(10)}, - []tsdbutil.Sample{fSample{12, 12}, fSample{14, 14}}, - []tsdbutil.Sample{floatHistogramSample(15)}, + []chunks.Sample{floatHistogramSample(0)}, + []chunks.Sample{fSample{1, 1}}, + []chunks.Sample{floatHistogramSample(5), floatHistogramSample(10)}, + []chunks.Sample{fSample{12, 12}, fSample{14, 14}}, + []chunks.Sample{floatHistogramSample(15)}, ), }, { name: "float histogram chunks overlapping with histogram chunks", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []tsdbutil.Sample{floatHistogramSample(10), floatHistogramSample(15)}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(1), histogramSample(12)}, []tsdbutil.Sample{histogramSample(14)}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []chunks.Sample{floatHistogramSample(10), floatHistogramSample(15)}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(1), histogramSample(12)}, []chunks.Sample{histogramSample(14)}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{floatHistogramSample(0)}, - []tsdbutil.Sample{histogramSample(1)}, - []tsdbutil.Sample{floatHistogramSample(5), floatHistogramSample(10)}, - []tsdbutil.Sample{histogramSample(12), histogramSample(14)}, - []tsdbutil.Sample{floatHistogramSample(15)}, + []chunks.Sample{floatHistogramSample(0)}, + []chunks.Sample{histogramSample(1)}, + []chunks.Sample{floatHistogramSample(5), floatHistogramSample(10)}, + []chunks.Sample{histogramSample(12), histogramSample(14)}, + []chunks.Sample{floatHistogramSample(15)}, ), }, } { @@ -592,9 +593,9 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) { { name: "single series", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}), }, { name: "two empty series", @@ -607,92 +608,92 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) { { name: "two non overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}), }, - expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 
3}, fSample{5, 5}}, []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), + expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}, []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}), }, { name: "two overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{8, 8}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{8, 8}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{8, 8}}, - []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}, + []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{8, 8}}, + []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}, ), }, { name: "two duplicated", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}, - []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}, + []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}, + []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}, ), }, { name: "three overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{4, 4}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{4, 4}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}, - []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}, - []tsdbutil.Sample{fSample{0, 0}, 
fSample{4, 4}}, + []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}, + []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}, + []chunks.Sample{fSample{0, 0}, fSample{4, 4}}, ), }, { name: "three in chained overlap", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{4, 4}, fSample{6, 66}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{6, 6}, fSample{10, 10}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{4, 4}, fSample{6, 66}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{6, 6}, fSample{10, 10}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}, - []tsdbutil.Sample{fSample{4, 4}, fSample{6, 66}}, - []tsdbutil.Sample{fSample{6, 6}, fSample{10, 10}}, + []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}, + []chunks.Sample{fSample{4, 4}, fSample{6, 66}}, + []chunks.Sample{fSample{6, 6}, fSample{10, 10}}, ), }, { name: "three in chained overlap complex", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{5, 5}}, []tsdbutil.Sample{fSample{10, 10}, fSample{15, 15}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{20, 20}}, []tsdbutil.Sample{fSample{25, 25}, fSample{30, 30}}), - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{18, 18}, fSample{26, 26}}, []tsdbutil.Sample{fSample{31, 31}, fSample{35, 35}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{5, 5}}, []chunks.Sample{fSample{10, 10}, fSample{15, 15}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{20, 20}}, []chunks.Sample{fSample{25, 25}, fSample{30, 30}}), + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{18, 18}, fSample{26, 26}}, []chunks.Sample{fSample{31, 31}, fSample{35, 35}}), }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - []tsdbutil.Sample{fSample{0, 0}, fSample{5, 5}}, []tsdbutil.Sample{fSample{10, 10}, fSample{15, 15}}, - []tsdbutil.Sample{fSample{2, 2}, fSample{20, 20}}, []tsdbutil.Sample{fSample{25, 25}, fSample{30, 30}}, - []tsdbutil.Sample{fSample{18, 18}, fSample{26, 26}}, []tsdbutil.Sample{fSample{31, 31}, fSample{35, 35}}, + []chunks.Sample{fSample{0, 0}, fSample{5, 5}}, []chunks.Sample{fSample{10, 10}, fSample{15, 15}}, + []chunks.Sample{fSample{2, 2}, fSample{20, 20}}, []chunks.Sample{fSample{25, 25}, fSample{30, 30}}, + []chunks.Sample{fSample{18, 18}, fSample{26, 26}}, []chunks.Sample{fSample{31, 31}, fSample{35, 35}}, ), }, { name: "110 overlapping", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 110)), // [0 - 110) - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(60, 50)), // [60 - 110) + 
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(0, 110)), // [0 - 110) + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(60, 50)), // [60 - 110) }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - tsdbutil.GenerateSamples(0, 110), - tsdbutil.GenerateSamples(60, 50), + chunks.GenerateSamples(0, 110), + chunks.GenerateSamples(60, 50), ), }, { name: "150 overlapping samples, simply concatenated and no splits", input: []ChunkSeries{ - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 90)), // [0 - 90) - NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(60, 90)), // [90 - 150) + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(0, 90)), // [0 - 90) + NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(60, 90)), // [90 - 150) }, expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), - tsdbutil.GenerateSamples(0, 90), - tsdbutil.GenerateSamples(60, 90), + chunks.GenerateSamples(0, 90), + chunks.GenerateSamples(60, 90), ), }, } { @@ -803,20 +804,20 @@ func (m *mockChunkSeriesSet) Warnings() Warnings { return nil } func TestChainSampleIterator(t *testing.T) { for _, tc := range []struct { input []chunkenc.Iterator - expected []tsdbutil.Sample + expected []chunks.Sample }{ { input: []chunkenc.Iterator{ NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}}), }, - expected: []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, + expected: []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, }, { input: []chunkenc.Iterator{ NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}}), NewListSeriesIterator(samples{fSample{2, 2}, fSample{3, 3}}), }, - expected: []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}, + expected: []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}, }, { input: []chunkenc.Iterator{ @@ -824,7 +825,7 @@ func TestChainSampleIterator(t *testing.T) { NewListSeriesIterator(samples{fSample{1, 1}, fSample{4, 4}}), NewListSeriesIterator(samples{fSample{2, 2}, fSample{5, 5}}), }, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, }, }, @@ -838,7 +839,7 @@ func TestChainSampleIterator(t *testing.T) { NewListSeriesIterator(samples{}), NewListSeriesIterator(samples{}), }, - expected: []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}, + expected: []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}, }, } { merged := ChainSampleIteratorFromIterators(nil, tc.input) @@ -852,14 +853,14 @@ func TestChainSampleIteratorSeek(t *testing.T) { for _, tc := range []struct { input []chunkenc.Iterator seek int64 - expected []tsdbutil.Sample + expected []chunks.Sample }{ { input: []chunkenc.Iterator{ NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, seek: 1, - expected: []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, + expected: []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, }, { input: []chunkenc.Iterator{ @@ -867,7 +868,7 @@ func TestChainSampleIteratorSeek(t *testing.T) { NewListSeriesIterator(samples{fSample{2, 2}, fSample{3, 3}}), }, seek: 2, - expected: []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}}, + expected: []chunks.Sample{fSample{2, 2}, fSample{3, 3}}, }, { input: []chunkenc.Iterator{ @@ -876,7 
+877,7 @@ func TestChainSampleIteratorSeek(t *testing.T) { NewListSeriesIterator(samples{fSample{2, 2}, fSample{5, 5}}), }, seek: 2, - expected: []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}}, + expected: []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}}, }, { input: []chunkenc.Iterator{ @@ -884,11 +885,11 @@ func TestChainSampleIteratorSeek(t *testing.T) { NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}), }, seek: 0, - expected: []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}, + expected: []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}, }, } { merged := ChainSampleIteratorFromIterators(nil, tc.input) - actual := []tsdbutil.Sample{} + actual := []chunks.Sample{} if merged.Seek(tc.seek) == chunkenc.ValFloat { t, f := merged.At() actual = append(actual, fSample{t, f}) @@ -904,7 +905,7 @@ func makeSeries(numSeries, numSamples int) []Series { series := []Series{} for j := 0; j < numSeries; j++ { labels := labels.FromStrings("foo", fmt.Sprintf("bar%d", j)) - samples := []tsdbutil.Sample{} + samples := []chunks.Sample{} for k := 0; k < numSamples; k++ { samples = append(samples, fSample{t: int64(k), f: float64(k)}) } diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go index 7e8618615..fdb9f04dd 100644 --- a/storage/remote/read_handler_test.go +++ b/storage/remote/read_handler_test.go @@ -196,10 +196,10 @@ func BenchmarkStreamReadEndpoint(b *testing.B) { } func TestStreamReadEndpoint(t *testing.T) { - // First with 120 samples. We expect 1 frame with 1 chunk. - // Second with 121 samples, We expect 1 frame with 2 chunks. - // Third with 241 samples. We expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit. - // Fourth with 120 histogram samples. We expect 1 frame with 1 chunk. + // First with 120 float samples. We expect 1 frame with 1 chunk. + // Second with 121 float samples, We expect 1 frame with 2 chunks. + // Third with 241 float samples. We expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit. + // Fourth with 25 histogram samples. We expect 1 frame with 1 chunk. 
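Editorial aside, not part of the patch: the expectations in the comment block above follow from the TSDB head cutting float (XOR) chunks at roughly 120 samples, while the splitting into remote-read frames is governed separately by a byte limit. The histogram series drops from 120 to 25 samples, presumably so that it still fits into a single chunk once the histogram chunk size targets added later in this patch (TargetBytesPerHistogramChunk, MinSamplesPerHistogramChunk) apply. A minimal sketch of the float-chunk arithmetic; expectedFloatChunks is a hypothetical helper, not code from the repository:

    // expectedFloatChunks estimates how many XOR chunks the head produces for a
    // series of float samples, assuming the default target of 120 samples per chunk.
    func expectedFloatChunks(numSamples int) int {
        const samplesPerChunk = 120
        if numSamples == 0 {
            return 0
        }
        return (numSamples + samplesPerChunk - 1) / samplesPerChunk
    }

    // expectedFloatChunks(120) == 1, expectedFloatChunks(121) == 2, expectedFloatChunks(241) == 3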
store := promql.LoadedStorage(t, ` load 1m test_metric1{foo="bar1",baz="qux"} 0+100x119 @@ -208,7 +208,7 @@ func TestStreamReadEndpoint(t *testing.T) { `) defer store.Close() - addNativeHistogramsToTestSuite(t, store, 120) + addNativeHistogramsToTestSuite(t, store, 25) api := NewReadHandler(nil, nil, store, func() config.Config { return config.Config{ @@ -420,8 +420,8 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: prompb.Chunk_FLOAT_HISTOGRAM, - MaxTimeMs: 7140000, - Data: []byte("\000x\000\377?PbM\322\361\251\374\214\244\224e$\242@(\000\000\000\000\000\000@\000\000\000\000\000\000\000@2ffffff?\360\000\000\000\000\000\000@\000\000\000\000\000\000\000?\360\000\000\000\000\000\000?\360\000\000\000\000\000\000?\360\000\000\000\000\000\000@\000\000\000\000\000\000\000?\360\000\000\000\000\000\000?\360\000\000\000\000\000\000\370\352`\326/v\003\322\037\302_\377\330\017\t\177\377\t\177\377\t\177\377`<%\377\374%\377\364\275a{4\237\377\377\377\377\377\375\200\365\205\354\007\260\036\300z\302\366\003\330\r\244\217\275\240\365\353\237\377\377\377\377\377\375a{A\353\013\326\027\254/h=az\302\320\276\300\270\254\314\314\314\314\314\333A\354\013\332\017h=\240\366\005\355\007\264\032\t\231\22333333;\002\347`^\300\275\201s\260/`Z8\324'\352\037\274\352\023\346gP\2372\334$\356\007\273)\377\377\377\377\377\377P\237\270\036\241?P\237\250O\334\017P\237\250Om\027\373B\366\347\265UUUUUw\003\332\027\270\036\340{\201\355\013\334\017p4\231\235\231\231\231\231\231\236\320\271\332\027\264/h\\\355\013\332\026\3307{\003\3732\344\314\314\314\314\314\347`~fv\007\346Q1\335\225\252\252\252\252\252\253\260?\035\201\375\201\375\201\370\354\017\354\017\245\347\267<\377\377\377\377\377\377\031\3061\234b\324O\306:\365\357\377\377\377\377\377\374\3439\3163\232\005\353\023\340\254\314\314\314\314\314\307X\237\030\307X\237\030\240\316\360p\311\231\231\231\231\231\265\211\373\301\353\023\365\211\372\304\375\340\365\211\372\304\364\017\334\027\007\252\252\252\252\252\253x=\301{\301\357\007\274\036\340\275\340\367\203C\231\216\177\377\377\377\377\377\270.w\005\356\013\334\027;\202\367\005\240^\320\374\026\252\252\252\252\252\251\332\037\231\235\241\371\224\031\214-\231\231\231\231\231\235\241\370\355\017\355\017\355\017\307h\177h}\035\347<\231\231\231\231\231\2323\214c8\305\002c\005\252\252\252\252\252\252q\234\347\031\315\006\366\t\360\317\377\377\377\377\377\343\260O\214c\260O\214P&\030=UUUUU[\004\370v\t\373\004\375\202|;\004\375\202z\037\307\034\231\231\231\231\231\232\030\341\206\030\341\205\002a\202\26333333\0341\307\0341\306\203s\302\352\252\252\252\252\252\206xa\206xa@\230tq\376*\252\252\252\252\253<3\317<3\316\237q\300\026\252\252\252\252\252\260\307\0140\307\014-\342\346\030\007\263333328c\2168c\215/H\377\340\t333338t\217\376\030a\322?\370an\033\236\000\340;UUUUU]#\377\340\017H\377\364\217\377H\377\370\003\322?\375#\377G\357\013\200'\377\377\377\377\377\370\003\336\027\300\037\000|\001\357\013\340\017\2004\314\300.\252\252\252\252\252\273\302\347x^\360\275\341s\274/xZ/p~\007\254\314\314\314\314\314\235\301\371\231\334\037\231m\037\314`\t33333=\301\370\356\017\356\017\356\017\307p\177p}\027\234\003\352\252\252\252\252\253\031\3061\234b\204\306\000\237\377\377\377\377\377\234g9\306sN\366\211\360\035\252\252\252\252\252\254v\211\361\214v\211\361\212\023\014\002\331\231\231\231\231\231\264O\207h\237\264O\332'\303\264O\332'\243\361\300\022fffffp\307\0140\307\014(L0}\252\252\252\252\252\254p\307\034p\307\033a\037s\300\023\377\377\377\377\377\360\317\0140\317\014(&\030\007\325UUUUVxg\236xg\235\013\307
\000I\231\231\231\231\231\303\0340\303\0340\243\230`:\314\314\314\314\314\310\341\2168\341\2164\027\260_\300.\252\252\252\252\252\260\354\027\360\303\016\301\177\014(f\014\001?\377\377\377\377\377\260_\301\330/\366\013\375\202\376\016\301\177\260_\240\370p=\252\252\252\252\252\254\030p`\301\207\006\ny\203\000I\231\231\231\231\231\303\203\016\03480\341\240\270\360\017fffffd\030\360`\301\217\006\n\031\203\000Z\252\252\252\252\252\307\203\036|\3703\347\264\031\3770\340\007\252\252\252\252\252\2500\340\301\203\016\014\027\021&\014\001\237\377\377\377\377\377\016\0148p\340\303\206\340.\343\300\013UUUUUPc\301\203\006<\030)0`\033fffffh\360c\307\217\006\367\t\360\002L\314\314\314\314\314w\t\361\214w\t\361\212\t\206\000\332\252\252\252\252\252\334'\303\270O\334'\356\023\341\334'\356\023\320\374p\002\177\377\377\377\377\3741\303\0141\303\n\t\206\001\272\252\252\252\252\252\216\030\343\216\030\343Gs\300\032\314\314\314\314\314\320\317\0140\317\014(&\030\001&fffffxg\236xg\235\013\307\001\375UUUUUC\0340\303\0340\247\230`\004\377\377\377\377\377\370\341\2168\341\2164\027\264_\300\033UUUUUP\355\027\360\303\016\321\177\014(f\014\003l\314\314\314\314\315\264_\301\332/\366\213\375\242\376\016\321\177\264_\240\370p\002L\314\314\314\314\314\030p`\301\207\006\n9\203\000mUUUUUC\203\016\03480\341\240\270\360\002\177\377\377\377\377\374\030\360`\301\217\006\n\031\203\007\375UUUUUG\203\036\201\360p\001?\377\377\377\377\374\014\034\014\014\014\034\014\013Y\177\366\006\000]UUUUU\203\201\203\203\203\201\203\203@.\036\000\35333333\201\207\201\201\201\207\201\201@f\006\000$\314\314\314\314\314\207\201\207\207\207\201\207\207@\336\016\000}UUUUU\201\203\201\201\201\203\201\201@&\006\000'\377\377\377\377\377\203\201\203\203\203\201\203\203@n>\001\355UUUUU\201\217\201\201\201\217\201\201@&\006\000[33333\217\201\217\217\217\201\217\217"), + MaxTimeMs: 1440000, + Data: 
[]byte("\x00\x19\x00\xff?PbM\xd2\xf1\xa9\xfc\x8c\xa4\x94e$\xa2@(\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00@2ffffff?\xf0\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00?\xf0\x00\x00\x00\x00\x00\x00\xf8\xea`\xd6/v\x03\xd2\x1f\xc2_\xff\xd8\x0f\t\x7f\xff\t\x7f\xff\t\x7f\xff`<%\xff\xfc%\xff\xf4\xbda{4\x9f\xff\xff\xff\xff\xff\xfd\x80\xf5\x85\xec\a\xb0\x1e\xc0z\xc2\xf6\x03\xd8\r\xa4\x8f\xbd\xa0\xf5\xeb\x9f\xff\xff\xff\xff\xff\xfda{A\xeb\v\xd6\x17\xac/h=az\xc2о\xc0\xb8\xac\xcc\xcc\xcc\xcc\xcc\xdbA\xec\v\xda\x0fh=\xa0\xf6\x05\xed\a\xb4\x1a\t\x99\x9333333;\x02\xe7`^\xc0\xbd\x81s\xb0/`Z8\xd4'\xea\x1f\xbc\xea\x13\xe6gP\x9f2\xdc$\xee\a\xbb)\xff\xff\xff\xff\xff\xffP\x9f\xb8\x1e\xa1?P\x9f\xa8O\xdc\x0fP\x9f\xa8Om\x17\xfbB\xf6\xe7\xb5UUUUUw\x03\xda\x17\xb8\x1e\xe0{\x81\xed\v\xdc\x0fp4\x99\x9d\x99\x99\x99\x99\x99\x9eй\xda\x17\xb4/h\\\xed\v\xda\x16\xd87{\x03\xfb2\xe4\xcc\xcc\xcc\xcc\xcc\xe7`~fv\a\xe6Q1ݕ\xaa\xaa\xaa\xaa\xaa\xab\xb0?\x1d\x81\xfd\x81\xfd\x81\xf8\xec\x0f\xec\x0f\xa5\xe7\xb7<\xff\xff\xff\xff\xff\xff\x19\xc61\x9cb\xd4O\xc6:\xf5\xef\xff\xff\xff\xff\xff\xfc\xe39\xce3\x9a\x05\xeb\x13\xe0\xac\xcc\xcc\xcc\xcc\xcc\xc7X\x9f\x18\xc7X\x9f\x18\xa0\xce\xf0pə\x99\x99\x99\x99\xb5\x89\xfb\xc1\xeb\x13\xf5\x89\xfa\xc4\xfd\xe0\xf5\x89\xfa\xc4\xf4\x0f\xdc\x17\a\xaa\xaa\xaa\xaa\xaa\xabx=\xc1{\xc1\xef\a\xbc\x1e\xe0\xbd\xe0\xf7\x83C\x99\x8e\x7f\xff\xff\xff\xff\xff\xb8.w\x05\xee\v\xdc\x17;\x82\xf7\x05\xa0^\xd0\xfc\x16\xaa\xaa\xaa\xaa\xaa\xa9\xda\x1f\x99\x9d\xa1\xf9\x94\x19\x8c-\x99\x99\x99\x99\x99\x9d\xa1\xf8\xed\x0f\xed\x0f\xed\x0f\xc7h\x7fh}\x1d\xe7<\x99\x99\x99\x99\x99\x9a3\x8cc8\xc5\x02c\x05\xaa\xaa\xaa\xaa\xaa\xaaq\x9c\xe7\x19\xcd\x06\xf6\t\xf0\xcf\xff\xff\xff\xff\xff\xe3\xb0O\x8cc\xb0O\x8cP&\x18=UUUUU[\x04\xf8v\t\xfb\x04\xfd\x82|;\x04\xfd\x82z\x1f\xc7\x1c\x99\x99\x99\x99\x99\x9a\x18\xe1\x86\x18\xe1\x84"), }, }, }, diff --git a/storage/series.go b/storage/series.go index e88964d88..b111505aa 100644 --- a/storage/series.go +++ b/storage/series.go @@ -22,7 +22,6 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" - "github.com/prometheus/prometheus/tsdb/tsdbutil" ) type SeriesEntry struct { @@ -42,7 +41,7 @@ func (s *ChunkSeriesEntry) Labels() labels.Labels { return func (s *ChunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator { return s.ChunkIteratorFn(it) } // NewListSeries returns series entry with iterator that allows to iterate over provided samples. -func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry { +func NewListSeries(lset labels.Labels, s []chunks.Sample) *SeriesEntry { samplesS := Samples(samples(s)) return &SeriesEntry{ Lset: lset, @@ -59,10 +58,10 @@ func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry { // NewListChunkSeriesFromSamples returns chunk series entry that allows to iterate over provided samples. // NOTE: It uses inefficient chunks encoding implementation, not caring about chunk size. // Use only for testing. 
-func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]tsdbutil.Sample) *ChunkSeriesEntry { +func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]chunks.Sample) *ChunkSeriesEntry { chksFromSamples := make([]chunks.Meta, 0, len(samples)) for _, s := range samples { - cfs, err := tsdbutil.ChunkFromSamples(s) + cfs, err := chunks.ChunkFromSamples(s) if err != nil { return &ChunkSeriesEntry{ Lset: lset, @@ -98,14 +97,14 @@ type listSeriesIterator struct { idx int } -type samples []tsdbutil.Sample +type samples []chunks.Sample -func (s samples) Get(i int) tsdbutil.Sample { return s[i] } -func (s samples) Len() int { return len(s) } +func (s samples) Get(i int) chunks.Sample { return s[i] } +func (s samples) Len() int { return len(s) } -// Samples interface allows to work on arrays of types that are compatible with tsdbutil.Sample. +// Samples interface allows to work on arrays of types that are compatible with chunks.Sample. type Samples interface { - Get(i int) tsdbutil.Sample + Get(i int) chunks.Sample Len() int } @@ -412,9 +411,9 @@ func (e errChunksIterator) Err() error { return e.err } // ExpandSamples iterates over all samples in the iterator, buffering all in slice. // Optionally it takes samples constructor, useful when you want to compare sample slices with different // sample implementations. if nil, sample type from this package will be used. -func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) { +func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample) ([]chunks.Sample, error) { if newSampleFn == nil { - newSampleFn = func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample { + newSampleFn = func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample { switch { case h != nil: return hSample{t, h} @@ -426,7 +425,7 @@ func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64, } } - var result []tsdbutil.Sample + var result []chunks.Sample for { switch iter.Next() { case chunkenc.ValNone: diff --git a/storage/series_test.go b/storage/series_test.go index 5c74fae09..669ade3a1 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -25,7 +25,6 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" - "github.com/prometheus/prometheus/tsdb/tsdbutil" ) func TestListSeriesIterator(t *testing.T) { @@ -78,11 +77,11 @@ func TestListSeriesIterator(t *testing.T) { func TestChunkSeriesSetToSeriesSet(t *testing.T) { series := []struct { lbs labels.Labels - samples []tsdbutil.Sample + samples []chunks.Sample }{ { lbs: labels.FromStrings("__name__", "up", "instance", "localhost:8080"), - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ fSample{t: 1, f: 1}, fSample{t: 2, f: 2}, fSample{t: 3, f: 3}, @@ -90,7 +89,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) { }, }, { lbs: labels.FromStrings("__name__", "up", "instance", "localhost:8081"), - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ fSample{t: 1, f: 2}, fSample{t: 2, f: 3}, fSample{t: 3, f: 4}, @@ -126,7 +125,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) { } type histogramTest struct { - samples []tsdbutil.Sample + samples []chunks.Sample expectedCounterResetHeaders 
[]chunkenc.CounterResetHeader } @@ -270,34 +269,34 @@ func TestHistogramSeriesToChunks(t *testing.T) { tests := map[string]histogramTest{ "single histogram to single chunk": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ hSample{t: 1, h: h1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two histograms encoded to a single chunk": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ hSample{t: 1, h: h1}, hSample{t: 2, h: h2}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two histograms encoded to two chunks": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ hSample{t: 1, h: h2}, hSample{t: 2, h: h1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, }, "histogram and stale sample encoded to two chunks": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ hSample{t: 1, h: staleHistogram}, hSample{t: 2, h: h1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "histogram and reduction in bucket encoded to two chunks": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ hSample{t: 1, h: h1}, hSample{t: 2, h: h2down}, }, @@ -305,34 +304,34 @@ func TestHistogramSeriesToChunks(t *testing.T) { }, // Float histograms. "single float histogram to single chunk": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ fhSample{t: 1, fh: fh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two float histograms encoded to a single chunk": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ fhSample{t: 1, fh: fh1}, fhSample{t: 2, fh: fh2}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two float histograms encoded to two chunks": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ fhSample{t: 1, fh: fh2}, fhSample{t: 2, fh: fh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, }, "float histogram and stale sample encoded to two chunks": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ fhSample{t: 1, fh: staleFloatHistogram}, fhSample{t: 2, fh: fh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "float histogram and reduction in bucket encoded to two chunks": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ fhSample{t: 1, fh: fh1}, fhSample{t: 2, fh: fh2down}, }, @@ -340,61 +339,61 @@ func TestHistogramSeriesToChunks(t *testing.T) { }, // Mixed. 
"histogram and float histogram encoded to two chunks": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ hSample{t: 1, h: h1}, fhSample{t: 2, fh: fh2}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "float histogram and histogram encoded to two chunks": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ fhSample{t: 1, fh: fh1}, hSample{t: 2, h: h2}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "histogram and stale float histogram encoded to two chunks": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ hSample{t: 1, h: h1}, fhSample{t: 2, fh: staleFloatHistogram}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "single gauge histogram encoded to one chunk": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ hSample{t: 1, h: gh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "two gauge histograms encoded to one chunk when counter increases": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ hSample{t: 1, h: gh1}, hSample{t: 2, h: gh2}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "two gauge histograms encoded to one chunk when counter decreases": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ hSample{t: 1, h: gh2}, hSample{t: 2, h: gh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "single gauge float histogram encoded to one chunk": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ fhSample{t: 1, fh: gfh1}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "two float gauge histograms encoded to one chunk when counter increases": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ fhSample{t: 1, fh: gfh1}, fhSample{t: 2, fh: gfh2}, }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "two float gauge histograms encoded to one chunk when counter decreases": { - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ fhSample{t: 1, fh: gfh2}, fhSample{t: 2, fh: gfh1}, }, @@ -411,7 +410,7 @@ func TestHistogramSeriesToChunks(t *testing.T) { func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { lbs := labels.FromStrings("__name__", "up", "instance", "localhost:8080") - copiedSamples := []tsdbutil.Sample{} + copiedSamples := []chunks.Sample{} for _, s := range test.samples { switch cs := s.(type) { case hSample: @@ -470,7 +469,7 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { } } -func expandHistogramSamples(chunks []chunks.Meta) (result []tsdbutil.Sample) { +func expandHistogramSamples(chunks []chunks.Meta) (result []chunks.Sample) { if len(chunks) == 0 { return } diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index 5c71d548f..7eda6110c 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -33,6 +33,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/wlog" @@ -132,7 +133,7 @@ func TestCommit(t *testing.T) { lset := labels.New(l...) 
for i := 0; i < numDatapoints; i++ { - sample := tsdbutil.GenerateSamples(0, 1) + sample := chunks.GenerateSamples(0, 1) ref, err := app.Append(0, lset, sample[0].T(), sample[0].F()) require.NoError(t, err) @@ -247,7 +248,7 @@ func TestRollback(t *testing.T) { lset := labels.New(l...) for i := 0; i < numDatapoints; i++ { - sample := tsdbutil.GenerateSamples(0, 1) + sample := chunks.GenerateSamples(0, 1) _, err := app.Append(0, lset, sample[0].T(), sample[0].F()) require.NoError(t, err) } diff --git a/tsdb/block_test.go b/tsdb/block_test.go index e0d928aca..d65d76d8f 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -36,7 +36,6 @@ import ( "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/fileutil" - "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/wlog" ) @@ -175,7 +174,7 @@ func TestCorruptedChunk(t *testing.T) { t.Run(tc.name, func(t *testing.T) { tmpdir := t.TempDir() - series := storage.NewListSeries(labels.FromStrings("a", "b"), []tsdbutil.Sample{sample{1, 1, nil, nil}}) + series := storage.NewListSeries(labels.FromStrings("a", "b"), []chunks.Sample{sample{1, 1, nil, nil}}) blockDir := createBlock(t, tmpdir, []storage.Series{series}) files, err := sequenceFiles(chunkDir(blockDir)) require.NoError(t, err) @@ -218,7 +217,7 @@ func TestLabelValuesWithMatchers(t *testing.T) { seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings( "tens", fmt.Sprintf("value%d", i/10), "unique", fmt.Sprintf("value%d", i), - ), []tsdbutil.Sample{sample{100, 0, nil, nil}})) + ), []chunks.Sample{sample{100, 0, nil, nil}})) } blockDir := createBlock(t, tmpdir, seriesEntries) @@ -353,12 +352,12 @@ func TestReadIndexFormatV1(t *testing.T) { q, err := NewBlockQuerier(block, 0, 1000) require.NoError(t, err) require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")), - map[string][]tsdbutil.Sample{`{foo="bar"}`: {sample{t: 1, f: 2}}}) + map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 1, f: 2}}}) q, err = NewBlockQuerier(block, 0, 1000) require.NoError(t, err) require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$")), - map[string][]tsdbutil.Sample{ + map[string][]chunks.Sample{ `{foo="bar"}`: {sample{t: 1, f: 2}}, `{foo="baz"}`: {sample{t: 3, f: 4}}, }) @@ -376,7 +375,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) { "a_unique", fmt.Sprintf("value%d", i), "b_tens", fmt.Sprintf("value%d", i/(metricCount/10)), "c_ninety", fmt.Sprintf("value%d", i/(metricCount/10)/9), // "0" for the first 90%, then "1" - ), []tsdbutil.Sample{sample{100, 0, nil, nil}})) + ), []chunks.Sample{sample{100, 0, nil, nil}})) } blockDir := createBlock(b, tmpdir, seriesEntries) @@ -412,13 +411,13 @@ func TestLabelNamesWithMatchers(t *testing.T) { for i := 0; i < 100; i++ { seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings( "unique", fmt.Sprintf("value%d", i), - ), []tsdbutil.Sample{sample{100, 0, nil, nil}})) + ), []chunks.Sample{sample{100, 0, nil, nil}})) if i%10 == 0 { seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings( "tens", fmt.Sprintf("value%d", i/10), "unique", fmt.Sprintf("value%d", i), - ), []tsdbutil.Sample{sample{100, 0, nil, nil}})) + ), []chunks.Sample{sample{100, 0, nil, nil}})) } if i%20 == 0 { @@ -426,7 +425,7 @@ func TestLabelNamesWithMatchers(t *testing.T) { "tens", fmt.Sprintf("value%d", i/10), "twenties", fmt.Sprintf("value%d", i/20), 
"unique", fmt.Sprintf("value%d", i), - ), []tsdbutil.Sample{sample{100, 0, nil, nil}})) + ), []chunks.Sample{sample{100, 0, nil, nil}})) } } @@ -552,7 +551,7 @@ func createHeadWithOOOSamples(tb testing.TB, w *wlog.WL, series []storage.Series require.NoError(tb, err) oooSampleLabels := make([]labels.Labels, 0, len(series)) - oooSamples := make([]tsdbutil.SampleSlice, 0, len(series)) + oooSamples := make([]chunks.SampleSlice, 0, len(series)) var it chunkenc.Iterator totalSamples := 0 @@ -561,7 +560,7 @@ func createHeadWithOOOSamples(tb testing.TB, w *wlog.WL, series []storage.Series ref := storage.SeriesRef(0) it = s.Iterator(it) lset := s.Labels() - os := tsdbutil.SampleSlice{} + os := chunks.SampleSlice{} count := 0 for it.Next() == chunkenc.ValFloat { totalSamples++ @@ -612,14 +611,14 @@ const ( // genSeries generates series of float64 samples with a given number of labels and values. func genSeries(totalSeries, labelCount int, mint, maxt int64) []storage.Series { - return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, 1, func(ts int64) tsdbutil.Sample { + return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, 1, func(ts int64) chunks.Sample { return sample{t: ts, f: rand.Float64()} }) } // genHistogramSeries generates series of histogram samples with a given number of labels and values. func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, floatHistogram bool) []storage.Series { - return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) tsdbutil.Sample { + return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) chunks.Sample { h := &histogram.Histogram{ Count: 7 + uint64(ts*5), ZeroCount: 2 + uint64(ts), @@ -653,7 +652,7 @@ func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, flo func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step int64, floatHistogram bool) []storage.Series { floatSample := false count := 0 - return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) tsdbutil.Sample { + return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) chunks.Sample { count++ var s sample if floatSample { @@ -694,7 +693,7 @@ func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step in }) } -func genSeriesFromSampleGenerator(totalSeries, labelCount int, mint, maxt, step int64, generator func(ts int64) tsdbutil.Sample) []storage.Series { +func genSeriesFromSampleGenerator(totalSeries, labelCount int, mint, maxt, step int64, generator func(ts int64) chunks.Sample) []storage.Series { if totalSeries == 0 || labelCount == 0 { return nil } @@ -707,7 +706,7 @@ func genSeriesFromSampleGenerator(totalSeries, labelCount int, mint, maxt, step for j := 1; len(lbls) < labelCount; j++ { lbls[defaultLabelName+strconv.Itoa(j)] = defaultLabelValue + strconv.Itoa(j) } - samples := make([]tsdbutil.Sample, 0, (maxt-mint)/step+1) + samples := make([]chunks.Sample, 0, (maxt-mint)/step+1) for t := mint; t < maxt; t += step { samples = append(samples, generator(t)) } @@ -727,7 +726,7 @@ func populateSeries(lbls []map[string]string, mint, maxt int64) []storage.Series if len(lbl) == 0 { continue } - samples := make([]tsdbutil.Sample, 0, maxt-mint+1) + samples := make([]chunks.Sample, 0, maxt-mint+1) for t := mint; t <= maxt; t++ { samples = append(samples, sample{t: t, f: rand.Float64()}) } diff --git a/tsdb/blockwriter_test.go 
b/tsdb/blockwriter_test.go index 84ea8d51b..d8240b53c 100644 --- a/tsdb/blockwriter_test.go +++ b/tsdb/blockwriter_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/prometheus/prometheus/tsdb/chunks" ) func TestBlockWriter(t *testing.T) { @@ -52,9 +52,9 @@ func TestBlockWriter(t *testing.T) { q, err := NewBlockQuerier(b, math.MinInt64, math.MaxInt64) require.NoError(t, err) series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")) - sample1 := []tsdbutil.Sample{sample{t: ts1, f: v1}} - sample2 := []tsdbutil.Sample{sample{t: ts2, f: v2}} - expectedSeries := map[string][]tsdbutil.Sample{"{a=\"b\"}": sample1, "{c=\"d\"}": sample2} + sample1 := []chunks.Sample{sample{t: ts1, f: v1}} + sample2 := []chunks.Sample{sample{t: ts2, f: v2}} + expectedSeries := map[string][]chunks.Sample{"{a=\"b\"}": sample1, "{c=\"d\"}": sample2} require.Equal(t, expectedSeries, series) require.NoError(t, w.Close()) diff --git a/tsdb/chunkenc/chunk.go b/tsdb/chunkenc/chunk.go index d6a6f4614..e7ff5b165 100644 --- a/tsdb/chunkenc/chunk.go +++ b/tsdb/chunkenc/chunk.go @@ -52,6 +52,20 @@ func IsValidEncoding(e Encoding) bool { return e == EncXOR || e == EncHistogram || e == EncFloatHistogram } +const ( + // MaxBytesPerXORChunk is the maximum size an XOR chunk can be. + MaxBytesPerXORChunk = 1024 + // TargetBytesPerHistogramChunk sets a size target for each histogram chunk. + TargetBytesPerHistogramChunk = 1024 + // MinSamplesPerHistogramChunk sets a minimum sample count for histogram chunks. This is desirable because a single + // histogram sample can be larger than TargetBytesPerHistogramChunk but we want to avoid too-small sample count + // chunks so we can achieve some measure of compression advantage even while dealing with really large histograms. + // Note that this minimum sample count is not enforced across chunk range boundaries (for example, if the chunk + // range is 100 and the first sample in the chunk range is 99, the next sample will be included in a new chunk + // resulting in the old chunk containing only a single sample). + MinSamplesPerHistogramChunk = 10 +) + // Chunk holds a sequence of sample pairs that can be iterated over and appended to. type Chunk interface { // Bytes returns the underlying byte slice of the chunk. diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go index 9817fe47a..88fc5924b 100644 --- a/tsdb/chunks/chunks.go +++ b/tsdb/chunks/chunks.go @@ -140,6 +140,73 @@ type Meta struct { OOOLastMinTime, OOOLastMaxTime int64 } +// ChunkFromSamples requires all samples to have the same type. +func ChunkFromSamples(s []Sample) (Meta, error) { + return ChunkFromSamplesGeneric(SampleSlice(s)) +} + +// ChunkFromSamplesGeneric requires all samples to have the same type. 
+func ChunkFromSamplesGeneric(s Samples) (Meta, error) { + emptyChunk := Meta{Chunk: chunkenc.NewXORChunk()} + mint, maxt := int64(0), int64(0) + + if s.Len() > 0 { + mint, maxt = s.Get(0).T(), s.Get(s.Len()-1).T() + } + + if s.Len() == 0 { + return emptyChunk, nil + } + + sampleType := s.Get(0).Type() + c, err := chunkenc.NewEmptyChunk(sampleType.ChunkEncoding()) + if err != nil { + return Meta{}, err + } + + ca, _ := c.Appender() + var newChunk chunkenc.Chunk + + for i := 0; i < s.Len(); i++ { + switch sampleType { + case chunkenc.ValFloat: + ca.Append(s.Get(i).T(), s.Get(i).F()) + case chunkenc.ValHistogram: + newChunk, _, ca, err = ca.AppendHistogram(nil, s.Get(i).T(), s.Get(i).H(), false) + if err != nil { + return emptyChunk, err + } + if newChunk != nil { + return emptyChunk, fmt.Errorf("did not expect to start a second chunk") + } + case chunkenc.ValFloatHistogram: + newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false) + if err != nil { + return emptyChunk, err + } + if newChunk != nil { + return emptyChunk, fmt.Errorf("did not expect to start a second chunk") + } + default: + panic(fmt.Sprintf("unknown sample type %s", sampleType.String())) + } + } + return Meta{ + MinTime: mint, + MaxTime: maxt, + Chunk: c, + }, nil +} + +// PopulatedChunk creates a chunk populated with samples every second starting at minTime +func PopulatedChunk(numSamples int, minTime int64) (Meta, error) { + samples := make([]Sample, numSamples) + for i := 0; i < numSamples; i++ { + samples[i] = sample{t: minTime + int64(i*1000), f: 1.0} + } + return ChunkFromSamples(samples) +} + // Iterator iterates over the chunks of a single time series. type Iterator interface { // At returns the current meta. diff --git a/tsdb/chunks/samples.go b/tsdb/chunks/samples.go new file mode 100644 index 000000000..638660c70 --- /dev/null +++ b/tsdb/chunks/samples.go @@ -0,0 +1,89 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
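Editorial sketch, not part of the patch: the ChunkFromSamples helper above can be combined with the GenerateSamples helper from the samples.go file that follows; all generated samples are floats, so they are encoded into a single XOR chunk whose Meta records the sample time range:

    package example

    import (
        "fmt"

        "github.com/prometheus/prometheus/tsdb/chunks"
    )

    // buildChunk encodes 120 generated float samples into one chunk and prints its time range.
    func buildChunk() error {
        meta, err := chunks.ChunkFromSamples(chunks.GenerateSamples(0, 120))
        if err != nil {
            return err
        }
        fmt.Println(meta.MinTime, meta.MaxTime) // 0 119 for these generated samples
        return nil
    }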
+ +package chunks + +import ( + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/tsdb/chunkenc" +) + +type Samples interface { + Get(i int) Sample + Len() int +} + +type Sample interface { + T() int64 + F() float64 + H() *histogram.Histogram + FH() *histogram.FloatHistogram + Type() chunkenc.ValueType +} + +type SampleSlice []Sample + +func (s SampleSlice) Get(i int) Sample { return s[i] } +func (s SampleSlice) Len() int { return len(s) } + +type sample struct { + t int64 + f float64 + h *histogram.Histogram + fh *histogram.FloatHistogram +} + +func (s sample) T() int64 { + return s.t +} + +func (s sample) F() float64 { + return s.f +} + +func (s sample) H() *histogram.Histogram { + return s.h +} + +func (s sample) FH() *histogram.FloatHistogram { + return s.fh +} + +func (s sample) Type() chunkenc.ValueType { + switch { + case s.h != nil: + return chunkenc.ValHistogram + case s.fh != nil: + return chunkenc.ValFloatHistogram + default: + return chunkenc.ValFloat + } +} + +// GenerateSamples starting at start and counting up numSamples. +func GenerateSamples(start, numSamples int) []Sample { + return generateSamples(start, numSamples, func(i int) Sample { + return sample{ + t: int64(i), + f: float64(i), + } + }) +} + +func generateSamples(start, numSamples int, gen func(int) Sample) []Sample { + samples := make([]Sample, 0, numSamples) + for i := start; i < start+numSamples; i++ { + samples = append(samples, gen(i)) + } + return samples +} diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 512a6ecfb..2ef25d91a 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -38,7 +38,6 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/tombstones" - "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/wlog" ) @@ -1316,7 +1315,7 @@ func TestHeadCompactionWithHistograms(t *testing.T) { minute := func(m int) int64 { return int64(m) * time.Minute.Milliseconds() } ctx := context.Background() appendHistogram := func( - lbls labels.Labels, from, to int, h *histogram.Histogram, exp *[]tsdbutil.Sample, + lbls labels.Labels, from, to int, h *histogram.Histogram, exp *[]chunks.Sample, ) { t.Helper() app := head.Appender(ctx) @@ -1345,7 +1344,7 @@ func TestHeadCompactionWithHistograms(t *testing.T) { } require.NoError(t, app.Commit()) } - appendFloat := func(lbls labels.Labels, from, to int, exp *[]tsdbutil.Sample) { + appendFloat := func(lbls labels.Labels, from, to int, exp *[]chunks.Sample) { t.Helper() app := head.Appender(ctx) for tsMinute := from; tsMinute <= to; tsMinute++ { @@ -1361,7 +1360,7 @@ func TestHeadCompactionWithHistograms(t *testing.T) { series2 = labels.FromStrings("foo", "bar2") series3 = labels.FromStrings("foo", "bar3") series4 = labels.FromStrings("foo", "bar4") - exp1, exp2, exp3, exp4 []tsdbutil.Sample + exp1, exp2, exp3, exp4 []chunks.Sample ) h := &histogram.Histogram{ Count: 15, @@ -1419,7 +1418,7 @@ func TestHeadCompactionWithHistograms(t *testing.T) { require.NoError(t, err) actHists := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*")) - require.Equal(t, map[string][]tsdbutil.Sample{ + require.Equal(t, map[string][]chunks.Sample{ series1.String(): exp1, series2.String(): exp2, series3.String(): exp3, diff --git a/tsdb/db_test.go b/tsdb/db_test.go index e8b6d7c3b..ffe8b7cc0 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -87,18 +87,18 @@ func openTestDB(t testing.TB, 
opts *Options, rngs []int64) (db *DB) { } // query runs a matcher query against the querier and fully expands its data. -func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]tsdbutil.Sample { +func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]chunks.Sample { ss := q.Select(false, nil, matchers...) defer func() { require.NoError(t, q.Close()) }() var it chunkenc.Iterator - result := map[string][]tsdbutil.Sample{} + result := map[string][]chunks.Sample{} for ss.Next() { series := ss.At() - samples := []tsdbutil.Sample{} + samples := []chunks.Sample{} it = series.Iterator(it) for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() { switch typ { @@ -131,12 +131,12 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[str } // queryAndExpandChunks runs a matcher query against the querier and fully expands its data into samples. -func queryAndExpandChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Matcher) map[string][][]tsdbutil.Sample { +func queryAndExpandChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Matcher) map[string][][]chunks.Sample { s := queryChunks(t, q, matchers...) - res := make(map[string][][]tsdbutil.Sample) + res := make(map[string][][]chunks.Sample) for k, v := range s { - var samples [][]tsdbutil.Sample + var samples [][]chunks.Sample for _, chk := range v { sam, err := storage.ExpandSamples(chk.Chunk.Iterator(nil), nil) require.NoError(t, err) @@ -222,7 +222,7 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) { querier, err := db.Querier(context.TODO(), 0, 1) require.NoError(t, err) seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) - require.Equal(t, map[string][]tsdbutil.Sample{}, seriesSet) + require.Equal(t, map[string][]chunks.Sample{}, seriesSet) err = app.Commit() require.NoError(t, err) @@ -233,7 +233,7 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) { seriesSet = query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) - require.Equal(t, map[string][]tsdbutil.Sample{`{foo="bar"}`: {sample{t: 0, f: 0}}}, seriesSet) + require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 0, f: 0}}}, seriesSet) } // TestNoPanicAfterWALCorruption ensures that querying the db after a WAL corruption doesn't cause a panic. @@ -243,7 +243,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) { // Append until the first mmaped head chunk. // This is to ensure that all samples can be read from the mmaped chunks when the WAL is corrupted. - var expSamples []tsdbutil.Sample + var expSamples []chunks.Sample var maxt int64 ctx := context.Background() { @@ -289,7 +289,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) { require.NoError(t, err) seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "", "")) // The last sample should be missing as it was after the WAL segment corruption. 
- require.Equal(t, map[string][]tsdbutil.Sample{`{foo="bar"}`: expSamples[0 : len(expSamples)-1]}, seriesSet) + require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: expSamples[0 : len(expSamples)-1]}, seriesSet) } } @@ -312,7 +312,7 @@ func TestDataNotAvailableAfterRollback(t *testing.T) { seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) - require.Equal(t, map[string][]tsdbutil.Sample{}, seriesSet) + require.Equal(t, map[string][]chunks.Sample{}, seriesSet) } func TestDBAppenderAddRef(t *testing.T) { @@ -362,7 +362,7 @@ func TestDBAppenderAddRef(t *testing.T) { res := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) - require.Equal(t, map[string][]tsdbutil.Sample{ + require.Equal(t, map[string][]chunks.Sample{ labels.FromStrings("a", "b").String(): { sample{t: 123, f: 0}, sample{t: 124, f: 1}, @@ -455,7 +455,7 @@ Outer: res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) - expSamples := make([]tsdbutil.Sample, 0, len(c.remaint)) + expSamples := make([]chunks.Sample, 0, len(c.remaint)) for _, ts := range c.remaint { expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil}) } @@ -615,7 +615,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) { ssMap := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) - require.Equal(t, map[string][]tsdbutil.Sample{ + require.Equal(t, map[string][]chunks.Sample{ labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}}, }, ssMap) @@ -632,7 +632,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) { ssMap = query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) - require.Equal(t, map[string][]tsdbutil.Sample{ + require.Equal(t, map[string][]chunks.Sample{ labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}, sample{10, 3, nil, nil}}, }, ssMap) } @@ -783,7 +783,7 @@ Outer: res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) - expSamples := make([]tsdbutil.Sample, 0, len(c.remaint)) + expSamples := make([]chunks.Sample, 0, len(c.remaint)) for _, ts := range c.remaint { expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil}) } @@ -869,9 +869,9 @@ func TestDB_e2e(t *testing.T) { }, } - seriesMap := map[string][]tsdbutil.Sample{} + seriesMap := map[string][]chunks.Sample{} for _, l := range lbls { - seriesMap[labels.New(l...).String()] = []tsdbutil.Sample{} + seriesMap[labels.New(l...).String()] = []chunks.Sample{} } db := openTestDB(t, nil, nil) @@ -884,7 +884,7 @@ func TestDB_e2e(t *testing.T) { for _, l := range lbls { lset := labels.New(l...) - series := []tsdbutil.Sample{} + series := []chunks.Sample{} ts := rand.Int63n(300) for i := 0; i < numDatapoints; i++ { @@ -942,7 +942,7 @@ func TestDB_e2e(t *testing.T) { mint := rand.Int63n(300) maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints)) - expected := map[string][]tsdbutil.Sample{} + expected := map[string][]chunks.Sample{} // Build the mockSeriesSet. for _, m := range matched { @@ -956,7 +956,7 @@ func TestDB_e2e(t *testing.T) { require.NoError(t, err) ss := q.Select(false, nil, qry.ms...) 
- result := map[string][]tsdbutil.Sample{} + result := map[string][]chunks.Sample{} for ss.Next() { x := ss.At() @@ -1220,7 +1220,7 @@ func TestTombstoneClean(t *testing.T) { res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) - expSamples := make([]tsdbutil.Sample, 0, len(c.remaint)) + expSamples := make([]chunks.Sample, 0, len(c.remaint)) for _, ts := range c.remaint { expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil}) } @@ -2386,8 +2386,8 @@ func TestDBReadOnly(t *testing.T) { logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) expBlocks []*Block expBlock *Block - expSeries map[string][]tsdbutil.Sample - expChunks map[string][][]tsdbutil.Sample + expSeries map[string][]chunks.Sample + expChunks map[string][][]chunks.Sample expDBHash []byte matchAll = labels.MustNewMatcher(labels.MatchEqual, "", "") err error @@ -2714,8 +2714,8 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) { require.Equal(t, map[string][]sample{`{foo="bar"}`: {{t: 0, f: 0}}}, seriesSet) } -func assureChunkFromSamples(t *testing.T, samples []tsdbutil.Sample) chunks.Meta { - chks, err := tsdbutil.ChunkFromSamples(samples) +func assureChunkFromSamples(t *testing.T, samples []chunks.Sample) chunks.Meta { + chks, err := chunks.ChunkFromSamples(samples) require.NoError(t, err) return chks } @@ -2723,11 +2723,11 @@ func assureChunkFromSamples(t *testing.T, samples []tsdbutil.Sample) chunks.Meta // TestChunkWriter_ReadAfterWrite ensures that chunk segment are cut at the set segment size and // that the resulted segments includes the expected chunks data. func TestChunkWriter_ReadAfterWrite(t *testing.T) { - chk1 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 1, nil, nil}}) - chk2 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 2, nil, nil}}) - chk3 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 3, nil, nil}}) - chk4 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 4, nil, nil}}) - chk5 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 5, nil, nil}}) + chk1 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 1, nil, nil}}) + chk2 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 2, nil, nil}}) + chk3 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 3, nil, nil}}) + chk4 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 4, nil, nil}}) + chk5 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 5, nil, nil}}) chunkSize := len(chk1.Chunk.Bytes()) + chunks.MaxChunkLengthFieldSize + chunks.ChunkEncodingSize + crc32.Size tests := []struct { @@ -2927,11 +2927,11 @@ func TestRangeForTimestamp(t *testing.T) { // Regression test for https://github.com/prometheus/prometheus/pull/6514. 
func TestChunkReader_ConcurrentReads(t *testing.T) { chks := []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 1, nil, nil}}), - assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 2, nil, nil}}), - assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 3, nil, nil}}), - assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 4, nil, nil}}), - assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 5, nil, nil}}), + assureChunkFromSamples(t, []chunks.Sample{sample{1, 1, nil, nil}}), + assureChunkFromSamples(t, []chunks.Sample{sample{1, 2, nil, nil}}), + assureChunkFromSamples(t, []chunks.Sample{sample{1, 3, nil, nil}}), + assureChunkFromSamples(t, []chunks.Sample{sample{1, 4, nil, nil}}), + assureChunkFromSamples(t, []chunks.Sample{sample{1, 5, nil, nil}}), } tempDir := t.TempDir() @@ -4156,7 +4156,7 @@ func TestOOOCompaction(t *testing.T) { addSample(90, 310) verifyDBSamples := func() { - var series1Samples, series2Samples []tsdbutil.Sample + var series1Samples, series2Samples []chunks.Sample for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, 350}} { fromMins, toMins := r[0], r[1] for min := fromMins; min <= toMins; min++ { @@ -4165,7 +4165,7 @@ func TestOOOCompaction(t *testing.T) { series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil}) } } - expRes := map[string][]tsdbutil.Sample{ + expRes := map[string][]chunks.Sample{ series1.String(): series1Samples, series2.String(): series2Samples, } @@ -4225,14 +4225,14 @@ func TestOOOCompaction(t *testing.T) { checkEmptyOOOChunk(series2) verifySamples := func(block *Block, fromMins, toMins int64) { - series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1) - series2Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1) + series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) + series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1) for min := fromMins; min <= toMins; min++ { ts := min * time.Minute.Milliseconds() series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil}) series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil}) } - expRes := map[string][]tsdbutil.Sample{ + expRes := map[string][]chunks.Sample{ series1.String(): series1Samples, series2.String(): series2Samples, } @@ -4356,14 +4356,14 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) { } verifySamples := func(block *Block, fromMins, toMins int64) { - series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1) - series2Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1) + series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) + series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1) for min := fromMins; min <= toMins; min++ { ts := min * time.Minute.Milliseconds() series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil}) series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil}) } - expRes := map[string][]tsdbutil.Sample{ + expRes := map[string][]chunks.Sample{ series1.String(): series1Samples, series2.String(): series2Samples, } @@ -4456,14 +4456,14 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) { } verifySamples := func(block *Block, fromMins, toMins int64) { - series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1) - series2Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1) + series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) + series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1) for min := fromMins; min <= toMins; min++ { ts := min * 
time.Minute.Milliseconds() series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil}) series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil}) } - expRes := map[string][]tsdbutil.Sample{ + expRes := map[string][]chunks.Sample{ series1.String(): series1Samples, series2.String(): series2Samples, } @@ -4549,14 +4549,14 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) { } verifySamples := func(fromMins, toMins int64) { - series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1) - series2Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1) + series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) + series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1) for min := fromMins; min <= toMins; min++ { ts := min * time.Minute.Milliseconds() series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil}) series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil}) } - expRes := map[string][]tsdbutil.Sample{ + expRes := map[string][]chunks.Sample{ series1.String(): series1Samples, series2.String(): series2Samples, } @@ -4598,7 +4598,7 @@ func Test_Querier_OOOQuery(t *testing.T) { series1 := labels.FromStrings("foo", "bar1") minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } - addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []tsdbutil.Sample) ([]tsdbutil.Sample, int) { + addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample) ([]chunks.Sample, int) { app := db.Appender(context.Background()) totalAppended := 0 for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() { @@ -4649,7 +4649,7 @@ func Test_Querier_OOOQuery(t *testing.T) { require.NoError(t, db.Close()) }() - var expSamples []tsdbutil.Sample + var expSamples []chunks.Sample // Add in-order samples. expSamples, _ = addSample(db, tc.inOrderMinT, tc.inOrderMaxT, tc.queryMinT, tc.queryMaxT, expSamples) @@ -4683,7 +4683,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { series1 := labels.FromStrings("foo", "bar1") minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } - addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []tsdbutil.Sample) ([]tsdbutil.Sample, int) { + addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample) ([]chunks.Sample, int) { app := db.Appender(context.Background()) totalAppended := 0 for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() { @@ -4734,7 +4734,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { require.NoError(t, db.Close()) }() - var expSamples []tsdbutil.Sample + var expSamples []chunks.Sample // Add in-order samples. 
expSamples, _ = addSample(db, tc.inOrderMinT, tc.inOrderMaxT, tc.queryMinT, tc.queryMaxT, expSamples) @@ -4754,7 +4754,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { require.NotNil(t, chks[series1.String()]) require.Equal(t, 1, len(chks)) require.Equal(t, float64(oooSamples), prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended), "number of ooo appended samples mismatch") - var gotSamples []tsdbutil.Sample + var gotSamples []chunks.Sample for _, chunk := range chks[series1.String()] { it := chunk.Chunk.Iterator(nil) for it.Next() == chunkenc.ValFloat { @@ -4782,7 +4782,7 @@ func TestOOOAppendAndQuery(t *testing.T) { s2 := labels.FromStrings("foo", "bar2") minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } - appendedSamples := make(map[string][]tsdbutil.Sample) + appendedSamples := make(map[string][]chunks.Sample) totalSamples := 0 addSample := func(lbls labels.Labels, fromMins, toMins int64, faceError bool) { app := db.Appender(context.Background()) @@ -4819,7 +4819,7 @@ func TestOOOAppendAndQuery(t *testing.T) { appendedSamples[k] = v } - expSamples := make(map[string][]tsdbutil.Sample) + expSamples := make(map[string][]chunks.Sample) for k, samples := range appendedSamples { for _, s := range samples { if s.T() < from { @@ -4903,7 +4903,7 @@ func TestOOODisabled(t *testing.T) { s1 := labels.FromStrings("foo", "bar1") minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } - expSamples := make(map[string][]tsdbutil.Sample) + expSamples := make(map[string][]chunks.Sample) totalSamples := 0 failedSamples := 0 addSample := func(lbls labels.Labels, fromMins, toMins int64, faceError bool) { @@ -4971,7 +4971,7 @@ func TestWBLAndMmapReplay(t *testing.T) { s1 := labels.FromStrings("foo", "bar1") minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } - expSamples := make(map[string][]tsdbutil.Sample) + expSamples := make(map[string][]chunks.Sample) totalSamples := 0 addSample := func(lbls labels.Labels, fromMins, toMins int64) { app := db.Appender(context.Background()) @@ -4987,7 +4987,7 @@ func TestWBLAndMmapReplay(t *testing.T) { require.NoError(t, app.Commit()) } - testQuery := func(exp map[string][]tsdbutil.Sample) { + testQuery := func(exp map[string][]chunks.Sample) { querier, err := db.Querier(context.TODO(), math.MinInt64, math.MaxInt64) require.NoError(t, err) @@ -5017,7 +5017,7 @@ func TestWBLAndMmapReplay(t *testing.T) { ms, created, err := db.head.getOrCreate(s1.Hash(), s1) require.False(t, created) require.NoError(t, err) - var s1MmapSamples []tsdbutil.Sample + var s1MmapSamples []chunks.Sample for _, mc := range ms.ooo.oooMmappedChunks { chk, err := db.head.chunkDiskMapper.Chunk(mc.ref) require.NoError(t, err) @@ -5076,7 +5076,7 @@ func TestWBLAndMmapReplay(t *testing.T) { require.Equal(t, oooMint, db.head.MinOOOTime()) require.Equal(t, oooMaxt, db.head.MaxOOOTime()) inOrderSample := expSamples[s1.String()][len(expSamples[s1.String()])-1] - testQuery(map[string][]tsdbutil.Sample{ + testQuery(map[string][]chunks.Sample{ s1.String(): append(s1MmapSamples, inOrderSample), }) require.NoError(t, db.Close()) @@ -5247,12 +5247,12 @@ func TestOOOCompactionFailure(t *testing.T) { require.Equal(t, int64(0), f.Size()) verifySamples := func(block *Block, fromMins, toMins int64) { - series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1) + series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) for min := fromMins; min <= toMins; min++ { ts := min * time.Minute.Milliseconds() series1Samples = 
append(series1Samples, sample{ts, float64(ts), nil, nil}) } - expRes := map[string][]tsdbutil.Sample{ + expRes := map[string][]chunks.Sample{ series1.String(): series1Samples, } @@ -5295,7 +5295,7 @@ func TestWBLCorruption(t *testing.T) { }) series1 := labels.FromStrings("foo", "bar1") - var allSamples, expAfterRestart []tsdbutil.Sample + var allSamples, expAfterRestart []chunks.Sample addSamples := func(fromMins, toMins int64, afterRestart bool) { app := db.Appender(context.Background()) for min := fromMins; min <= toMins; min++ { @@ -5367,12 +5367,12 @@ func TestWBLCorruption(t *testing.T) { require.NoError(t, err) require.Greater(t, f2.Size(), int64(100)) - verifySamples := func(expSamples []tsdbutil.Sample) { + verifySamples := func(expSamples []chunks.Sample) { sort.Slice(expSamples, func(i, j int) bool { return expSamples[i].T() < expSamples[j].T() }) - expRes := map[string][]tsdbutil.Sample{ + expRes := map[string][]chunks.Sample{ series1.String(): expSamples, } @@ -5441,7 +5441,7 @@ func TestOOOMmapCorruption(t *testing.T) { }) series1 := labels.FromStrings("foo", "bar1") - var allSamples, expInMmapChunks []tsdbutil.Sample + var allSamples, expInMmapChunks []chunks.Sample addSamples := func(fromMins, toMins int64, inMmapAfterCorruption bool) { app := db.Appender(context.Background()) for min := fromMins; min <= toMins; min++ { @@ -5475,12 +5475,12 @@ func TestOOOMmapCorruption(t *testing.T) { db.head.chunkDiskMapper.CutNewFile() addSamples(260, 290, false) - verifySamples := func(expSamples []tsdbutil.Sample) { + verifySamples := func(expSamples []chunks.Sample) { sort.Slice(expSamples, func(i, j int) bool { return expSamples[i].T() < expSamples[j].T() }) - expRes := map[string][]tsdbutil.Sample{ + expRes := map[string][]chunks.Sample{ series1.String(): expSamples, } @@ -5577,7 +5577,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { } series1 := labels.FromStrings("foo", "bar1") - addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool, allSamples []tsdbutil.Sample) []tsdbutil.Sample { + addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool, allSamples []chunks.Sample) []chunks.Sample { app := db.Appender(context.Background()) for min := fromMins; min <= toMins; min++ { ts := min * time.Minute.Milliseconds() @@ -5593,12 +5593,12 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { return allSamples } - verifySamples := func(t *testing.T, db *DB, expSamples []tsdbutil.Sample) { + verifySamples := func(t *testing.T, db *DB, expSamples []chunks.Sample) { sort.Slice(expSamples, func(i, j int) bool { return expSamples[i].T() < expSamples[j].T() }) - expRes := map[string][]tsdbutil.Sample{ + expRes := map[string][]chunks.Sample{ series1.String(): expSamples, } @@ -5626,7 +5626,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { } t.Run("increase time window", func(t *testing.T) { - var allSamples []tsdbutil.Sample + var allSamples []chunks.Sample db := getDB(30 * time.Minute.Milliseconds()) // In-order. @@ -5656,7 +5656,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { }) t.Run("decrease time window and increase again", func(t *testing.T) { - var allSamples []tsdbutil.Sample + var allSamples []chunks.Sample db := getDB(60 * time.Minute.Milliseconds()) // In-order. @@ -5695,7 +5695,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { }) t.Run("disabled to enabled", func(t *testing.T) { - var allSamples []tsdbutil.Sample + var allSamples []chunks.Sample db := getDB(0) // In-order. 
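These db_test.go hunks all follow one shape after the tsdbutil.Sample to chunks.Sample migration: expected values are accumulated as a []chunks.Sample slice and then compared against what a querier returns. A condensed in-package sketch of that shape (the helper name is hypothetical; it relies on the unexported sample type and the imports db_test.go already has):

// buildExpectedFloats mirrors the addSamples/verifySamples helpers in these tests:
// one float sample per minute, with the value equal to its millisecond timestamp.
func buildExpectedFloats(fromMins, toMins int64) []chunks.Sample {
	exp := make([]chunks.Sample, 0, toMins-fromMins+1)
	for min := fromMins; min <= toMins; min++ {
		ts := min * time.Minute.Milliseconds()
		exp = append(exp, sample{t: ts, f: float64(ts)})
	}
	return exp
}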
@@ -5724,7 +5724,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { }) t.Run("enabled to disabled", func(t *testing.T) { - var allSamples []tsdbutil.Sample + var allSamples []chunks.Sample db := getDB(60 * time.Minute.Milliseconds()) // In-order. @@ -5754,7 +5754,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { }) t.Run("disabled to disabled", func(t *testing.T) { - var allSamples []tsdbutil.Sample + var allSamples []chunks.Sample db := getDB(0) // In-order. @@ -5795,13 +5795,13 @@ func TestNoGapAfterRestartWithOOO(t *testing.T) { } verifySamples := func(t *testing.T, db *DB, fromMins, toMins int64) { - var expSamples []tsdbutil.Sample + var expSamples []chunks.Sample for min := fromMins; min <= toMins; min++ { ts := min * time.Minute.Milliseconds() expSamples = append(expSamples, sample{t: ts, f: float64(ts)}) } - expRes := map[string][]tsdbutil.Sample{ + expRes := map[string][]chunks.Sample{ series1.String(): expSamples, } @@ -5898,7 +5898,7 @@ func TestWblReplayAfterOOODisableAndRestart(t *testing.T) { }) series1 := labels.FromStrings("foo", "bar1") - var allSamples []tsdbutil.Sample + var allSamples []chunks.Sample addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) for min := fromMins; min <= toMins; min++ { @@ -5915,12 +5915,12 @@ func TestWblReplayAfterOOODisableAndRestart(t *testing.T) { // OOO samples. addSamples(250, 260) - verifySamples := func(expSamples []tsdbutil.Sample) { + verifySamples := func(expSamples []chunks.Sample) { sort.Slice(expSamples, func(i, j int) bool { return expSamples[i].T() < expSamples[j].T() }) - expRes := map[string][]tsdbutil.Sample{ + expRes := map[string][]chunks.Sample{ series1.String(): expSamples, } @@ -5957,7 +5957,7 @@ func TestPanicOnApplyConfig(t *testing.T) { }) series1 := labels.FromStrings("foo", "bar1") - var allSamples []tsdbutil.Sample + var allSamples []chunks.Sample addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) for min := fromMins; min <= toMins; min++ { @@ -6005,7 +6005,7 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) { }) series1 := labels.FromStrings("foo", "bar1") - var allSamples []tsdbutil.Sample + var allSamples []chunks.Sample addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) for min := fromMins; min <= toMins; min++ { @@ -6096,7 +6096,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { ctx := context.Background() appendHistogram := func( lbls labels.Labels, tsMinute int, h *histogram.Histogram, - exp *[]tsdbutil.Sample, expCRH histogram.CounterResetHint, + exp *[]chunks.Sample, expCRH histogram.CounterResetHint, ) { t.Helper() var err error @@ -6115,7 +6115,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { require.NoError(t, err) require.NoError(t, app.Commit()) } - appendFloat := func(lbls labels.Labels, tsMinute int, val float64, exp *[]tsdbutil.Sample) { + appendFloat := func(lbls labels.Labels, tsMinute int, val float64, exp *[]chunks.Sample) { t.Helper() app := db.Appender(ctx) _, err := app.Append(0, lbls, minute(tsMinute), val) @@ -6124,7 +6124,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { *exp = append(*exp, sample{t: minute(tsMinute), f: val}) } - testQuery := func(name, value string, exp map[string][]tsdbutil.Sample) { + testQuery := func(name, value string, exp map[string][]chunks.Sample) { t.Helper() q, err := db.Querier(ctx, math.MinInt64, math.MaxInt64) require.NoError(t, err) @@ -6155,7 
+6155,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { series2 = labels.FromStrings("foo", "bar2") series3 = labels.FromStrings("foo", "bar3") series4 = labels.FromStrings("foo", "bar4") - exp1, exp2, exp3, exp4 []tsdbutil.Sample + exp1, exp2, exp3, exp4 []chunks.Sample ) // TODO(codesome): test everything for negative buckets as well. @@ -6163,23 +6163,23 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { h := baseH.Copy() // This is shared across all sub tests. appendHistogram(series1, 100, h, &exp1, histogram.UnknownCounterReset) - testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1}) + testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) h.PositiveBuckets[0]++ h.NegativeBuckets[0] += 2 h.Count += 10 appendHistogram(series1, 101, h, &exp1, histogram.NotCounterReset) - testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1}) + testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) t.Run("changing schema", func(t *testing.T) { h.Schema = 2 appendHistogram(series1, 102, h, &exp1, histogram.UnknownCounterReset) - testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1}) + testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) // Schema back to old. h.Schema = 1 appendHistogram(series1, 103, h, &exp1, histogram.UnknownCounterReset) - testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1}) + testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) }) t.Run("new buckets incoming", func(t *testing.T) { @@ -6208,7 +6208,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { h.PositiveBuckets = append(h.PositiveBuckets, 1) h.Count += 3 appendHistogram(series1, 104, h, &exp1, histogram.NotCounterReset) - testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1}) + testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) // Because of the previous two histograms being on the active chunk, // and the next append is only adding a new bucket, the active chunk @@ -6246,7 +6246,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { // {2, 1, -1, 0, 1} -> {2, 1, 0, -1, 0, 1} h.PositiveBuckets = append(h.PositiveBuckets[:2], append([]int64{0}, h.PositiveBuckets[2:]...)...) appendHistogram(series1, 105, h, &exp1, histogram.NotCounterReset) - testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1}) + testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) // We add 4 more histograms to clear out the buffer and see the re-encoded histograms. 
appendHistogram(series1, 106, h, &exp1, histogram.NotCounterReset) @@ -6279,14 +6279,14 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { exp1[l-6] = sample{t: exp1[l-6].T(), h: h6} } - testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1}) + testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) }) t.Run("buckets disappearing", func(t *testing.T) { h.PositiveSpans[1].Length-- h.PositiveBuckets = h.PositiveBuckets[:len(h.PositiveBuckets)-1] appendHistogram(series1, 110, h, &exp1, histogram.CounterReset) - testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1}) + testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) }) }) @@ -6294,22 +6294,22 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { appendFloat(series2, 100, 100, &exp2) appendFloat(series2, 101, 101, &exp2) appendFloat(series2, 102, 102, &exp2) - testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2}) + testQuery("foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2}) h := baseH.Copy() appendHistogram(series2, 103, h, &exp2, histogram.UnknownCounterReset) appendHistogram(series2, 104, h, &exp2, histogram.NotCounterReset) appendHistogram(series2, 105, h, &exp2, histogram.NotCounterReset) - testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2}) + testQuery("foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2}) // Switching between float and histograms again. appendFloat(series2, 106, 106, &exp2) appendFloat(series2, 107, 107, &exp2) - testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2}) + testQuery("foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2}) appendHistogram(series2, 108, h, &exp2, histogram.UnknownCounterReset) appendHistogram(series2, 109, h, &exp2, histogram.NotCounterReset) - testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2}) + testQuery("foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2}) }) t.Run("series starting with histogram and then getting float", func(t *testing.T) { @@ -6317,21 +6317,21 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { appendHistogram(series3, 101, h, &exp3, histogram.UnknownCounterReset) appendHistogram(series3, 102, h, &exp3, histogram.NotCounterReset) appendHistogram(series3, 103, h, &exp3, histogram.NotCounterReset) - testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3}) + testQuery("foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3}) appendFloat(series3, 104, 100, &exp3) appendFloat(series3, 105, 101, &exp3) appendFloat(series3, 106, 102, &exp3) - testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3}) + testQuery("foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3}) // Switching between histogram and float again. 
appendHistogram(series3, 107, h, &exp3, histogram.UnknownCounterReset) appendHistogram(series3, 108, h, &exp3, histogram.NotCounterReset) - testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3}) + testQuery("foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3}) appendFloat(series3, 109, 106, &exp3) appendFloat(series3, 110, 107, &exp3) - testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3}) + testQuery("foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3}) }) t.Run("query mix of histogram and float series", func(t *testing.T) { @@ -6340,7 +6340,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { appendFloat(series4, 101, 101, &exp4) appendFloat(series4, 102, 102, &exp4) - testQuery("foo", "bar.*", map[string][]tsdbutil.Sample{ + testQuery("foo", "bar.*", map[string][]chunks.Sample{ series1.String(): exp1, series2.String(): exp2, series3.String(): exp3, @@ -6365,7 +6365,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { ctx := context.Background() var it chunkenc.Iterator - exp := make(map[string][]tsdbutil.Sample) + exp := make(map[string][]chunks.Sample) for _, series := range blockSeries { createBlock(t, db.Dir(), series) @@ -6448,7 +6448,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { t.Run("serial blocks with either histograms or floats in a block and not both", func(t *testing.T) { testBlockQuerying(t, genHistogramSeries(10, 5, minute(0), minute(119), minute(1), floatHistogram), - genSeriesFromSampleGenerator(10, 5, minute(120), minute(239), minute(1), func(ts int64) tsdbutil.Sample { + genSeriesFromSampleGenerator(10, 5, minute(120), minute(239), minute(1), func(ts int64) chunks.Sample { return sample{t: ts, f: rand.Float64()} }), genHistogramSeries(10, 5, minute(240), minute(359), minute(1), floatHistogram), @@ -6460,7 +6460,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { genHistogramAndFloatSeries(10, 5, minute(0), minute(60), minute(1), floatHistogram), genHistogramSeries(10, 5, minute(61), minute(120), minute(1), floatHistogram), genHistogramAndFloatSeries(10, 5, minute(121), minute(180), minute(1), floatHistogram), - genSeriesFromSampleGenerator(10, 5, minute(181), minute(240), minute(1), func(ts int64) tsdbutil.Sample { + genSeriesFromSampleGenerator(10, 5, minute(181), minute(240), minute(1), func(ts int64) chunks.Sample { return sample{t: ts, f: rand.Float64()} }), ) @@ -6477,7 +6477,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { t.Run("overlapping blocks with only histograms and only float in a series", func(t *testing.T) { testBlockQuerying(t, genHistogramSeries(10, 5, minute(0), minute(120), minute(3), floatHistogram), - genSeriesFromSampleGenerator(10, 5, minute(1), minute(120), minute(3), func(ts int64) tsdbutil.Sample { + genSeriesFromSampleGenerator(10, 5, minute(1), minute(120), minute(3), func(ts int64) chunks.Sample { return sample{t: ts, f: rand.Float64()} }), genHistogramSeries(10, 5, minute(2), minute(120), minute(3), floatHistogram), @@ -6489,7 +6489,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { genHistogramAndFloatSeries(10, 5, minute(0), minute(60), minute(3), floatHistogram), genHistogramSeries(10, 5, minute(46), minute(100), minute(3), floatHistogram), genHistogramAndFloatSeries(10, 5, minute(89), minute(140), minute(3), floatHistogram), - genSeriesFromSampleGenerator(10, 5, minute(126), minute(200), minute(3), func(ts int64) tsdbutil.Sample { + 
genSeriesFromSampleGenerator(10, 5, minute(126), minute(200), minute(3), func(ts int64) chunks.Sample { return sample{t: ts, f: rand.Float64()} }), ) @@ -6546,7 +6546,7 @@ func TestNativeHistogramFlag(t *testing.T) { q, err := db.Querier(context.Background(), math.MinInt, math.MaxInt64) require.NoError(t, err) act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) - require.Equal(t, map[string][]tsdbutil.Sample{ + require.Equal(t, map[string][]chunks.Sample{ l.String(): {sample{t: 200, h: h}, sample{t: 205, fh: h.ToFloat()}}, }, act) } @@ -6559,7 +6559,7 @@ func TestNativeHistogramFlag(t *testing.T) { // actual series contains a counter reset hint "UnknownCounterReset". // "GaugeType" hints are still strictly checked, and any "UnknownCounterReset" // in an expected series has to be matched precisely by the actual series. -func compareSeries(t require.TestingT, expected, actual map[string][]tsdbutil.Sample) { +func compareSeries(t require.TestingT, expected, actual map[string][]chunks.Sample) { if len(expected) != len(actual) { // The reason for the difference is not the counter reset hints // (alone), so let's use the pretty diffing by the require diff --git a/tsdb/head.go b/tsdb/head.go index 34a289a98..cfda3f644 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -42,7 +42,6 @@ import ( "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tombstones" - "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/util/zeropool" ) @@ -1918,7 +1917,7 @@ type sample struct { fh *histogram.FloatHistogram } -func newSample(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample { +func newSample(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample { return sample{t, v, h, fh} } @@ -1967,7 +1966,8 @@ type memSeries struct { mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay. - nextAt int64 // Timestamp at which to cut the next chunk. + nextAt int64 // Timestamp at which to cut the next chunk. + histogramChunkHasComputedEndTime bool // True if nextAt has been predicted for the current histograms chunk; false otherwise. // We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates. lastValue float64 diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 8c548fcd9..901694375 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -1160,7 +1160,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui // Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway. prevApp, _ := s.app.(*chunkenc.HistogramAppender) - c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, o) + c, sampleInOrder, chunkCreated := s.histogramsAppendPreprocessor(t, chunkenc.EncHistogram, o) if !sampleInOrder { return sampleInOrder, chunkCreated } @@ -1217,7 +1217,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, // Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway. 
prevApp, _ := s.app.(*chunkenc.FloatHistogramAppender)
 
-	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, o)
+	c, sampleInOrder, chunkCreated := s.histogramsAppendPreprocessor(t, chunkenc.EncFloatHistogram, o)
 	if !sampleInOrder {
 		return sampleInOrder, chunkCreated
 	}
@@ -1262,10 +1262,16 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
 	return true, true
 }
 
-// appendPreprocessor takes care of cutting new chunks and m-mapping old chunks.
+// appendPreprocessor takes care of cutting new XOR chunks and m-mapping old ones. XOR chunks are cut based on the
+// number of samples they contain with a soft cap in bytes.
 // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
 // This should be called only when appending data.
 func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts) (c *memChunk, sampleInOrder, chunkCreated bool) {
+	// We target chunkenc.MaxBytesPerXORChunk as a hard cap for the size of an XOR chunk. We must determine whether to cut
+	// a new head chunk without knowing the size of the next sample, however, so we assume the next sample will be a
+	// maximally-sized sample (19 bytes).
+	const maxBytesPerXORChunk = chunkenc.MaxBytesPerXORChunk - 19
+
 	c = s.headChunks
 
 	if c == nil {
@@ -1276,6 +1282,9 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts
 		// There is no head chunk in this series yet, create the first chunk for the sample.
 		c = s.cutNewHeadChunk(t, e, o.chunkRange)
 		chunkCreated = true
+	} else if len(c.chunk.Bytes()) > maxBytesPerXORChunk {
+		c = s.cutNewHeadChunk(t, e, o.chunkRange)
+		chunkCreated = true
 	}
 
 	// Out of order sample.
@@ -1304,7 +1313,7 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts
 	// the remaining chunks in the current chunk range.
 	// At latest it must happen at the timestamp set when the chunk was cut.
 	if numSamples == o.samplesPerChunk/4 {
-		s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt)
+		s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt, 4)
 	}
 	// If numSamples > samplesPerChunk*2 then our previous prediction was invalid,
 	// most likely because samples rate has changed and now they are arriving more frequently.
@@ -1319,17 +1328,95 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts
 	return c, true, chunkCreated
 }
 
+// histogramsAppendPreprocessor takes care of cutting new histogram chunks and m-mapping old ones. Histogram chunks are
+// cut based on their size in bytes.
+// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
+// This should be called only when appending data.
+func (s *memSeries) histogramsAppendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts) (c *memChunk, sampleInOrder, chunkCreated bool) {
+	c = s.headChunks
+
+	if c == nil {
+		if len(s.mmappedChunks) > 0 && s.mmappedChunks[len(s.mmappedChunks)-1].maxTime >= t {
+			// Out of order sample. Sample timestamp is already in the mmapped chunks, so ignore it.
+			return c, false, false
+		}
+		// There is no head chunk in this series yet, create the first chunk for the sample.
+		c = s.cutNewHeadChunk(t, e, o.chunkRange)
+		chunkCreated = true
+	}
+
+	// Out of order sample.
+	if c.maxTime >= t {
+		return c, false, chunkCreated
+	}
+
+	if c.chunk.Encoding() != e {
+		// The chunk encoding expected by this append is different than the head chunk's
+		// encoding. So we cut a new chunk with the expected encoding.
+		c = s.cutNewHeadChunk(t, e, o.chunkRange)
+		chunkCreated = true
+	}
+
+	numSamples := c.chunk.NumSamples()
+	targetBytes := chunkenc.TargetBytesPerHistogramChunk
+	numBytes := len(c.chunk.Bytes())
+
+	if numSamples == 0 {
+		// It could be the new chunk created after reading the chunk snapshot,
+		// hence we fix the minTime of the chunk here.
+		c.minTime = t
+		s.nextAt = rangeForTimestamp(c.minTime, o.chunkRange)
+	}
+
+	// Below, we will enforce chunkenc.MinSamplesPerHistogramChunk. There are, however, two cases that supersede it:
+	// - The current chunk range is ending before chunkenc.MinSamplesPerHistogramChunk will be satisfied.
+	// - s.nextAt was set while loading a chunk snapshot with the intent that a new chunk be cut on the next append.
+	var nextChunkRangeStart int64
+	if s.histogramChunkHasComputedEndTime {
+		nextChunkRangeStart = rangeForTimestamp(c.minTime, o.chunkRange)
+	} else {
+		// If we haven't computed an end time yet, s.nextAt is either set to
+		// rangeForTimestamp(c.minTime, o.chunkRange) or was set while loading a chunk snapshot. Either way, we want to
+		// skip enforcing chunkenc.MinSamplesPerHistogramChunk.
+		nextChunkRangeStart = s.nextAt
+	}
+
+	// If we reach 25% of a chunk's desired maximum size, predict an end time
+	// for this chunk that will try to make samples equally distributed within
+	// the remaining chunks in the current chunk range.
+	// At the latest it must happen at the timestamp set when the chunk was cut.
+	if !s.histogramChunkHasComputedEndTime && numBytes >= targetBytes/4 {
+		ratioToFull := float64(targetBytes) / float64(numBytes)
+		s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt, ratioToFull)
+		s.histogramChunkHasComputedEndTime = true
+	}
+	// If numBytes > targetBytes*2 then our previous prediction was invalid. This could happen if the sample rate has
+	// increased or if the bucket/span count has increased.
+	// Note that next chunk will have its nextAt recalculated for the new rate.
+	if (t >= s.nextAt || numBytes >= targetBytes*2) && (numSamples >= chunkenc.MinSamplesPerHistogramChunk || t >= nextChunkRangeStart) {
+		c = s.cutNewHeadChunk(t, e, o.chunkRange)
+		chunkCreated = true
+	}
+
+	// The new chunk will also need a new computed end time.
+	if chunkCreated {
+		s.histogramChunkHasComputedEndTime = false
+	}
+
+	return c, true, chunkCreated
+}
+
 // computeChunkEndTime estimates the end timestamp based on the beginning of a
 // chunk, its current timestamp and the upper bound up to which we insert data.
-// It assumes that the time range is 1/4 full.
+// It assumes that the time range is 1/ratioToFull full.
 // Assuming that the samples will keep arriving at the same rate, it will make the
 // remaining n chunks within this chunk range (before max) equally sized.
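// For example, with illustrative numbers matching the updated TestComputeChunkEndTime:
// start=100, cur=200, max=1000 and ratioToFull=4 give n = (1000-100)/((200-100+1)*4) ≈ 2.2,
// so math.Floor(n) = 2 chunks remain and the estimated end time is 100 + (1000-100)/2 = 550.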
-func computeChunkEndTime(start, cur, max int64) int64 { - n := (max - start) / ((cur - start + 1) * 4) +func computeChunkEndTime(start, cur, max int64, ratioToFull float64) int64 { + n := float64(max-start) / (float64(cur-start+1) * ratioToFull) if n <= 1 { return max } - return start + (max-start)/n + return int64(float64(start) + float64(max-start)/math.Floor(n)) } func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange int64) *memChunk { diff --git a/tsdb/head_test.go b/tsdb/head_test.go index c7fea3f9a..a58afba87 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -416,7 +416,7 @@ func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { } // queryHead is a helper to query the head for a given time range and labelset. - queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]tsdbutil.Sample, error) { + queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]chunks.Sample, error) { q, err := NewBlockQuerier(head, int64(mint), int64(maxt)) if err != nil { return nil, err @@ -662,7 +662,7 @@ func TestHead_WALMultiRef(t *testing.T) { series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) // The samples before the new ref should be discarded since Head truncation // happens only after compacting the Head. - require.Equal(t, map[string][]tsdbutil.Sample{`{foo="bar"}`: { + require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: { sample{1700, 3, nil, nil}, sample{2000, 4, nil, nil}, }}, series) @@ -1143,8 +1143,8 @@ func TestHeadDeleteSimple(t *testing.T) { actSeriesSet := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value)) require.NoError(t, q.Close()) expSeriesSet := newMockSeriesSet([]storage.Series{ - storage.NewListSeries(lblsDefault, func() []tsdbutil.Sample { - ss := make([]tsdbutil.Sample, 0, len(c.smplsExp)) + storage.NewListSeries(lblsDefault, func() []chunks.Sample { + ss := make([]chunks.Sample, 0, len(c.smplsExp)) for _, s := range c.smplsExp { ss = append(ss, s) } @@ -1223,7 +1223,7 @@ func TestDeleteUntilCurMax(t *testing.T) { it = exps.Iterator(nil) resSamples, err := storage.ExpandSamples(it, newSample) require.NoError(t, err) - require.Equal(t, []tsdbutil.Sample{sample{11, 1, nil, nil}}, resSamples) + require.Equal(t, []chunks.Sample{sample{11, 1, nil, nil}}, resSamples) for res.Next() { } require.NoError(t, res.Err()) @@ -1321,9 +1321,9 @@ func TestDelete_e2e(t *testing.T) { {Name: "job", Value: "prom-k8s"}, }, } - seriesMap := map[string][]tsdbutil.Sample{} + seriesMap := map[string][]chunks.Sample{} for _, l := range lbls { - seriesMap[labels.New(l...).String()] = []tsdbutil.Sample{} + seriesMap[labels.New(l...).String()] = []chunks.Sample{} } hb, _ := newTestHead(t, 100000, wlog.CompressionNone, false) @@ -1334,7 +1334,7 @@ func TestDelete_e2e(t *testing.T) { app := hb.Appender(context.Background()) for _, l := range lbls { ls := labels.New(l...) 
- series := []tsdbutil.Sample{} + series := []chunks.Sample{} ts := rand.Int63n(300) for i := 0; i < numDatapoints; i++ { v := rand.Float64() @@ -1433,7 +1433,7 @@ func TestDelete_e2e(t *testing.T) { } } -func boundedSamples(full []tsdbutil.Sample, mint, maxt int64) []tsdbutil.Sample { +func boundedSamples(full []chunks.Sample, mint, maxt int64) []chunks.Sample { for len(full) > 0 { if full[0].T() >= mint { break @@ -1450,8 +1450,8 @@ func boundedSamples(full []tsdbutil.Sample, mint, maxt int64) []tsdbutil.Sample return full } -func deletedSamples(full []tsdbutil.Sample, dranges tombstones.Intervals) []tsdbutil.Sample { - ds := make([]tsdbutil.Sample, 0, len(full)) +func deletedSamples(full []chunks.Sample, dranges tombstones.Intervals) []chunks.Sample { + ds := make([]chunks.Sample, 0, len(full)) Outer: for _, s := range full { for _, r := range dranges { @@ -1466,44 +1466,58 @@ Outer: } func TestComputeChunkEndTime(t *testing.T) { - cases := []struct { + cases := map[string]struct { start, cur, max int64 + ratioToFull float64 res int64 }{ - { - start: 0, - cur: 250, - max: 1000, - res: 1000, + "exactly 1/4 full, even increment": { + start: 0, + cur: 250, + max: 1000, + ratioToFull: 4, + res: 1000, }, - { - start: 100, - cur: 200, - max: 1000, - res: 550, + "exactly 1/4 full, uneven increment": { + start: 100, + cur: 200, + max: 1000, + ratioToFull: 4, + res: 550, + }, + "decimal ratio to full": { + start: 5000, + cur: 5110, + max: 10000, + ratioToFull: 4.2, + res: 5500, }, // Case where we fit floored 0 chunks. Must catch division by 0 // and default to maximum time. - { - start: 0, - cur: 500, - max: 1000, - res: 1000, + "fit floored 0 chunks": { + start: 0, + cur: 500, + max: 1000, + ratioToFull: 4, + res: 1000, }, // Catch division by zero for cur == start. Strictly not a possible case. - { - start: 100, - cur: 100, - max: 1000, - res: 104, + "cur == start": { + start: 100, + cur: 100, + max: 1000, + ratioToFull: 4, + res: 104, }, } - for _, c := range cases { - got := computeChunkEndTime(c.start, c.cur, c.max) - if got != c.res { - t.Errorf("expected %d for (start: %d, cur: %d, max: %d), got %d", c.res, c.start, c.cur, c.max, got) - } + for testName, tc := range cases { + t.Run(testName, func(t *testing.T) { + got := computeChunkEndTime(tc.start, tc.cur, tc.max, tc.ratioToFull) + if got != tc.res { + t.Errorf("expected %d for (start: %d, cur: %d, max: %d, ratioToFull: %f), got %d", tc.res, tc.start, tc.cur, tc.max, tc.ratioToFull, got) + } + }) } } @@ -2967,7 +2981,7 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) { ) // Appends samples to span over 1.5 block ranges. - expSamples := make([]tsdbutil.Sample, 0) + expSamples := make([]chunks.Sample, 0) // 7 chunks with 15s scrape interval. for i := int64(0); i <= 120*7; i++ { ts := i * DefaultBlockDuration / (4 * 120) @@ -2997,7 +3011,7 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) { // Querying the querier that was got before compaction. series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) - require.Equal(t, map[string][]tsdbutil.Sample{`{a="b"}`: expSamples}, series) + require.Equal(t, map[string][]chunks.Sample{`{a="b"}`: expSamples}, series) wg.Wait() } @@ -3117,7 +3131,7 @@ func TestAppendHistogram(t *testing.T) { ingestTs := int64(0) app := head.Appender(context.Background()) - expHistograms := make([]tsdbutil.Sample, 0, 2*numHistograms) + expHistograms := make([]chunks.Sample, 0, 2*numHistograms) // Counter integer histograms. 
for _, h := range tsdbutil.GenerateTestHistograms(numHistograms) { @@ -3143,7 +3157,7 @@ func TestAppendHistogram(t *testing.T) { } } - expFloatHistograms := make([]tsdbutil.Sample, 0, 2*numHistograms) + expFloatHistograms := make([]chunks.Sample, 0, 2*numHistograms) // Counter float histograms. for _, fh := range tsdbutil.GenerateTestFloatHistograms(numHistograms) { @@ -3184,8 +3198,8 @@ func TestAppendHistogram(t *testing.T) { require.False(t, ss.Next()) it := s.Iterator(nil) - actHistograms := make([]tsdbutil.Sample, 0, len(expHistograms)) - actFloatHistograms := make([]tsdbutil.Sample, 0, len(expFloatHistograms)) + actHistograms := make([]chunks.Sample, 0, len(expHistograms)) + actFloatHistograms := make([]chunks.Sample, 0, len(expFloatHistograms)) for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() { switch typ { case chunkenc.ValHistogram: @@ -3199,13 +3213,13 @@ func TestAppendHistogram(t *testing.T) { compareSeries( t, - map[string][]tsdbutil.Sample{"dummy": expHistograms}, - map[string][]tsdbutil.Sample{"dummy": actHistograms}, + map[string][]chunks.Sample{"dummy": expHistograms}, + map[string][]chunks.Sample{"dummy": actHistograms}, ) compareSeries( t, - map[string][]tsdbutil.Sample{"dummy": expFloatHistograms}, - map[string][]tsdbutil.Sample{"dummy": actFloatHistograms}, + map[string][]chunks.Sample{"dummy": expFloatHistograms}, + map[string][]chunks.Sample{"dummy": actFloatHistograms}, ) }) } @@ -3222,7 +3236,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { s1 := labels.FromStrings("a", "b1") k1 := s1.String() numHistograms := 300 - exp := map[string][]tsdbutil.Sample{} + exp := map[string][]chunks.Sample{} ts := int64(0) var app storage.Appender for _, gauge := range []bool{true, false} { @@ -3273,10 +3287,10 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { head.mmapHeadChunks() } - // There should be 11 mmap chunks in s1. + // There should be 20 mmap chunks in s1. ms := head.series.getByHash(s1.Hash(), s1) - require.Len(t, ms.mmappedChunks, 11) - expMmapChunks := make([]*mmappedChunk, 0, 11) + require.Len(t, ms.mmappedChunks, 25) + expMmapChunks := make([]*mmappedChunk, 0, 20) for _, mmap := range ms.mmappedChunks { require.Greater(t, mmap.numSamples, uint16(0)) cpy := *mmap @@ -3408,9 +3422,9 @@ func TestChunkSnapshot(t *testing.T) { } numSeries := 10 - expSeries := make(map[string][]tsdbutil.Sample) - expHist := make(map[string][]tsdbutil.Sample) - expFloatHist := make(map[string][]tsdbutil.Sample) + expSeries := make(map[string][]chunks.Sample) + expHist := make(map[string][]chunks.Sample) + expFloatHist := make(map[string][]chunks.Sample) expTombstones := make(map[storage.SeriesRef]tombstones.Intervals) expExemplars := make([]ex, 0) histograms := tsdbutil.GenerateTestGaugeHistograms(481) @@ -3959,7 +3973,7 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { } func TestHistogramCounterResetHeader(t *testing.T) { - for _, floatHisto := range []bool{true, false} { + for _, floatHisto := range []bool{true} { // FIXME t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { l := labels.FromStrings("a", "b") head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) @@ -4026,10 +4040,16 @@ func TestHistogramCounterResetHeader(t *testing.T) { appendHistogram(h) checkExpCounterResetHeader(chunkenc.CounterReset) - // Add 2 non-counter reset histogram chunks. 
- for i := 0; i < 250; i++ { + // Add 2 non-counter reset histogram chunks (each chunk targets 1024 bytes which contains ~500 int histogram + // samples or ~1000 float histogram samples). + numAppend := 2000 + if floatHisto { + numAppend = 1000 + } + for i := 0; i < numAppend; i++ { appendHistogram(h) } + checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset) // Changing schema will cut a new chunk with unknown counter reset. @@ -4054,8 +4074,8 @@ func TestHistogramCounterResetHeader(t *testing.T) { appendHistogram(h) checkExpCounterResetHeader(chunkenc.CounterReset) - // Add 2 non-counter reset histograms. Just to have some non-counter reset chunks in between. - for i := 0; i < 250; i++ { + // Add 2 non-counter reset histogram chunks. Just to have some non-counter reset chunks in between. + for i := 0; i < 2000; i++ { appendHistogram(h) } checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset) @@ -4088,7 +4108,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { floatHists := tsdbutil.GenerateTestFloatHistograms(10) lbls := labels.FromStrings("a", "b") - var expResult []tsdbutil.Sample + var expResult []chunks.Sample checkExpChunks := func(count int) { ms, created, err := db.Head().getOrCreate(lbls.Hash(), lbls) require.NoError(t, err) @@ -4098,59 +4118,59 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { } appends := []struct { - samples []tsdbutil.Sample + samples []chunks.Sample expChunks int err error // If this is empty, samples above will be taken instead of this. - addToExp []tsdbutil.Sample + addToExp []chunks.Sample }{ // Histograms that end up in the expected samples are copied here so that we // can independently set the CounterResetHint later. { - samples: []tsdbutil.Sample{sample{t: 100, h: hists[0].Copy()}}, + samples: []chunks.Sample{sample{t: 100, h: hists[0].Copy()}}, expChunks: 1, }, { - samples: []tsdbutil.Sample{sample{t: 200, f: 2}}, + samples: []chunks.Sample{sample{t: 200, f: 2}}, expChunks: 2, }, { - samples: []tsdbutil.Sample{sample{t: 210, fh: floatHists[0].Copy()}}, + samples: []chunks.Sample{sample{t: 210, fh: floatHists[0].Copy()}}, expChunks: 3, }, { - samples: []tsdbutil.Sample{sample{t: 220, h: hists[1].Copy()}}, + samples: []chunks.Sample{sample{t: 220, h: hists[1].Copy()}}, expChunks: 4, }, { - samples: []tsdbutil.Sample{sample{t: 230, fh: floatHists[3].Copy()}}, + samples: []chunks.Sample{sample{t: 230, fh: floatHists[3].Copy()}}, expChunks: 5, }, { - samples: []tsdbutil.Sample{sample{t: 100, h: hists[2].Copy()}}, + samples: []chunks.Sample{sample{t: 100, h: hists[2].Copy()}}, err: storage.ErrOutOfOrderSample, }, { - samples: []tsdbutil.Sample{sample{t: 300, h: hists[3].Copy()}}, + samples: []chunks.Sample{sample{t: 300, h: hists[3].Copy()}}, expChunks: 6, }, { - samples: []tsdbutil.Sample{sample{t: 100, f: 2}}, + samples: []chunks.Sample{sample{t: 100, f: 2}}, err: storage.ErrOutOfOrderSample, }, { - samples: []tsdbutil.Sample{sample{t: 100, fh: floatHists[4].Copy()}}, + samples: []chunks.Sample{sample{t: 100, fh: floatHists[4].Copy()}}, err: storage.ErrOutOfOrderSample, }, { // Combination of histograms and float64 in the same commit. The behaviour is undefined, but we want to also // verify how TSDB would behave. Here the histogram is appended at the end, hence will be considered as out of order. - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ sample{t: 400, f: 4}, sample{t: 500, h: hists[5]}, // This won't be committed. 
sample{t: 600, f: 6}, }, - addToExp: []tsdbutil.Sample{ + addToExp: []chunks.Sample{ sample{t: 400, f: 4}, sample{t: 600, f: 6}, }, @@ -4158,12 +4178,12 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { }, { // Here the histogram is appended at the end, hence the first histogram is out of order. - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ sample{t: 700, h: hists[7]}, // Out of order w.r.t. the next float64 sample that is appended first. sample{t: 800, f: 8}, sample{t: 900, h: hists[9]}, }, - addToExp: []tsdbutil.Sample{ + addToExp: []chunks.Sample{ sample{t: 800, f: 8}, sample{t: 900, h: hists[9].Copy()}, }, @@ -4171,11 +4191,11 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { }, { // Float histogram is appended at the end. - samples: []tsdbutil.Sample{ + samples: []chunks.Sample{ sample{t: 1000, fh: floatHists[7]}, // Out of order w.r.t. the next histogram. sample{t: 1100, h: hists[9]}, }, - addToExp: []tsdbutil.Sample{ + addToExp: []chunks.Sample{ sample{t: 1100, h: hists[9].Copy()}, }, expChunks: 8, @@ -4220,7 +4240,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) { require.NoError(t, err) series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) - require.Equal(t, map[string][]tsdbutil.Sample{lbls.String(): expResult}, series) + require.Equal(t, map[string][]chunks.Sample{lbls.String(): expResult}, series) } // Tests https://github.com/prometheus/prometheus/issues/9725. @@ -4654,7 +4674,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) { itvl := int64(15 * time.Second / time.Millisecond) lastTs := int64(0) lbls := labels.FromStrings("__name__", "testing", "foo", "bar") - var expSamples []tsdbutil.Sample + var expSamples []chunks.Sample addSamples := func(numSamples int) { app := h.Appender(context.Background()) var ref storage.SeriesRef @@ -4703,7 +4723,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) { q, err := NewBlockQuerier(h, 0, lastTs) require.NoError(t, err) res := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "__name__", "testing")) - require.Equal(t, map[string][]tsdbutil.Sample{lbls.String(): expSamples}, res) + require.Equal(t, map[string][]chunks.Sample{lbls.String(): expSamples}, res) require.NoError(t, h.Close()) } @@ -4818,23 +4838,22 @@ func TestHistogramValidation(t *testing.T) { } func BenchmarkHistogramValidation(b *testing.B) { - histograms := generateBigTestHistograms(b.N) + histograms := generateBigTestHistograms(b.N, 500) b.ResetTimer() for _, h := range histograms { require.NoError(b, ValidateHistogram(h)) } } -func generateBigTestHistograms(n int) []*histogram.Histogram { - const numBuckets = 500 +func generateBigTestHistograms(numHistograms, numBuckets int) []*histogram.Histogram { numSpans := numBuckets / 10 bucketsPerSide := numBuckets / 2 spanLength := uint32(bucketsPerSide / numSpans) - // Given all bucket deltas are 1, sum n + 1. + // Given all bucket deltas are 1, sum numHistograms + 1. 
observationCount := numBuckets / 2 * (1 + numBuckets) var histograms []*histogram.Histogram - for i := 0; i < n; i++ { + for i := 0; i < numHistograms; i++ { h := &histogram.Histogram{ Count: uint64(i + observationCount), ZeroCount: uint64(i), @@ -4848,7 +4867,7 @@ func generateBigTestHistograms(n int) []*histogram.Histogram { } for j := 0; j < numSpans; j++ { - s := histogram.Span{Offset: 1 + int32(i), Length: spanLength} + s := histogram.Span{Offset: 1, Length: spanLength} h.NegativeSpans[j] = s h.PositiveSpans[j] = s } @@ -5197,3 +5216,186 @@ func TestSnapshotAheadOfWALError(t *testing.T) { require.NoError(t, head.Close()) } + +func BenchmarkCuttingHeadHistogramChunks(b *testing.B) { + const ( + numSamples = 50000 + numBuckets = 100 + ) + samples := generateBigTestHistograms(numSamples, numBuckets) + + h, _ := newTestHead(b, DefaultBlockDuration, wlog.CompressionNone, false) + defer func() { + require.NoError(b, h.Close()) + }() + + a := h.Appender(context.Background()) + ts := time.Now().UnixMilli() + lbls := labels.FromStrings("foo", "bar") + + b.ResetTimer() + + for _, s := range samples { + _, err := a.AppendHistogram(0, lbls, ts, s, nil) + require.NoError(b, err) + } +} + +func TestCuttingNewHeadChunks(t *testing.T) { + testCases := map[string]struct { + numTotalSamples int + timestampJitter bool + floatValFunc func(i int) float64 + histValFunc func(i int) *histogram.Histogram + expectedChks []struct { + numSamples int + numBytes int + } + }{ + "float samples": { + numTotalSamples: 180, + floatValFunc: func(i int) float64 { + return 1. + }, + expectedChks: []struct { + numSamples int + numBytes int + }{ + {numSamples: 120, numBytes: 46}, + {numSamples: 60, numBytes: 32}, + }, + }, + "large float samples": { + // Normally 120 samples would fit into a single chunk but these chunks violate the 1005 byte soft cap. + numTotalSamples: 120, + timestampJitter: true, + floatValFunc: func(i int) float64 { + // Flipping between these two make each sample val take at least 64 bits. + vals := []float64{math.MaxFloat64, 0x00} + return vals[i%len(vals)] + }, + expectedChks: []struct { + numSamples int + numBytes int + }{ + {99, 1008}, + {21, 219}, + }, + }, + "small histograms": { + numTotalSamples: 240, + histValFunc: func() func(i int) *histogram.Histogram { + hists := generateBigTestHistograms(240, 10) + return func(i int) *histogram.Histogram { + return hists[i] + } + }(), + expectedChks: []struct { + numSamples int + numBytes int + }{ + {120, 1087}, + {120, 1039}, + }, + }, + "large histograms": { + numTotalSamples: 240, + histValFunc: func() func(i int) *histogram.Histogram { + hists := generateBigTestHistograms(240, 100) + return func(i int) *histogram.Histogram { + return hists[i] + } + }(), + expectedChks: []struct { + numSamples int + numBytes int + }{ + {30, 696}, + {30, 700}, + {30, 708}, + {30, 693}, + {30, 691}, + {30, 692}, + {30, 695}, + {30, 694}, + }, + }, + "really large histograms": { + // Really large histograms; each chunk can only contain a single histogram but we have a 10 sample minimum + // per chunk. 
+ numTotalSamples: 11, + histValFunc: func() func(i int) *histogram.Histogram { + hists := generateBigTestHistograms(11, 100000) + return func(i int) *histogram.Histogram { + return hists[i] + } + }(), + expectedChks: []struct { + numSamples int + numBytes int + }{ + {10, 200103}, + {1, 87540}, + }, + }, + } + for testName, tc := range testCases { + t.Run(testName, func(t *testing.T) { + h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) + defer func() { + require.NoError(t, h.Close()) + }() + + a := h.Appender(context.Background()) + + ts := int64(10000) + lbls := labels.FromStrings("foo", "bar") + jitter := []int64{0, 1} // A bit of jitter to prevent dod=0. + + for i := 0; i < tc.numTotalSamples; i++ { + if tc.floatValFunc != nil { + _, err := a.Append(0, lbls, ts, tc.floatValFunc(i)) + require.NoError(t, err) + } else if tc.histValFunc != nil { + _, err := a.AppendHistogram(0, lbls, ts, tc.histValFunc(i), nil) + require.NoError(t, err) + } + + ts += int64(60 * time.Second / time.Millisecond) + if tc.timestampJitter { + ts += jitter[i%len(jitter)] + } + } + + require.NoError(t, a.Commit()) + + idxReader, err := h.Index() + require.NoError(t, err) + + chkReader, err := h.Chunks() + require.NoError(t, err) + + p, err := idxReader.Postings("foo", "bar") + require.NoError(t, err) + + var lblBuilder labels.ScratchBuilder + + for p.Next() { + sRef := p.At() + + chkMetas := make([]chunks.Meta, len(tc.expectedChks)) + require.NoError(t, idxReader.Series(sRef, &lblBuilder, &chkMetas)) + + require.Len(t, chkMetas, len(tc.expectedChks)) + + for i, expected := range tc.expectedChks { + chk, err := chkReader.Chunk(chkMetas[i]) + require.NoError(t, err) + + require.Equal(t, expected.numSamples, chk.NumSamples()) + require.Len(t, chk.Bytes(), expected.numBytes) + } + } + }) + } +} diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index 6c0038f89..013b59aa5 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -28,7 +28,6 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" - "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/wlog" ) @@ -496,16 +495,16 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { queryMinT int64 queryMaxT int64 firstInOrderSampleAt int64 - inputSamples tsdbutil.SampleSlice + inputSamples chunks.SampleSlice expChunkError bool - expChunksSamples []tsdbutil.SampleSlice + expChunksSamples []chunks.SampleSlice }{ { name: "Getting the head when there are no overlapping chunks returns just the samples in the head", queryMinT: minutes(0), queryMaxT: minutes(100), firstInOrderSampleAt: minutes(120), - inputSamples: tsdbutil.SampleSlice{ + inputSamples: chunks.SampleSlice{ sample{t: minutes(30), f: float64(0)}, sample{t: minutes(40), f: float64(0)}, }, @@ -514,7 +513,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Query Interval [------------------------------------------------------------------------------------------] // Chunk 0: Current Head [--------] (With 2 samples) // Output Graphically [--------] (With 2 samples) - expChunksSamples: []tsdbutil.SampleSlice{ + expChunksSamples: []chunks.SampleSlice{ { sample{t: minutes(30), f: float64(0)}, sample{t: minutes(40), f: float64(0)}, @@ -526,7 +525,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { queryMinT: minutes(0), queryMaxT: minutes(100), firstInOrderSampleAt: minutes(120), - inputSamples: tsdbutil.SampleSlice{ + 
inputSamples: chunks.SampleSlice{ // opts.OOOCapMax is 5 so these will be mmapped to the first mmapped chunk sample{t: minutes(41), f: float64(0)}, sample{t: minutes(42), f: float64(0)}, @@ -544,7 +543,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Chunk 0 [---] (With 5 samples) // Chunk 1: Current Head [-----------------] (With 2 samples) // Output Graphically [-----------------] (With 7 samples) - expChunksSamples: []tsdbutil.SampleSlice{ + expChunksSamples: []chunks.SampleSlice{ { sample{t: minutes(30), f: float64(1)}, sample{t: minutes(41), f: float64(0)}, @@ -561,7 +560,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { queryMinT: minutes(0), queryMaxT: minutes(100), firstInOrderSampleAt: minutes(120), - inputSamples: tsdbutil.SampleSlice{ + inputSamples: chunks.SampleSlice{ // Chunk 0 sample{t: minutes(10), f: float64(0)}, sample{t: minutes(12), f: float64(0)}, @@ -592,7 +591,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Chunk 2 [--------] // Chunk 3: Current Head [--------] // Output Graphically [----------------][-----------------] - expChunksSamples: []tsdbutil.SampleSlice{ + expChunksSamples: []chunks.SampleSlice{ { sample{t: minutes(10), f: float64(0)}, sample{t: minutes(12), f: float64(0)}, @@ -619,7 +618,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { queryMinT: minutes(0), queryMaxT: minutes(100), firstInOrderSampleAt: minutes(120), - inputSamples: tsdbutil.SampleSlice{ + inputSamples: chunks.SampleSlice{ // Chunk 0 sample{t: minutes(40), f: float64(0)}, sample{t: minutes(42), f: float64(0)}, @@ -650,7 +649,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Chunk 2 [-------] // Chunk 3: Current Head [--------] // Output Graphically [----------------][-----------------] - expChunksSamples: []tsdbutil.SampleSlice{ + expChunksSamples: []chunks.SampleSlice{ { sample{t: minutes(10), f: float64(3)}, sample{t: minutes(20), f: float64(2)}, @@ -677,7 +676,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { queryMinT: minutes(0), queryMaxT: minutes(100), firstInOrderSampleAt: minutes(120), - inputSamples: tsdbutil.SampleSlice{ + inputSamples: chunks.SampleSlice{ // Chunk 0 sample{t: minutes(10), f: float64(0)}, sample{t: minutes(12), f: float64(0)}, @@ -708,7 +707,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Chunk 2 [-------] // Chunk 3: Current Head [-------] // Output Graphically [-------][-------][-------][--------] - expChunksSamples: []tsdbutil.SampleSlice{ + expChunksSamples: []chunks.SampleSlice{ { sample{t: minutes(10), f: float64(0)}, sample{t: minutes(12), f: float64(0)}, @@ -741,7 +740,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { queryMinT: minutes(0), queryMaxT: minutes(100), firstInOrderSampleAt: minutes(120), - inputSamples: tsdbutil.SampleSlice{ + inputSamples: chunks.SampleSlice{ // Chunk 0 sample{t: minutes(10), f: float64(0)}, sample{t: minutes(15), f: float64(0)}, @@ -765,7 +764,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Chunk 1 [--------------------] // Chunk 2 Current Head [--------------] // Output Graphically [-----------------------------------] - expChunksSamples: []tsdbutil.SampleSlice{ + expChunksSamples: []chunks.SampleSlice{ { sample{t: minutes(10), f: float64(0)}, sample{t: minutes(15), f: float64(0)}, @@ -784,7 +783,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { queryMinT: minutes(12), queryMaxT: minutes(33), firstInOrderSampleAt: minutes(120), - inputSamples: tsdbutil.SampleSlice{ + inputSamples: chunks.SampleSlice{ // Chunk 0 sample{t: minutes(10), f: float64(0)}, 
sample{t: minutes(15), f: float64(0)}, @@ -808,7 +807,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { // Chunk 1 [--------------------] // Chunk 2 Current Head [--------------] // Output Graphically [-----------------------------------] - expChunksSamples: []tsdbutil.SampleSlice{ + expChunksSamples: []chunks.SampleSlice{ { sample{t: minutes(10), f: float64(0)}, sample{t: minutes(15), f: float64(0)}, @@ -853,7 +852,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { c, err := cr.Chunk(chks[i]) require.NoError(t, err) - var resultSamples tsdbutil.SampleSlice + var resultSamples chunks.SampleSlice it := c.Iterator(nil) for it.Next() == chunkenc.ValFloat { t, v := it.At() @@ -892,17 +891,17 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( queryMinT int64 queryMaxT int64 firstInOrderSampleAt int64 - initialSamples tsdbutil.SampleSlice - samplesAfterSeriesCall tsdbutil.SampleSlice + initialSamples chunks.SampleSlice + samplesAfterSeriesCall chunks.SampleSlice expChunkError bool - expChunksSamples []tsdbutil.SampleSlice + expChunksSamples []chunks.SampleSlice }{ { name: "Current head gets old, new and in between sample after Series call, they all should be omitted from the result", queryMinT: minutes(0), queryMaxT: minutes(100), firstInOrderSampleAt: minutes(120), - initialSamples: tsdbutil.SampleSlice{ + initialSamples: chunks.SampleSlice{ // Chunk 0 sample{t: minutes(20), f: float64(0)}, sample{t: minutes(22), f: float64(0)}, @@ -913,7 +912,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( sample{t: minutes(25), f: float64(1)}, sample{t: minutes(35), f: float64(1)}, }, - samplesAfterSeriesCall: tsdbutil.SampleSlice{ + samplesAfterSeriesCall: chunks.SampleSlice{ sample{t: minutes(10), f: float64(1)}, sample{t: minutes(32), f: float64(1)}, sample{t: minutes(50), f: float64(1)}, @@ -926,7 +925,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( // New samples added after Series() // Chunk 1: Current Head [-----------------------------------] (5 samples) // Output Graphically [------------] (With 8 samples, samples newer than lastmint or older than lastmaxt are omitted but the ones in between are kept) - expChunksSamples: []tsdbutil.SampleSlice{ + expChunksSamples: []chunks.SampleSlice{ { sample{t: minutes(20), f: float64(0)}, sample{t: minutes(22), f: float64(0)}, @@ -944,7 +943,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( queryMinT: minutes(0), queryMaxT: minutes(100), firstInOrderSampleAt: minutes(120), - initialSamples: tsdbutil.SampleSlice{ + initialSamples: chunks.SampleSlice{ // Chunk 0 sample{t: minutes(20), f: float64(0)}, sample{t: minutes(22), f: float64(0)}, @@ -955,7 +954,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( sample{t: minutes(25), f: float64(1)}, sample{t: minutes(35), f: float64(1)}, }, - samplesAfterSeriesCall: tsdbutil.SampleSlice{ + samplesAfterSeriesCall: chunks.SampleSlice{ sample{t: minutes(10), f: float64(1)}, sample{t: minutes(32), f: float64(1)}, sample{t: minutes(50), f: float64(1)}, @@ -972,7 +971,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( // Chunk 1 (mmapped) [-------------------------] (5 samples) // Chunk 2: Current Head [-----------] (2 samples) // Output Graphically [------------] (8 samples) It has 5 from Chunk 0 and 3 from Chunk 1 - expChunksSamples: []tsdbutil.SampleSlice{ + expChunksSamples: []chunks.SampleSlice{ { 
sample{t: minutes(20), f: float64(0)}, sample{t: minutes(22), f: float64(0)}, @@ -1024,7 +1023,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( c, err := cr.Chunk(chks[i]) require.NoError(t, err) - var resultSamples tsdbutil.SampleSlice + var resultSamples chunks.SampleSlice it := c.Iterator(nil) for it.Next() == chunkenc.ValFloat { ts, v := it.At() diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index a63041b53..24005ab5f 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -239,7 +239,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C require.Equal(t, errExp, errRes) require.Equal(t, len(chksExp), len(chksRes)) - var exp, act [][]tsdbutil.Sample + var exp, act [][]chunks.Sample for i := range chksExp { samples, err := storage.ExpandSamples(chksExp[i].Chunk.Iterator(nil), nil) require.NoError(t, err) @@ -291,24 +291,24 @@ func TestBlockQuerier(t *testing.T) { ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")}, exp: newMockSeriesSet([]storage.Series{ storage.NewListSeries(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, + []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, ), storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), storage.NewListSeries(labels.FromStrings("b", "b"), - []tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}}, + []chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}}, ), }), expChks: newMockChunkSeriesSet([]storage.ChunkSeries{ storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, + []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, ), storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"), - []tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []tsdbutil.Sample{sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}}, + []chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []chunks.Sample{sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}}, ), }), }, @@ -318,18 +318,18 @@ func 
TestBlockQuerier(t *testing.T) { ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")}, exp: newMockSeriesSet([]storage.Series{ storage.NewListSeries(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, + []chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, ), storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), }), expChks: newMockChunkSeriesSet([]storage.ChunkSeries{ storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, + []chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, ), storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), }), }, @@ -342,20 +342,20 @@ func TestBlockQuerier(t *testing.T) { ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")}, exp: newMockSeriesSet([]storage.Series{ storage.NewListSeries(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, + []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, ), storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), }), expChks: newMockChunkSeriesSet([]storage.ChunkSeries{ storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, - []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, + []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, + []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, ), storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, - []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, + []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), }), }, @@ -368,18 +368,18 @@ func TestBlockQuerier(t *testing.T) { ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")}, exp: newMockSeriesSet([]storage.Series{ storage.NewListSeries(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{5, 2, 
nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, + []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, ), storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), }), expChks: newMockChunkSeriesSet([]storage.ChunkSeries{ storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, + []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, ), storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), }), }, @@ -427,24 +427,24 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) { ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")}, exp: newMockSeriesSet([]storage.Series{ storage.NewListSeries(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, + []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, ), storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), storage.NewListSeries(labels.FromStrings("b", "b"), - []tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}}, + []chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}}, ), }), expChks: newMockChunkSeriesSet([]storage.ChunkSeries{ storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, + []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, ), storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"), - []tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}}, + []chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}}, ), }), }, @@ -454,18 +454,18 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) { ms: 
[]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")}, exp: newMockSeriesSet([]storage.Series{ storage.NewListSeries(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, + []chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, ), storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), }), expChks: newMockChunkSeriesSet([]storage.ChunkSeries{ storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, + []chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, ), storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), }), }, @@ -509,18 +509,18 @@ func TestBlockQuerier_TrimmingDoesNotModifyOriginalTombstoneIntervals(t *testing ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", "a")}, exp: newMockSeriesSet([]storage.Series{ storage.NewListSeries(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, + []chunks.Sample{sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, ), storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), }), expChks: newMockChunkSeriesSet([]storage.ChunkSeries{ storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{3, 4, nil, nil}}, []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, + []chunks.Sample{sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, ), storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, + []chunks.Sample{sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}}, ), }), } @@ -608,24 +608,24 @@ func TestBlockQuerierDelete(t *testing.T) { ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")}, exp: newMockSeriesSet([]storage.Series{ storage.NewListSeries(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, + []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, ), storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{5, 3, nil, nil}}, + []chunks.Sample{sample{5, 3, nil, nil}}, ), storage.NewListSeries(labels.FromStrings("b", "b"), - []tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}}, + []chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, 
sample{3, 6, nil, nil}, sample{5, 1, nil, nil}}, ), }), expChks: newMockChunkSeriesSet([]storage.ChunkSeries{ storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, + []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}}, ), storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{5, 3, nil, nil}}, + []chunks.Sample{sample{5, 3, nil, nil}}, ), storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"), - []tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []tsdbutil.Sample{sample{5, 1, nil, nil}}, + []chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []chunks.Sample{sample{5, 1, nil, nil}}, ), }), }, @@ -635,18 +635,18 @@ func TestBlockQuerierDelete(t *testing.T) { ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")}, exp: newMockSeriesSet([]storage.Series{ storage.NewListSeries(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, + []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, ), storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{5, 3, nil, nil}}, + []chunks.Sample{sample{5, 3, nil, nil}}, ), }), expChks: newMockChunkSeriesSet([]storage.ChunkSeries{ storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"), - []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, + []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}}, ), storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"), - []tsdbutil.Sample{sample{5, 3, nil, nil}}, + []chunks.Sample{sample{5, 3, nil, nil}}, ), }), }, @@ -663,14 +663,14 @@ type fakeChunksReader struct { chks map[chunks.ChunkRef]chunkenc.Chunk } -func createFakeReaderAndNotPopulatedChunks(s ...[]tsdbutil.Sample) (*fakeChunksReader, []chunks.Meta) { +func createFakeReaderAndNotPopulatedChunks(s ...[]chunks.Sample) (*fakeChunksReader, []chunks.Meta) { f := &fakeChunksReader{ chks: map[chunks.ChunkRef]chunkenc.Chunk{}, } chks := make([]chunks.Meta, 0, len(s)) for ref, samples := range s { - chk, _ := tsdbutil.ChunkFromSamples(samples) + chk, _ := chunks.ChunkFromSamples(samples) f.chks[chunks.ChunkRef(ref)] = chk.Chunk chks = append(chks, chunks.Meta{ @@ -693,9 +693,9 @@ func (r *fakeChunksReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { func TestPopulateWithTombSeriesIterators(t *testing.T) { cases := []struct { name string - chks [][]tsdbutil.Sample + chks [][]chunks.Sample - expected []tsdbutil.Sample + expected []chunks.Sample expectedChks []chunks.Meta intervals tombstones.Intervals @@ -706,79 +706,79 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }{ { name: "no chunk", - chks: [][]tsdbutil.Sample{}, + chks: [][]chunks.Sample{}, }, { name: "one empty chunk", // This should never happen. - chks: [][]tsdbutil.Sample{{}}, + chks: [][]chunks.Sample{{}}, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{}), + assureChunkFromSamples(t, []chunks.Sample{}), }, }, { name: "three empty chunks", // This should never happen. 
- chks: [][]tsdbutil.Sample{{}, {}, {}}, + chks: [][]chunks.Sample{{}, {}, {}}, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{}), - assureChunkFromSamples(t, []tsdbutil.Sample{}), - assureChunkFromSamples(t, []tsdbutil.Sample{}), + assureChunkFromSamples(t, []chunks.Sample{}), + assureChunkFromSamples(t, []chunks.Sample{}), + assureChunkFromSamples(t, []chunks.Sample{}), }, }, { name: "one chunk", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, }, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, }), }, }, { name: "two full chunks", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, }), - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, }), }, }, { name: "three full chunks", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, {sample{10, 22, nil, nil}, sample{203, 3493, nil, nil}}, }, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, sample{10, 22, nil, nil}, sample{203, 3493, nil, nil}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, }), - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, }), - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{10, 22, nil, nil}, sample{203, 3493, nil, nil}, }), }, @@ -786,14 +786,14 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { // Seek cases. { name: "three empty chunks and seek", // This should never happen. 
- chks: [][]tsdbutil.Sample{{}, {}, {}}, + chks: [][]chunks.Sample{{}, {}, {}}, seek: 1, seekSuccess: false, }, { name: "two chunks and seek beyond chunks", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, @@ -803,92 +803,92 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "two chunks and seek on middle of first chunk", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, seek: 2, seekSuccess: true, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, }, }, { name: "two chunks and seek before first chunk", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, seek: -32, seekSuccess: true, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, }, }, // Deletion / Trim cases. { name: "no chunk with deletion interval", - chks: [][]tsdbutil.Sample{}, + chks: [][]chunks.Sample{}, intervals: tombstones.Intervals{{Mint: 20, Maxt: 21}}, }, { name: "two chunks with trimmed first and last samples from edge chunks", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, intervals: tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 9, Maxt: math.MaxInt64}), - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, }), - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{7, 89, nil, nil}, }), }, }, { name: "two chunks with trimmed middle sample of first chunk", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, intervals: tombstones.Intervals{{Mint: 2, Maxt: 3}}, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 2, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 2, nil, nil}, sample{6, 1, nil, nil}, }), - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, }), }, }, { name: "two chunks with deletion across two chunks", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, intervals: tombstones.Intervals{{Mint: 6, Maxt: 7}}, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 2, nil, nil}, sample{2, 3, 
nil, nil}, sample{3, 5, nil, nil}, sample{9, 8, nil, nil}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, }), - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{9, 8, nil, nil}, }), }, @@ -896,7 +896,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { // Deletion with seek. { name: "two chunks with trimmed first and last samples from edge chunks, seek from middle of first chunk", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, }, @@ -904,13 +904,13 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { seek: 3, seekSuccess: true, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, }, }, { name: "one histogram chunk", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ { sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, @@ -918,14 +918,14 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil}, }, }, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil}, sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil}, sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil}, sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil}, @@ -935,7 +935,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one histogram chunk intersect with deletion interval", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ { sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, @@ -944,13 +944,13 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, }, intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}}, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil}, sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil}, sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil}, @@ -959,7 +959,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one float histogram chunk", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ { sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, @@ -967,14 +967,14 
@@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, }, }, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))}, sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))}, sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))}, sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))}, @@ -984,7 +984,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one float histogram chunk intersect with deletion interval", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ { sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, @@ -993,13 +993,13 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, }, intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}}, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))}, sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))}, sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))}, @@ -1008,7 +1008,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one gauge histogram chunk", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ { sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, @@ -1016,14 +1016,14 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil}, }, }, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, @@ -1033,7 +1033,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one gauge histogram chunk intersect with deletion interval", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ { sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, @@ -1042,13 +1042,13 @@ func TestPopulateWithTombSeriesIterators(t 
*testing.T) { }, }, intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}}, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, @@ -1057,7 +1057,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one gauge float histogram", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ { sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, @@ -1065,14 +1065,14 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)}, }, }, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, @@ -1082,7 +1082,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, { name: "one gauge float histogram chunk intersect with deletion interval", - chks: [][]tsdbutil.Sample{ + chks: [][]chunks.Sample{ { sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, @@ -1091,13 +1091,13 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, }, intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}}, - expected: []tsdbutil.Sample{ + expected: []chunks.Sample{ sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, }, expectedChks: []chunks.Meta{ - assureChunkFromSamples(t, []tsdbutil.Sample{ + assureChunkFromSamples(t, []chunks.Sample{ sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, @@ -1112,7 +1112,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { it := &populateWithDelSeriesIterator{} it.reset(ulid.ULID{}, f, chkMetas, tc.intervals) - var r []tsdbutil.Sample + var r []chunks.Sample if tc.seek != 0 { require.Equal(t, tc.seekSuccess, it.Seek(tc.seek) == chunkenc.ValFloat) require.Equal(t, tc.seekSuccess, it.Seek(tc.seek) == chunkenc.ValFloat) // Next one should be noop. 
@@ -1158,9 +1158,9 @@ func rmChunkRefs(chks []chunks.Meta) { // Regression for: https://github.com/prometheus/tsdb/pull/97 func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) { f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []tsdbutil.Sample{}, - []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, - []tsdbutil.Sample{sample{4, 4, nil, nil}, sample{5, 5, nil, nil}}, + []chunks.Sample{}, + []chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, + []chunks.Sample{sample{4, 4, nil, nil}, sample{5, 5, nil, nil}}, ) it := &populateWithDelSeriesIterator{} @@ -1177,9 +1177,9 @@ func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) { // skipped to the end when seeking a value in the current chunk. func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) { f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []tsdbutil.Sample{}, - []tsdbutil.Sample{sample{1, 2, nil, nil}, sample{3, 4, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}}, - []tsdbutil.Sample{}, + []chunks.Sample{}, + []chunks.Sample{sample{1, 2, nil, nil}, sample{3, 4, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}}, + []chunks.Sample{}, ) it := &populateWithDelSeriesIterator{} @@ -1197,7 +1197,7 @@ func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) { func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) { f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []tsdbutil.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{6, 8, nil, nil}}, + []chunks.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{6, 8, nil, nil}}, ) it := &populateWithDelSeriesIterator{} @@ -1210,7 +1210,7 @@ func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) { // Seek gets called and advances beyond the max time, which was just accepted as a valid sample. func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) { f, chkMetas := createFakeReaderAndNotPopulatedChunks( - []tsdbutil.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}}, + []chunks.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}}, ) it := &populateWithDelSeriesIterator{} diff --git a/tsdb/tsdbutil/chunks.go b/tsdb/tsdbutil/chunks.go deleted file mode 100644 index c9d796f9b..000000000 --- a/tsdb/tsdbutil/chunks.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tsdbutil - -import ( - "fmt" - - "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/prometheus/prometheus/tsdb/chunks" -) - -type Samples interface { - Get(i int) Sample - Len() int -} - -type Sample interface { - T() int64 - F() float64 - H() *histogram.Histogram - FH() *histogram.FloatHistogram - Type() chunkenc.ValueType -} - -type SampleSlice []Sample - -func (s SampleSlice) Get(i int) Sample { return s[i] } -func (s SampleSlice) Len() int { return len(s) } - -// ChunkFromSamples requires all samples to have the same type. -func ChunkFromSamples(s []Sample) (chunks.Meta, error) { - return ChunkFromSamplesGeneric(SampleSlice(s)) -} - -// ChunkFromSamplesGeneric requires all samples to have the same type. -func ChunkFromSamplesGeneric(s Samples) (chunks.Meta, error) { - emptyChunk := chunks.Meta{Chunk: chunkenc.NewXORChunk()} - mint, maxt := int64(0), int64(0) - - if s.Len() > 0 { - mint, maxt = s.Get(0).T(), s.Get(s.Len()-1).T() - } - - if s.Len() == 0 { - return emptyChunk, nil - } - - sampleType := s.Get(0).Type() - c, err := chunkenc.NewEmptyChunk(sampleType.ChunkEncoding()) - if err != nil { - return chunks.Meta{}, err - } - - ca, _ := c.Appender() - var newChunk chunkenc.Chunk - - for i := 0; i < s.Len(); i++ { - switch sampleType { - case chunkenc.ValFloat: - ca.Append(s.Get(i).T(), s.Get(i).F()) - case chunkenc.ValHistogram: - newChunk, _, ca, err = ca.AppendHistogram(nil, s.Get(i).T(), s.Get(i).H(), false) - if err != nil { - return emptyChunk, err - } - if newChunk != nil { - return emptyChunk, fmt.Errorf("did not expect to start a second chunk") - } - case chunkenc.ValFloatHistogram: - newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false) - if err != nil { - return emptyChunk, err - } - if newChunk != nil { - return emptyChunk, fmt.Errorf("did not expect to start a second chunk") - } - default: - panic(fmt.Sprintf("unknown sample type %s", sampleType.String())) - } - } - return chunks.Meta{ - MinTime: mint, - MaxTime: maxt, - Chunk: c, - }, nil -} - -type sample struct { - t int64 - f float64 - h *histogram.Histogram - fh *histogram.FloatHistogram -} - -func (s sample) T() int64 { - return s.t -} - -func (s sample) F() float64 { - return s.f -} - -func (s sample) H() *histogram.Histogram { - return s.h -} - -func (s sample) FH() *histogram.FloatHistogram { - return s.fh -} - -func (s sample) Type() chunkenc.ValueType { - switch { - case s.h != nil: - return chunkenc.ValHistogram - case s.fh != nil: - return chunkenc.ValFloatHistogram - default: - return chunkenc.ValFloat - } -} - -// PopulatedChunk creates a chunk populated with samples every second starting at minTime -func PopulatedChunk(numSamples int, minTime int64) (chunks.Meta, error) { - samples := make([]Sample, numSamples) - for i := 0; i < numSamples; i++ { - samples[i] = sample{t: minTime + int64(i*1000), f: 1.0} - } - return ChunkFromSamples(samples) -} - -// GenerateSamples starting at start and counting up numSamples. 
-func GenerateSamples(start, numSamples int) []Sample { - return generateSamples(start, numSamples, func(i int) Sample { - return sample{ - t: int64(i), - f: float64(i), - } - }) -} - -func generateSamples(start, numSamples int, gen func(int) Sample) []Sample { - samples := make([]Sample, 0, numSamples) - for i := start; i < start+numSamples; i++ { - samples = append(samples, gen(i)) - } - return samples -} diff --git a/tsdb/tsdbutil/histogram.go b/tsdb/tsdbutil/histogram.go index 8270da686..0327815c4 100644 --- a/tsdb/tsdbutil/histogram.go +++ b/tsdb/tsdbutil/histogram.go @@ -14,7 +14,7 @@ package tsdbutil import ( - "math/rand" + "math" "github.com/prometheus/prometheus/model/histogram" ) @@ -53,7 +53,8 @@ func GenerateTestHistogram(i int) *histogram.Histogram { func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) { for x := 0; x < n; x++ { - r = append(r, GenerateTestGaugeHistogram(rand.Intn(n))) + i := int(math.Sin(float64(x))*100) + 100 + r = append(r, GenerateTestGaugeHistogram(i)) } return r } @@ -98,7 +99,8 @@ func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) { for x := 0; x < n; x++ { - r = append(r, GenerateTestGaugeFloatHistogram(rand.Intn(n))) + i := int(math.Sin(float64(x))*100) + 100 + r = append(r, GenerateTestGaugeFloatHistogram(i)) } return r } From 54aaa2bd7e9dad6a0eae34232b95b82dc7143059 Mon Sep 17 00:00:00 2001 From: zenador Date: Fri, 25 Aug 2023 03:02:14 +0800 Subject: [PATCH 66/82] Add `histogram_stdvar` and `histogram_stddev` functions (#12614) * Add new function: histogram_stdvar and histogram_stddev Signed-off-by: Jeanette Tan --- docs/querying/functions.md | 17 +- model/histogram/float_histogram.go | 2 +- promql/engine_test.go | 159 ++++++++++++++++++ promql/functions.go | 68 ++++++++ promql/parser/functions.go | 10 ++ .../src/complete/promql.terms.ts | 14 +- .../src/parser/parser.test.ts | 24 +++ .../codemirror-promql/src/types/function.ts | 14 ++ web/ui/module/lezer-promql/src/promql.grammar | 4 + 9 files changed, 308 insertions(+), 4 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 1da70c603..6b3a77e97 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -145,7 +145,7 @@ delta(cpu_temp_celsius{host="zeus"}[2h]) ``` `delta` acts on native histograms by calculating a new histogram where each -compononent (sum and count of observations, buckets) is the difference between +component (sum and count of observations, buckets) is the difference between the respective component in the first and last native histogram in `v`. However, each element in `v` that contains a mix of float and native histogram samples within the range, will be missing from the result vector. @@ -323,6 +323,19 @@ a histogram. You can use `histogram_quantile(1, v instant-vector)` to get the estimated maximum value stored in a histogram. +## `histogram_stddev()` and `histogram_stdvar()` + +_Both functions only act on native histograms, which are an experimental +feature. The behavior of these functions may change in future versions of +Prometheus, including their removal from PromQL._ + +`histogram_stddev(v instant-vector)` returns the estimated standard deviation +of observations in a native histogram, based on the geometric mean of the buckets +where the observations lie. Samples that are not native histograms are ignored and +do not show up in the returned vector. 
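For example (an illustrative query only; `http_request_duration_seconds` is assumed here to be a native histogram metric and is not referenced elsewhere in this patch), the estimated standard deviation of request durations over the last 10 minutes could be computed with:

```
histogram_stddev(rate(http_request_duration_seconds[10m]))  # hypothetical metric name
```

Because `rate()` applied to native histograms yields one histogram per series, the result here is a single float sample per input series, as with `histogram_sum()` and `histogram_count()`.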
+ +Similarly, `histogram_stdvar(v instant-vector)` returns the estimated standard +variance of observations in a native histogram. ## `holt_winters()` @@ -495,7 +508,7 @@ rate(http_requests_total{job="api-server"}[5m]) ``` `rate` acts on native histograms by calculating a new histogram where each -compononent (sum and count of observations, buckets) is the rate of increase +component (sum and count of observations, buckets) is the rate of increase between the respective component in the first and last native histogram in `v`. However, each element in `v` that contains a mix of float and native histogram samples within the range, will be missing from the result vector. diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index bfb3c3d19..f8766f7a8 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -159,7 +159,7 @@ func (h *FloatHistogram) Mul(factor float64) *FloatHistogram { return h } -// Div works like Scale but divides instead of multiplies. +// Div works like Mul but divides instead of multiplies. // When dividing by 0, everything will be set to Inf. func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { h.ZeroCount /= scalar diff --git a/promql/engine_test.go b/promql/engine_test.go index 154a45514..1ded05858 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3312,6 +3312,165 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { } } +func TestNativeHistogram_HistogramStdDevVar(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. + testCases := []struct { + name string + h *histogram.Histogram + stdVar float64 + }{ + { + name: "1, 2, 3, 4 low-res", + h: &histogram.Histogram{ + Count: 4, + Sum: 10, + Schema: 2, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 1}, + {Offset: 3, Length: 1}, + {Offset: 2, Length: 2}, + }, + PositiveBuckets: []int64{1, 0, 0, 0}, + }, + stdVar: 1.163807968526718, // actual variance: 1.25 + }, + { + name: "1, 2, 3, 4 hi-res", + h: &histogram.Histogram{ + Count: 4, + Sum: 10, + Schema: 8, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 1}, + {Offset: 255, Length: 1}, + {Offset: 149, Length: 1}, + {Offset: 105, Length: 1}, + }, + PositiveBuckets: []int64{1, 0, 0, 0}, + }, + stdVar: 1.2471347737158793, // actual variance: 1.25 + }, + { + name: "-50, -8, 0, 3, 8, 9, 100", + h: &histogram.Histogram{ + Count: 7, + ZeroCount: 1, + Sum: 62, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 13, Length: 1}, + {Offset: 10, Length: 1}, + {Offset: 1, Length: 1}, + {Offset: 27, Length: 1}, + }, + PositiveBuckets: []int64{1, 0, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 24, Length: 1}, + {Offset: 21, Length: 1}, + }, + NegativeBuckets: []int64{1, 0}, + }, + stdVar: 1544.8582535368798, // actual variance: 1738.4082 + }, + { + name: "-50, -8, 0, 3, 8, 9, 100, NaN", + h: &histogram.Histogram{ + Count: 8, + ZeroCount: 1, + Sum: math.NaN(), + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 13, Length: 1}, + {Offset: 10, Length: 1}, + {Offset: 1, Length: 1}, + {Offset: 27, Length: 1}, + }, + PositiveBuckets: []int64{1, 0, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 24, Length: 1}, + {Offset: 21, Length: 1}, + }, + NegativeBuckets: []int64{1, 0}, + }, + stdVar: math.NaN(), + }, + { + name: "-50, -8, 0, 3, 8, 9, 100, +Inf", + h: &histogram.Histogram{ + Count: 8, + ZeroCount: 1, + Sum: math.Inf(1), + Schema: 3, + PositiveSpans: []histogram.Span{ + 
{Offset: 13, Length: 1}, + {Offset: 10, Length: 1}, + {Offset: 1, Length: 1}, + {Offset: 27, Length: 1}, + }, + PositiveBuckets: []int64{1, 0, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 24, Length: 1}, + {Offset: 21, Length: 1}, + }, + NegativeBuckets: []int64{1, 0}, + }, + stdVar: math.NaN(), + }, + } + for _, tc := range testCases { + for _, floatHisto := range []bool{true, false} { + t.Run(fmt.Sprintf("%s floatHistogram=%t", tc.name, floatHisto), func(t *testing.T) { + engine := newTestEngine() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + + ts := int64(10 * time.Minute / time.Millisecond) + app := storage.Appender(context.Background()) + var err error + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, tc.h.ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, ts, tc.h, nil) + } + require.NoError(t, err) + require.NoError(t, app.Commit()) + + queryString := fmt.Sprintf("histogram_stdvar(%s)", seriesName) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + require.InEpsilon(t, tc.stdVar, vector[0].F, 1e-12) + + queryString = fmt.Sprintf("histogram_stddev(%s)", seriesName) + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res = qry.Exec(context.Background()) + require.NoError(t, res.Err) + + vector, err = res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + require.InEpsilon(t, math.Sqrt(tc.stdVar), vector[0].F, 1e-12) + }) + } + } +} + func TestNativeHistogram_HistogramQuantile(t *testing.T) { // TODO(codesome): Integrate histograms into the PromQL testing framework // and write more tests there. diff --git a/promql/functions.go b/promql/functions.go index 96bffab96..5c39d6bd8 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -996,6 +996,72 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod return enh.Out } +// === histogram_stddev(Vector parser.ValueTypeVector) Vector === +func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + mean := sample.H.Sum / sample.H.Count + var variance, cVariance float64 + it := sample.H.AllBucketIterator() + for it.Next() { + bucket := it.At() + var val float64 + if bucket.Lower <= 0 && 0 <= bucket.Upper { + val = 0 + } else { + val = math.Sqrt(bucket.Upper * bucket.Lower) + } + delta := val - mean + variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) + } + variance += cVariance + variance /= sample.H.Count + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + F: math.Sqrt(variance), + }) + } + return enh.Out +} + +// === histogram_stdvar(Vector parser.ValueTypeVector) Vector === +func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. 
+ if sample.H == nil { + continue + } + mean := sample.H.Sum / sample.H.Count + var variance, cVariance float64 + it := sample.H.AllBucketIterator() + for it.Next() { + bucket := it.At() + var val float64 + if bucket.Lower <= 0 && 0 <= bucket.Upper { + val = 0 + } else { + val = math.Sqrt(bucket.Upper * bucket.Lower) + } + delta := val - mean + variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) + } + variance += cVariance + variance /= sample.H.Count + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + F: variance, + }) + } + return enh.Out +} + // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { lower := vals[0].(Vector)[0].F @@ -1377,6 +1443,8 @@ var FunctionCalls = map[string]FunctionCall{ "histogram_fraction": funcHistogramFraction, "histogram_quantile": funcHistogramQuantile, "histogram_sum": funcHistogramSum, + "histogram_stddev": funcHistogramStdDev, + "histogram_stdvar": funcHistogramStdVar, "holt_winters": funcHoltWinters, "hour": funcHour, "idelta": funcIdelta, diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 479c7f635..45a30219e 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -173,6 +173,16 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "histogram_stddev": { + Name: "histogram_stddev", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "histogram_stdvar": { + Name: "histogram_stdvar", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, "histogram_fraction": { Name: "histogram_fraction", ArgTypes: []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector}, diff --git a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts index c97385bad..5df81fe10 100644 --- a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts +++ b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts @@ -239,6 +239,18 @@ export const functionIdentifierTerms = [ info: 'Return the sum of observations from a native histogram (experimental feature)', type: 'function', }, + { + label: 'histogram_stddev', + detail: 'function', + info: 'Estimate the standard deviation of observations from a native histogram (experimental feature)', + type: 'function', + }, + { + label: 'histogram_stdvar', + detail: 'function', + info: 'Estimate the standard variance of observations from a native histogram (experimental feature)', + type: 'function', + }, { label: 'holt_winters', detail: 'function', @@ -430,7 +442,7 @@ export const functionIdentifierTerms = [ { label: 'stdvar_over_time', detail: 'function', - info: 'Calculate the standard variation within input series over time', + info: 'Calculate the standard variance within input series over time', type: 'function', }, { diff --git a/web/ui/module/codemirror-promql/src/parser/parser.test.ts b/web/ui/module/codemirror-promql/src/parser/parser.test.ts index b2140c075..5ef9c1f90 100644 --- a/web/ui/module/codemirror-promql/src/parser/parser.test.ts +++ b/web/ui/module/codemirror-promql/src/parser/parser.test.ts @@ -752,6 +752,30 @@ describe('promql operations', () => { expectedValueType: ValueType.vector, expectedDiag: [], }, + { + expr: + 'histogram_stddev( # Root of the query, final result, returns the 
standard deviation of observations.\n' + + ' sum by(method, path) ( # Argument to histogram_stddev(), an aggregated histogram.\n' + + ' rate( # Argument to sum(), the per-second increase of a histogram over 5m.\n' + + ' demo_api_request_duration_seconds{job="demo"}[5m] # Argument to rate(), a vector of sparse histogram series over the last 5m.\n' + + ' )\n' + + ' )\n' + + ')', + expectedValueType: ValueType.vector, + expectedDiag: [], + }, + { + expr: + 'histogram_stdvar( # Root of the query, final result, returns the standard variance of observations.\n' + + ' sum by(method, path) ( # Argument to histogram_stdvar(), an aggregated histogram.\n' + + ' rate( # Argument to sum(), the per-second increase of a histogram over 5m.\n' + + ' demo_api_request_duration_seconds{job="demo"}[5m] # Argument to rate(), a vector of sparse histogram series over the last 5m.\n' + + ' )\n' + + ' )\n' + + ')', + expectedValueType: ValueType.vector, + expectedDiag: [], + }, { expr: '1 @ start()', expectedValueType: ValueType.scalar, diff --git a/web/ui/module/codemirror-promql/src/types/function.ts b/web/ui/module/codemirror-promql/src/types/function.ts index 746524b6f..649cbad33 100644 --- a/web/ui/module/codemirror-promql/src/types/function.ts +++ b/web/ui/module/codemirror-promql/src/types/function.ts @@ -42,6 +42,8 @@ import { HistogramCount, HistogramFraction, HistogramQuantile, + HistogramStdDev, + HistogramStdVar, HistogramSum, HoltWinters, Hour, @@ -282,6 +284,18 @@ const promqlFunctions: { [key: number]: PromQLFunction } = { variadic: 0, returnType: ValueType.vector, }, + [HistogramStdDev]: { + name: 'histogram_stddev', + argTypes: [ValueType.vector], + variadic: 0, + returnType: ValueType.vector, + }, + [HistogramStdVar]: { + name: 'histogram_stdvar', + argTypes: [ValueType.vector], + variadic: 0, + returnType: ValueType.vector, + }, [HistogramSum]: { name: 'histogram_sum', argTypes: [ValueType.vector], diff --git a/web/ui/module/lezer-promql/src/promql.grammar b/web/ui/module/lezer-promql/src/promql.grammar index 3973f019a..37f9a39cd 100644 --- a/web/ui/module/lezer-promql/src/promql.grammar +++ b/web/ui/module/lezer-promql/src/promql.grammar @@ -135,6 +135,8 @@ FunctionIdentifier { HistogramCount | HistogramFraction | HistogramQuantile | + HistogramStdDev | + HistogramStdVar | HistogramSum | HoltWinters | Hour | @@ -362,6 +364,8 @@ NumberLiteral { HistogramCount { condFn<"histogram_count"> } HistogramFraction { condFn<"histogram_fraction"> } HistogramQuantile { condFn<"histogram_quantile"> } + HistogramStdDev { condFn<"histogram_stddev"> } + HistogramStdVar { condFn<"histogram_stdvar"> } HistogramSum { condFn<"histogram_sum"> } HoltWinters { condFn<"holt_winters"> } Hour { condFn<"hour"> } From 7d1110a679c7287bde05724eaf3db77e6afd00cb Mon Sep 17 00:00:00 2001 From: Michal Biesek Date: Mon, 14 Aug 2023 23:14:09 +0200 Subject: [PATCH 67/82] Update Go version Update build/test to use Go 1.21. Signed-off-by: Michal Biesek --- .github/workflows/ci.yml | 10 +++++----- .github/workflows/codeql-analysis.yml | 2 +- .promu.yml | 2 +- go.mod | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 14b788c0f..c46422723 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,7 +11,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. 
container: - image: quay.io/prometheus/golang-builder:1.20-base + image: quay.io/prometheus/golang-builder:1.21-base steps: - uses: actions/checkout@v3 - uses: prometheus/promci@v0.1.0 @@ -32,7 +32,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. container: - image: quay.io/prometheus/golang-builder:1.20-base + image: quay.io/prometheus/golang-builder:1.21-base steps: - uses: actions/checkout@v3 @@ -55,7 +55,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: '>=1.20 <1.21' + go-version: '>=1.21 <1.22' - run: | $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"} go test $TestTargets -vet=off -v @@ -66,7 +66,7 @@ jobs: runs-on: ubuntu-latest # The go verson in this image should be N-1 wrt test_go. container: - image: quay.io/prometheus/golang-builder:1.19-base + image: quay.io/prometheus/golang-builder:1.20-base steps: - uses: actions/checkout@v3 - run: make build @@ -79,7 +79,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. container: - image: quay.io/prometheus/golang-builder:1.19-base + image: quay.io/prometheus/golang-builder:1.20-base steps: - uses: actions/checkout@v3 - run: go install ./cmd/promtool/. diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 762e92016..ec8d993fb 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,7 +27,7 @@ jobs: uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: '>=1.20 <1.21' + go-version: '>=1.21 <1.22' - name: Initialize CodeQL uses: github/codeql-action/init@v2 diff --git a/.promu.yml b/.promu.yml index f724dc34f..9f5948523 100644 --- a/.promu.yml +++ b/.promu.yml @@ -1,7 +1,7 @@ go: # Whenever the Go version is updated here, # .circle/config.yml should also be updated. 
- version: 1.20 + version: 1.21 repository: path: github.com/prometheus/prometheus build: diff --git a/go.mod b/go.mod index 3e97cdda0..b84a4623d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/prometheus/prometheus -go 1.19 +go 1.20 require ( github.com/Azure/azure-sdk-for-go v65.0.0+incompatible From 04d7b4dbee6076235295a00b1af62dd93ba1ca47 Mon Sep 17 00:00:00 2001 From: Michal Biesek Date: Mon, 14 Aug 2023 23:42:02 +0200 Subject: [PATCH 68/82] lint: Fix `SA1019` Using a deprecated function `rand.Read` has been deprecated since Go 1.20 `crypto/rand.Read` is more appropriate Ref: https://tip.golang.org/doc/go1.20 Signed-off-by: Michal Biesek --- tsdb/wlog/reader_test.go | 16 ++++++++++++---- tsdb/wlog/wlog_test.go | 2 +- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go index 2c4dd622c..309bee755 100644 --- a/tsdb/wlog/reader_test.go +++ b/tsdb/wlog/reader_test.go @@ -16,11 +16,12 @@ package wlog import ( "bytes" + "crypto/rand" "encoding/binary" "fmt" "hash/crc32" "io" - "math/rand" + "math/big" "os" "path/filepath" "runtime" @@ -252,8 +253,11 @@ func generateRandomEntries(w *WL, records chan []byte) error { default: sz = pageSize * 8 } - - rec := make([]byte, rand.Int63n(sz)) + n, err := rand.Int(rand.Reader, big.NewInt(sz)) + if err != nil { + return err + } + rec := make([]byte, n.Int64()) if _, err := rand.Read(rec); err != nil { return err } @@ -262,7 +266,11 @@ func generateRandomEntries(w *WL, records chan []byte) error { // Randomly batch up records. recs = append(recs, rec) - if rand.Intn(4) < 3 { + n, err = rand.Int(rand.Reader, big.NewInt(int64(4))) + if err != nil { + return err + } + if int(n.Int64()) < 3 { if err := w.Log(recs...); err != nil { return err } diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index f9ce225b3..5602a3ee0 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -16,9 +16,9 @@ package wlog import ( "bytes" + "crypto/rand" "fmt" "io" - "math/rand" "os" "path/filepath" "testing" From f01718262a83751a0ff286ca3d8504937a321326 Mon Sep 17 00:00:00 2001 From: Gregor Zeitlinger Date: Fri, 25 Aug 2023 23:35:42 +0200 Subject: [PATCH 69/82] Unit tests for native histograms (#12668) promql: Extend testing framework to support native histograms This includes both the internal testing framework as well as the rules unit test feature of promtool. This also adds a bunch of basic tests. Many of the code level tests can now be converted to tests within the framework, and more tests can be added easily. 
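As a rough sketch of what this enables (the metric name, interval, and values below are illustrative and not taken from this patch), a test script for the internal framework can now load native histogram series and evaluate histogram functions against them:

```
# Illustrative series: the schema, sum, count and bucket values are made up for this sketch.
load 1m
  example_native_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}x5

# histogram_count() extracts the observation count from each native histogram,
# so the expected result is the count given above.
eval instant at 5m histogram_count(example_native_histogram)
  {} 4
```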
--------- Signed-off-by: Harold Dost Signed-off-by: Gregor Zeitlinger Signed-off-by: Stephen Lang Co-authored-by: Harold Dost Co-authored-by: Stephen Lang Co-authored-by: Gregor Zeitlinger --- cmd/promtool/testdata/unittest.yml | 46 + cmd/promtool/unittest.go | 38 +- docs/configuration/unit_testing_rules.md | 43 +- model/histogram/float_histogram.go | 50 + model/histogram/float_histogram_test.go | 49 +- promql/engine_test.go | 12 +- promql/parser/generated_parser.y | 198 ++- promql/parser/generated_parser.y.go | 1272 ++++++++++------- promql/parser/lex.go | 156 +- promql/parser/lex_test.go | 68 +- promql/parser/parse.go | 169 ++- promql/parser/parse_test.go | 311 ++++ promql/test.go | 100 +- promql/testdata/native_histograms.test | 226 +++ .../prometheusremotewrite/helper.go | 9 +- tsdb/head_test.go | 24 +- 16 files changed, 2171 insertions(+), 600 deletions(-) create mode 100644 promql/testdata/native_histograms.test diff --git a/cmd/promtool/testdata/unittest.yml b/cmd/promtool/testdata/unittest.yml index e6745aadf..ff511729b 100644 --- a/cmd/promtool/testdata/unittest.yml +++ b/cmd/promtool/testdata/unittest.yml @@ -10,6 +10,21 @@ tests: - series: test_full values: "0 0" + - series: test_repeat + values: "1x2" + + - series: test_increase + values: "1+1x2" + + - series: test_histogram + values: "{{schema:1 sum:-0.3 count:32.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}" + + - series: test_histogram_repeat + values: "{{sum:3 count:2 buckets:[2]}}x2" + + - series: test_histogram_increase + values: "{{sum:3 count:2 buckets:[2]}}+{{sum:1.3 count:1 buckets:[1]}}x2" + - series: test_stale values: "0 stale" @@ -31,6 +46,37 @@ tests: exp_samples: - value: 60 + # Repeat & increase + - expr: test_repeat + eval_time: 2m + exp_samples: + - value: 1 + labels: "test_repeat" + - expr: test_increase + eval_time: 2m + exp_samples: + - value: 3 + labels: "test_increase" + + # Histograms + - expr: test_histogram + eval_time: 1m + exp_samples: + - labels: "test_histogram" + histogram: "{{schema:1 sum:-0.3 count:32.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}" + + - expr: test_histogram_repeat + eval_time: 2m + exp_samples: + - labels: "test_histogram_repeat" + histogram: "{{count:2 sum:3 buckets:[2]}}" + + - expr: test_histogram_increase + eval_time: 2m + exp_samples: + - labels: "test_histogram_increase" + histogram: "{{count:4 sum:5.6 buckets:[4]}}" + # Ensure a value is stale as soon as it is marked as such. 
- expr: test_stale eval_time: 59s diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index e934f37c8..575480b03 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -29,6 +29,7 @@ import ( "github.com/prometheus/common/model" "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -346,14 +347,29 @@ Outer: var gotSamples []parsedSample for _, s := range got { gotSamples = append(gotSamples, parsedSample{ - Labels: s.Metric.Copy(), - Value: s.F, + Labels: s.Metric.Copy(), + Value: s.F, + Histogram: promql.HistogramTestExpression(s.H), }) } var expSamples []parsedSample for _, s := range testCase.ExpSamples { lb, err := parser.ParseMetric(s.Labels) + var hist *histogram.FloatHistogram + if err == nil && s.Histogram != "" { + _, values, parseErr := parser.ParseSeriesDesc("{} " + s.Histogram) + switch { + case parseErr != nil: + err = parseErr + case len(values) != 1: + err = fmt.Errorf("expected 1 value, got %d", len(values)) + case values[0].Histogram == nil: + err = fmt.Errorf("expected histogram, got %v", values[0]) + default: + hist = values[0].Histogram + } + } if err != nil { err = fmt.Errorf("labels %q: %w", s.Labels, err) errs = append(errs, fmt.Errorf(" expr: %q, time: %s, err: %w", testCase.Expr, @@ -361,8 +377,9 @@ Outer: continue Outer } expSamples = append(expSamples, parsedSample{ - Labels: lb, - Value: s.Value, + Labels: lb, + Value: s.Value, + Histogram: promql.HistogramTestExpression(hist), }) } @@ -530,14 +547,16 @@ type promqlTestCase struct { } type sample struct { - Labels string `yaml:"labels"` - Value float64 `yaml:"value"` + Labels string `yaml:"labels"` + Value float64 `yaml:"value"` + Histogram string `yaml:"histogram"` // A non-empty string means Value is ignored. } // parsedSample is a sample with parsed Labels. type parsedSample struct { - Labels labels.Labels - Value float64 + Labels labels.Labels + Value float64 + Histogram string // TestExpression() of histogram.FloatHistogram } func parsedSamplesString(pss []parsedSample) string { @@ -552,5 +571,8 @@ func parsedSamplesString(pss []parsedSample) string { } func (ps *parsedSample) String() string { + if ps.Histogram != "" { + return ps.Labels.String() + " " + ps.Histogram + } return ps.Labels.String() + " " + strconv.FormatFloat(ps.Value, 'E', -1, 64) } diff --git a/docs/configuration/unit_testing_rules.md b/docs/configuration/unit_testing_rules.md index efd168b35..73d8ddd38 100644 --- a/docs/configuration/unit_testing_rules.md +++ b/docs/configuration/unit_testing_rules.md @@ -76,18 +76,49 @@ series: # This uses expanding notation. # Expanding notation: -# 'a+bxc' becomes 'a a+b a+(2*b) a+(3*b) … a+(c*b)' -# Read this as series starts at a, then c further samples incrementing by b. -# 'a-bxc' becomes 'a a-b a-(2*b) a-(3*b) … a-(c*b)' -# Read this as series starts at a, then c further samples decrementing by b (or incrementing by negative b). +# 'a+bxn' becomes 'a a+b a+(2*b) a+(3*b) … a+(n*b)' +# Read this as series starts at a, then n further samples incrementing by b. +# 'a-bxn' becomes 'a a-b a-(2*b) a-(3*b) … a-(n*b)' +# Read this as series starts at a, then n further samples decrementing by b (or incrementing by negative b). 
+# 'axn' becomes 'a a a … a' (n times) - it's a shorthand for 'a+0xn' # There are special values to indicate missing and stale samples: -# '_' represents a missing sample from scrape -# 'stale' indicates a stale sample +# '_' represents a missing sample from scrape +# 'stale' indicates a stale sample # Examples: # 1. '-2+4x3' becomes '-2 2 6 10' - series starts at -2, then 3 further samples incrementing by 4. # 2. ' 1-2x4' becomes '1 -1 -3 -5 -7' - series starts at 1, then 4 further samples decrementing by 2. # 3. ' 1x4' becomes '1 1 1 1 1' - shorthand for '1+0x4', series starts at 1, then 4 further samples incrementing by 0. # 4. ' 1 _x3 stale' becomes '1 _ _ _ stale' - the missing sample cannot increment, so 3 missing samples are produced by the '_x3' expression. +# +# Native histogram notation: +# Native histograms can be used instead of floating point numbers using the following notation: +# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}} +# Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'. +# All properties are optional and default to 0. The order is not important. The following properties are supported: +# - schema (int): +# Currently valid schema numbers are -4 <= n <= 8. They are all for +# base-2 bucket schemas, where 1 is a bucket boundary in each case, and +# then each power of two is divided into 2^n logarithmic buckets. Or +# in other words, each bucket boundary is the previous boundary times +# 2^(2^-n). +# - sum (float): +# The sum of all observations, including the zero bucket. +# - count (non-negative float): +# The number of observations, including those that are NaN and including the zero bucket. +# - z_bucket (non-negative float): +# The sum of all observations in the zero bucket. +# - z_bucket_w (non-negative float): +# The width of the zero bucket. +# If z_bucket_w > 0, the zero bucket contains all observations -z_bucket_w <= x <= z_bucket_w. +# Otherwise, the zero bucket only contains observations that are exactly 0. +# - buckets (list of non-negative floats): +# Observation counts in positive buckets. Each represents an absolute count. +# - offset (int): +# The starting index of the first entry in the positive buckets. +# - n_buckets (list of non-negative floats): +# Observation counts in negative buckets. Each represents an absolute count. +# - n_offset (int): +# The starting index of the first entry in the negative buckets. values: ``` diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index f8766f7a8..41873278c 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -15,6 +15,7 @@ package histogram import ( "fmt" + "math" "strings" ) @@ -130,6 +131,55 @@ func (h *FloatHistogram) String() string { return sb.String() } +// TestExpression returns the string representation of this histogram as it is used in the internal PromQL testing +// framework as well as in promtool rules unit tests. +// The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series +func (h *FloatHistogram) TestExpression() string { + var res []string + m := h.Copy() + + m.Compact(math.MaxInt) // Compact to reduce the number of positive and negative spans to 1. 
+ + if m.Schema != 0 { + res = append(res, fmt.Sprintf("schema:%d", m.Schema)) + } + if m.Count != 0 { + res = append(res, fmt.Sprintf("count:%g", m.Count)) + } + if m.Sum != 0 { + res = append(res, fmt.Sprintf("sum:%g", m.Sum)) + } + if m.ZeroCount != 0 { + res = append(res, fmt.Sprintf("z_bucket:%g", m.ZeroCount)) + } + if m.ZeroThreshold != 0 { + res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold)) + } + + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { + if len(spans) > 1 { + panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) + } + for _, span := range spans { + if span.Offset != 0 { + res = append(res, fmt.Sprintf("%s:%d", offsetKey, span.Offset)) + } + } + + var bucketStr []string + for _, bucket := range buckets { + bucketStr = append(bucketStr, fmt.Sprintf("%g", bucket)) + } + if len(bucketStr) > 0 { + res = append(res, fmt.Sprintf("%s:[%s]", bucketsKey, strings.Join(bucketStr, " "))) + } + return res + } + res = addBuckets("positive", "buckets", "offset", m.PositiveBuckets, m.PositiveSpans) + res = addBuckets("negative", "n_buckets", "n_offset", m.NegativeBuckets, m.NegativeSpans) + return "{{" + strings.Join(res, " ") + "}}" +} + // ZeroBucket returns the zero bucket. func (h *FloatHistogram) ZeroBucket() Bucket[float64] { return Bucket[float64]{ diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index dd3e30427..0b712be43 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -938,6 +938,21 @@ func TestFloatHistogramCompact(t *testing.T) { NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4}, }, }, + { + "cut empty buckets in the middle", + &FloatHistogram{ + PositiveSpans: []Span{{5, 4}}, + PositiveBuckets: []float64{1, 3, 0, 2}, + }, + 0, + &FloatHistogram{ + PositiveSpans: []Span{ + {Offset: 5, Length: 2}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{1, 3, 2}, + }, + }, { "cut empty buckets at start or end of spans, even in the middle", &FloatHistogram{ @@ -955,7 +970,7 @@ func TestFloatHistogramCompact(t *testing.T) { }, }, { - "cut empty buckets at start or end but merge spans due to maxEmptyBuckets", + "cut empty buckets at start and end - also merge spans due to maxEmptyBuckets", &FloatHistogram{ PositiveSpans: []Span{{-4, 4}, {5, 3}}, PositiveBuckets: []float64{0, 0, 1, 3.3, 4.2, 0.1, 3.3}, @@ -998,18 +1013,42 @@ func TestFloatHistogramCompact(t *testing.T) { PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, }, }, + { + "cut empty buckets from the middle of a span, avoiding none due to maxEmptyBuckets", + &FloatHistogram{ + PositiveSpans: []Span{{-2, 4}}, + PositiveBuckets: []float64{1, 0, 0, 3.3}, + }, + 1, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}, {2, 1}}, + PositiveBuckets: []float64{1, 3.3}, + }, + }, + { + "cut empty buckets and merge spans due to maxEmptyBuckets", + &FloatHistogram{ + PositiveSpans: []Span{{-2, 4}, {3, 1}}, + PositiveBuckets: []float64{1, 0, 0, 3.3, 4.2}, + }, + 1, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 1}}, + PositiveBuckets: []float64{1, 3.3, 4.2}, + }, + }, { "cut empty buckets from the middle of a span, avoiding some due to maxEmptyBuckets", &FloatHistogram{ - PositiveSpans: []Span{{-4, 6}, {3, 3}}, - PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3}, + PositiveSpans: []Span{{-4, 6}, {3, 3}, {10, 2}}, + PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3, 2, 3}, NegativeSpans: []Span{{0, 2}, {3, 
5}}, NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4}, }, 1, &FloatHistogram{ - PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 3}}, - PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 3}, {10, 2}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3, 2, 3}, NegativeSpans: []Span{{0, 2}, {3, 5}}, NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4}, }, diff --git a/promql/engine_test.go b/promql/engine_test.go index 1ded05858..82e44bcbc 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -4547,6 +4547,16 @@ func TestNativeHistogram_SubOperator(t *testing.T) { vector, err := res.Vector() require.NoError(t, err) + if len(vector) == len(exp) { + for i, e := range exp { + got := vector[i].H + if got != e.H { + // Error messages are better if we compare structs, not pointers. + require.Equal(t, *e.H, *got) + } + } + } + require.Equal(t, exp, vector) } @@ -4557,8 +4567,8 @@ func TestNativeHistogram_SubOperator(t *testing.T) { } queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) }) - idx0++ } + idx0++ } } diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index b28e9d544..f7951db2b 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -21,23 +21,28 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/model/histogram" ) %} %union { - node Node - item Item - matchers []*labels.Matcher - matcher *labels.Matcher - label labels.Label - labels labels.Labels - lblList []labels.Label - strings []string - series []SequenceValue - uint uint64 - float float64 - duration time.Duration + node Node + item Item + matchers []*labels.Matcher + matcher *labels.Matcher + label labels.Label + labels labels.Labels + lblList []labels.Label + strings []string + series []SequenceValue + histogram *histogram.FloatHistogram + descriptors map[string]interface{} + bucket_set []float64 + int int64 + uint uint64 + float float64 + duration time.Duration } @@ -54,6 +59,8 @@ IDENTIFIER LEFT_BRACE LEFT_BRACKET LEFT_PAREN +OPEN_HIST +CLOSE_HIST METRIC_IDENTIFIER NUMBER RIGHT_BRACE @@ -64,6 +71,20 @@ SPACE STRING TIMES +// Histogram Descriptors. +%token histogramDescStart +%token +SUM_DESC +COUNT_DESC +SCHEMA_DESC +OFFSET_DESC +NEGATIVE_OFFSET_DESC +BUCKETS_DESC +NEGATIVE_BUCKETS_DESC +ZERO_BUCKET_DESC +ZERO_BUCKET_WIDTH_DESC +%token histogramDescEnd + // Operators. %token operatorsStart %token @@ -145,6 +166,10 @@ START_METRIC_SELECTOR %type