From 5d0a0a754265adc443e97775c9c299c94efc9852 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Wed, 28 Feb 2024 14:06:43 +0100 Subject: [PATCH 01/30] Add custom buckets to native histogram model (#13592) * add custom buckets to native histogram model * simple copy for custom bounds * return errors for unsupported add/sub operations * add test cases for string and update appendhistogram in scrape to account for new schema * check fields which are supposed to be unused but may affect results in equals * allow appending custom buckets histograms regardless of max schema Signed-off-by: Jeanette Tan --- model/histogram/float_histogram.go | 299 +++++--- model/histogram/float_histogram_test.go | 924 +++++++++++++++++++++++- model/histogram/generic.go | 96 ++- model/histogram/generic_test.go | 4 +- model/histogram/histogram.go | 219 ++++-- model/histogram/histogram_test.go | 458 +++++++++++- promql/engine.go | 118 ++- promql/functions.go | 69 +- promql/parser/parse.go | 12 +- rules/manager_test.go | 2 +- scrape/scrape.go | 10 +- scrape/scrape_test.go | 12 +- scrape/target.go | 13 +- scrape/target_test.go | 39 + util/annotations/annotations.go | 32 +- 15 files changed, 2036 insertions(+), 271 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 19a92b3d5a..ded7dc400d 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -30,11 +30,12 @@ import ( type FloatHistogram struct { // Counter reset information. CounterResetHint CounterResetHint - // Currently valid schema numbers are -4 <= n <= 8. They are all for - // base-2 bucket schemas, where 1 is a bucket boundary in each case, and - // then each power of two is divided into 2^n logarithmic buckets. Or - // in other words, each bucket boundary is the previous boundary times - // 2^(2^-n). + // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets, + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in + // each case, and then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). Another valid schema number is 127 for custom buckets, defined by + // the CustomBounds field. Schema int32 // Width of the zero bucket. ZeroThreshold float64 @@ -49,6 +50,16 @@ type FloatHistogram struct { // Observation counts in buckets. Each represents an absolute count and // must be zero or positive. PositiveBuckets, NegativeBuckets []float64 + // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil. + // This slice is interned, to be treated as immutable and copied by reference. + // These numbers should be strictly increasing. This field is only used when the + // schema is 127, and the ZeroThreshold, ZeroCount, NegativeSpans and NegativeBuckets + // fields are not used. + CustomBounds []float64 +} + +func (h *FloatHistogram) UsesCustomBuckets() bool { + return IsCustomBucketsSchema(h.Schema) } // Copy returns a deep copy of the Histogram. 
@@ -56,28 +67,34 @@ func (h *FloatHistogram) Copy() *FloatHistogram { c := FloatHistogram{ CounterResetHint: h.CounterResetHint, Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: h.ZeroCount, Count: h.Count, Sum: h.Sum, } + if h.UsesCustomBuckets() { + c.CustomBounds = h.CustomBounds + } else { + c.ZeroThreshold = h.ZeroThreshold + c.ZeroCount = h.ZeroCount + + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]float64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + } + if len(h.PositiveSpans) != 0 { c.PositiveSpans = make([]Span, len(h.PositiveSpans)) copy(c.PositiveSpans, h.PositiveSpans) } - if len(h.NegativeSpans) != 0 { - c.NegativeSpans = make([]Span, len(h.NegativeSpans)) - copy(c.NegativeSpans, h.NegativeSpans) - } if len(h.PositiveBuckets) != 0 { c.PositiveBuckets = make([]float64, len(h.PositiveBuckets)) copy(c.PositiveBuckets, h.PositiveBuckets) } - if len(h.NegativeBuckets) != 0 { - c.NegativeBuckets = make([]float64, len(h.NegativeBuckets)) - copy(c.NegativeBuckets, h.NegativeBuckets) - } return &c } @@ -87,22 +104,35 @@ func (h *FloatHistogram) Copy() *FloatHistogram { func (h *FloatHistogram) CopyTo(to *FloatHistogram) { to.CounterResetHint = h.CounterResetHint to.Schema = h.Schema - to.ZeroThreshold = h.ZeroThreshold - to.ZeroCount = h.ZeroCount to.Count = h.Count to.Sum = h.Sum + if h.UsesCustomBuckets() { + to.ZeroThreshold = 0 + to.ZeroCount = 0 + + to.NegativeSpans = clearIfNotNil(to.NegativeSpans) + to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) + + to.CustomBounds = h.CustomBounds + } else { + to.ZeroThreshold = h.ZeroThreshold + to.ZeroCount = h.ZeroCount + + to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) + copy(to.NegativeSpans, h.NegativeSpans) + + to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) + copy(to.NegativeBuckets, h.NegativeBuckets) + + to.CustomBounds = clearIfNotNil(to.CustomBounds) + } + to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) copy(to.PositiveSpans, h.PositiveSpans) - to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) - copy(to.NegativeSpans, h.NegativeSpans) - to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) copy(to.PositiveBuckets, h.PositiveBuckets) - - to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) - copy(to.NegativeBuckets, h.NegativeBuckets) } // CopyToSchema works like Copy, but the returned deep copy has the provided @@ -113,6 +143,12 @@ func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { // Fast path. return h.Copy() } + if h.UsesCustomBuckets() { + panic(fmt.Errorf("cannot reduce resolution to %d when there are custom buckets", targetSchema)) + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } if targetSchema > h.Schema { panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema)) } @@ -212,12 +248,16 @@ func (h *FloatHistogram) TestExpression() string { // ZeroBucket returns the zero bucket. func (h *FloatHistogram) ZeroBucket() Bucket[float64] { + if h.UsesCustomBuckets() { + panic("histograms with custom buckets have no zero bucket") + } return Bucket[float64]{ Lower: -h.ZeroThreshold, Upper: h.ZeroThreshold, LowerInclusive: true, UpperInclusive: true, Count: h.ZeroCount, + // Index is irrelevant for the zero bucket. 
} } @@ -263,9 +303,18 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { // // The method reconciles differences in the zero threshold and in the schema, and // changes them if needed. The other histogram will not be modified in any case. +// Adding is currently only supported between 2 exponential histograms, or between +// 2 custom buckets histograms with the exact same custom bounds. // // This method returns a pointer to the receiving histogram for convenience. -func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { +func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) { + if h.UsesCustomBuckets() != other.UsesCustomBuckets() { + return nil, ErrHistogramsIncompatibleSchema + } + if h.UsesCustomBuckets() && !floatBucketsMatch(h.CustomBounds, other.CustomBounds) { + return nil, ErrHistogramsIncompatibleBounds + } + switch { case other.CounterResetHint == h.CounterResetHint: // Adding apples to apples, all good. No need to change anything. @@ -290,19 +339,28 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place } - otherZeroCount := h.reconcileZeroBuckets(other) - h.ZeroCount += otherZeroCount + if !h.UsesCustomBuckets() { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount += otherZeroCount + } h.Count += other.Count h.Sum += other.Sum var ( - hPositiveSpans = h.PositiveSpans - hPositiveBuckets = h.PositiveBuckets - hNegativeSpans = h.NegativeSpans - hNegativeBuckets = h.NegativeBuckets - + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets otherPositiveSpans = other.PositiveSpans otherPositiveBuckets = other.PositiveBuckets + ) + + if h.UsesCustomBuckets() { + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + return h, nil + } + + var ( + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets otherNegativeSpans = other.NegativeSpans otherNegativeBuckets = other.NegativeBuckets ) @@ -321,24 +379,40 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) - return h + return h, nil } // Sub works like Add but subtracts the other histogram. 
-func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { - otherZeroCount := h.reconcileZeroBuckets(other) - h.ZeroCount -= otherZeroCount +func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) { + if h.UsesCustomBuckets() != other.UsesCustomBuckets() { + return nil, ErrHistogramsIncompatibleSchema + } + if h.UsesCustomBuckets() && !floatBucketsMatch(h.CustomBounds, other.CustomBounds) { + return nil, ErrHistogramsIncompatibleBounds + } + + if !h.UsesCustomBuckets() { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount -= otherZeroCount + } h.Count -= other.Count h.Sum -= other.Sum var ( - hPositiveSpans = h.PositiveSpans - hPositiveBuckets = h.PositiveBuckets - hNegativeSpans = h.NegativeSpans - hNegativeBuckets = h.NegativeBuckets - + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets otherPositiveSpans = other.PositiveSpans otherPositiveBuckets = other.PositiveBuckets + ) + + if h.UsesCustomBuckets() { + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + return h, nil + } + + var ( + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets otherNegativeSpans = other.NegativeSpans otherNegativeBuckets = other.NegativeBuckets ) @@ -356,7 +430,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) - return h + return h, nil } // Equals returns true if the given float histogram matches exactly. @@ -365,31 +439,44 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { // but they must represent the same bucket layout to match. // Sum, Count, ZeroCount and bucket values are compared based on their bit patterns // because this method is about data equality rather than mathematical equality. +// We ignore fields that are not used based on the exponential / custom buckets schema, +// but check fields where differences may cause unintended behaviour even if they are not +// supposed to be used according to the schema. 
func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { if h2 == nil { return false } - if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || - math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) || + if h.Schema != h2.Schema || math.Float64bits(h.Count) != math.Float64bits(h2.Count) || math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { return false } + if h.UsesCustomBuckets() { + if !floatBucketsMatch(h.CustomBounds, h2.CustomBounds) { + return false + } + } + + if h.ZeroThreshold != h2.ZeroThreshold || + math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) { + return false + } + + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { + return false + } + if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { return false } - if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { - return false - } - if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { return false } - if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { - return false - } return true } @@ -403,6 +490,7 @@ func (h *FloatHistogram) Size() int { negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32). posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64). negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64). + customBoundSize := len(h.CustomBounds) * 8 // 8 bytes (float64). // Total size of the struct. @@ -417,9 +505,10 @@ func (h *FloatHistogram) Size() int { // fh.NegativeSpans is 24 bytes. // fh.PositiveBuckets is 24 bytes. // fh.NegativeBuckets is 24 bytes. - structSize := 144 + // fh.CustomBounds is 24 bytes. + structSize := 168 - return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize } // Compact eliminates empty buckets at the beginning and end of each span, then @@ -504,6 +593,12 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool { if h.Count < previous.Count { return true } + if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !floatBucketsMatch(h.CustomBounds, previous.CustomBounds)) { + // Mark that something has changed or that the application has been restarted. However, this does + // not matter so much since the change in schema will be handled directly in the chunks and PromQL + // functions. + return true + } if h.Schema > previous.Schema { return true } @@ -609,7 +704,7 @@ func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] { // positive buckets in descending order (starting at the highest bucket and // going down towards the zero bucket). func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] { - it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) + it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomBounds) return &it } @@ -617,7 +712,7 @@ func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] // negative buckets in ascending order (starting at the lowest bucket and going // up towards the zero bucket). 
func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] { - it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) + it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil) return &it } @@ -629,7 +724,7 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ h: h, - leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false), + leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil), rightIter: h.floatBucketIterator(true, 0, h.Schema), state: -1, } @@ -643,30 +738,52 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ h: h, - leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true), + leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomBounds), rightIter: h.floatBucketIterator(false, 0, h.Schema), state: -1, } } // Validate validates consistency between span and bucket slices. Also, buckets are checked -// against negative values. +// against negative values. We check to make sure there are no unexpected fields or field values +// based on the exponential / custom buckets schema. // We do not check for h.Count being at least as large as the sum of the // counts in the buckets because floating point precision issues can // create false positives here. func (h *FloatHistogram) Validate() error { - if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { - return fmt.Errorf("negative side: %w", err) - } - if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { - return fmt.Errorf("positive side: %w", err) - } var nCount, pCount float64 - err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) - if err != nil { - return fmt.Errorf("negative side: %w", err) + if h.UsesCustomBuckets() { + if err := checkHistogramCustomBounds(h.CustomBounds, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("custom buckets: %w", err) + } + if h.ZeroCount != 0 { + return fmt.Errorf("custom buckets: must have zero count of 0") + } + if h.ZeroThreshold != 0 { + return fmt.Errorf("custom buckets: must have zero threshold of 0") + } + if len(h.NegativeSpans) > 0 { + return fmt.Errorf("custom buckets: must not have negative spans") + } + if len(h.NegativeBuckets) > 0 { + return fmt.Errorf("custom buckets: must not have negative buckets") + } + } else { + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("positive side: %w", err) + } + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return fmt.Errorf("negative side: %w", err) + } + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) + if err != nil { + return fmt.Errorf("negative side: %w", err) + } + if h.CustomBounds != nil { + return fmt.Errorf("histogram with exponential schema must not have custom bounds") + } } - err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false) + err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false) if err != nil { return fmt.Errorf("positive side: %w", err) } @@ -790,10 +907,11 @@ func (h *FloatHistogram) 
reconcileZeroBuckets(other *FloatHistogram) float64 { // If positive is true, the returned iterator iterates through the positive // buckets, otherwise through the negative buckets. // -// If absoluteStartValue is < the lowest absolute value of any upper bucket -// boundary, the iterator starts with the first bucket. Otherwise, it will skip -// all buckets with an absolute value of their upper boundary ≤ -// absoluteStartValue. +// Only for exponential schemas, if absoluteStartValue is < the lowest absolute +// value of any upper bucket boundary, the iterator starts with the first bucket. +// Otherwise, it will skip all buckets with an absolute value of their upper boundary ≤ +// absoluteStartValue. For custom bucket schemas, absoluteStartValue is ignored and +// no buckets are skipped. // // targetSchema must be ≤ the schema of FloatHistogram (and of course within the // legal values for schemas in general). The buckets are merged to match the @@ -801,6 +919,12 @@ func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { func (h *FloatHistogram) floatBucketIterator( positive bool, absoluteStartValue float64, targetSchema int32, ) floatBucketIterator { + if h.UsesCustomBuckets() && targetSchema != h.Schema { + panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema")) + } + if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) { + panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema")) + } if targetSchema > h.Schema { panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema)) } @@ -816,6 +940,7 @@ func (h *FloatHistogram) floatBucketIterator( if positive { i.spans = h.PositiveSpans i.buckets = h.PositiveBuckets + i.customBounds = h.CustomBounds } else { i.spans = h.NegativeSpans i.buckets = h.NegativeBuckets @@ -825,14 +950,15 @@ func (h *FloatHistogram) floatBucketIterator( // reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators. func newReverseFloatBucketIterator( - spans []Span, buckets []float64, schema int32, positive bool, + spans []Span, buckets []float64, schema int32, positive bool, customBounds []float64, ) reverseFloatBucketIterator { r := reverseFloatBucketIterator{ baseBucketIterator: baseBucketIterator[float64, float64]{ - schema: schema, - spans: spans, - buckets: buckets, - positive: positive, + schema: schema, + spans: spans, + buckets: buckets, + positive: positive, + customBounds: customBounds, }, } @@ -946,9 +1072,9 @@ func (i *floatBucketIterator) Next() bool { } } - // Skip buckets before absoluteStartValue. + // Skip buckets before absoluteStartValue for exponential schemas. // TODO(beorn7): Maybe do something more efficient than this recursive call. - if !i.boundReachedStartValue && getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue { + if !i.boundReachedStartValue && IsExponentialSchema(i.targetSchema) && getBoundExponential(i.currIdx, i.targetSchema) <= i.absoluteStartValue { return i.Next() } i.boundReachedStartValue = true @@ -1010,14 +1136,7 @@ func (i *allFloatBucketIterator) Next() bool { case 0: i.state = 1 if i.h.ZeroCount > 0 { - i.currBucket = Bucket[float64]{ - Lower: -i.h.ZeroThreshold, - Upper: i.h.ZeroThreshold, - LowerInclusive: true, - UpperInclusive: true, - Count: i.h.ZeroCount, - // Index is irrelevant for the zero bucket. 
- } + i.currBucket = i.h.ZeroBucket() return true } return i.Next() @@ -1076,7 +1195,7 @@ func addBuckets( for _, spanB := range spansB { indexB += spanB.Offset for j := 0; j < int(spanB.Length); j++ { - if lowerThanThreshold && getBound(indexB, schema) <= threshold { + if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold { goto nextLoop } lowerThanThreshold = false @@ -1192,6 +1311,12 @@ func floatBucketsMatch(b1, b2 []float64) bool { // ReduceResolution reduces the float histogram's spans, buckets into target schema. // The target schema must be smaller than the current float histogram's schema. func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { + if h.UsesCustomBuckets() { + panic("cannot reduce resolution when there are custom buckets") + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } if targetSchema >= h.Schema { panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) } diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 49fb77ab0b..400645cad4 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -131,6 +131,46 @@ func TestFloatHistogramMul(t *testing.T) { NegativeBuckets: []float64{9, 3, 15, 18}, }, }, + { + "no-op with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3}, + }, + 1, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3}, + }, + }, + { + "triple with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + 3, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 90, + Sum: 69, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{3, 0, 9, 12, 21}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + }, } for _, c := range cases { @@ -178,6 +218,21 @@ func TestFloatHistogramCopy(t *testing.T) { }, expected: &FloatHistogram{}, }, + { + name: "with custom buckets", + orig: &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + CustomBounds: []float64{1, 2, 3}, + }, + expected: &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + CustomBounds: []float64{1, 2, 3}, + }, + }, } for _, tcase := range cases { @@ -228,6 +283,21 @@ func TestFloatHistogramCopyTo(t *testing.T) { }, expected: &FloatHistogram{}, }, + { + name: "with custom buckets", + orig: &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + CustomBounds: []float64{1, 2, 3}, + }, + expected: &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + CustomBounds: []float64{1, 2, 3}, + }, + }, } for _, tcase := range cases { @@ -339,6 +409,46 @@ func TestFloatHistogramDiv(t *testing.T) { NegativeBuckets: []float64{1.5, 0.5, 2.5, 3}, }, }, + { + "no-op with 
custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3}, + }, + 1, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3}, + }, + }, + { + "half with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + 2, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 11.5, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0.5, 0, 1.5, 2, 3.5}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + }, } for _, c := range cases { @@ -937,6 +1047,138 @@ func TestFloatHistogramDetectReset(t *testing.T) { }, true, }, + { + "no buckets to some buckets with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + CustomBounds: []float64{1, 2, 3}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3}, + }, + false, + }, + { + "some buckets to no buckets with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + CustomBounds: []float64{1, 2, 3}, + }, + true, + }, + { + "one bucket appears, nothing else changes with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + false, + }, + { + "one bucket disappears, nothing else changes with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + true, + }, + { + "an unpopulated bucket disappears, nothing else changes with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + false, + }, + { + "one positive bucket goes up with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: 
[]Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.3, 0.1}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + false, + }, + { + "one positive bucket goes down with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.1, 0.1}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + true, + }, } for _, c := range cases { @@ -1230,6 +1472,70 @@ func TestFloatHistogramCompact(t *testing.T) { NegativeBuckets: []float64{2, 3}, }, }, + { + "nothing should happen with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3}, + }, + 0, + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomBounds: []float64{1, 2, 3}, + }, + }, + { + "eliminate zero offsets with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}, {0, 3}, {0, 1}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + 0, + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 5}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + }, + { + "eliminate multiple zero length spans with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + 0, + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 2}, {9, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + }, + { + "cut empty buckets at start and end with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 4}, {5, 6}}, + PositiveBuckets: []float64{0, 0, 1, 3.3, 4.2, 0.1, 3.3, 0, 0, 0}, + CustomBounds: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }, + 0, + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{2, 2}, {5, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomBounds: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }, + }, } for _, c := range cases { @@ -1245,6 +1551,7 @@ func TestFloatHistogramAdd(t *testing.T) { cases := []struct { name string in1, in2, expected *FloatHistogram + expErrMsg string }{ { "same bucket layout", @@ -1278,6 +1585,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 2}, {3, 2}}, NegativeBuckets: []float64{4, 2, 9, 10}, }, + "", }, { "same bucket layout, defined differently", @@ -1311,6 +1619,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 5}, {0, 2}}, NegativeBuckets: []float64{4, 2, 0, 0, 0, 9, 10}, }, + "", }, { "non-overlapping spans", @@ -1344,9 +1653,10 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{-9, 2}, {3, 2}, {5, 2}, 
{3, 2}}, NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, }, + "", }, { - "non-overlapping inverted order", + "non-overlapping spans inverted order", &FloatHistogram{ ZeroThreshold: 0.01, ZeroCount: 8, @@ -1377,6 +1687,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{-6, 2}, {1, 2}, {4, 2}, {3, 2}}, NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, }, + "", }, { "overlapping spans", @@ -1410,6 +1721,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "overlapping spans inverted order", @@ -1443,6 +1755,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "schema change", @@ -1478,6 +1791,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "larger zero bucket in first histogram", @@ -1511,6 +1825,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "larger zero bucket in second histogram", @@ -1544,6 +1859,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "larger zero threshold in first histogram ends up inside a populated bucket of second histogram", @@ -1577,6 +1893,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "larger zero threshold in second histogram ends up inside a populated bucket of first histogram", @@ -1610,6 +1927,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "schema change combined with larger zero bucket in second histogram", @@ -1645,6 +1963,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "schema change combined with larger zero bucket in first histogram", @@ -1680,36 +1999,261 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", + }, + { + "same custom bucket layout", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 26, + Sum: 3.579, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 5, 7, 13}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "same custom bucket layout, defined differently", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {1, 1}, {0, 2}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {1, 2}, {0, 1}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: 
CustomBucketsSchema, + Count: 26, + Sum: 3.579, + PositiveSpans: []Span{{0, 2}, {1, 1}, {0, 2}}, + PositiveBuckets: []float64{1, 0, 5, 7, 13}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "non-overlapping spans with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 20, + Sum: 1.234, + PositiveSpans: []Span{{2, 2}, {3, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 35, + Sum: 3.579, + PositiveSpans: []Span{{0, 4}, {0, 6}}, + PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "non-overlapping spans inverted order with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 20, + Sum: 1.234, + PositiveSpans: []Span{{2, 2}, {3, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 35, + Sum: 3.579, + PositiveSpans: []Span{{0, 4}, {0, 6}}, + PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "overlapping spans with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 27, + Sum: 1.234, + PositiveSpans: []Span{{1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 42, + Sum: 3.579, + PositiveSpans: []Span{{0, 4}, {0, 4}}, + PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "overlapping spans inverted order with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 27, + Sum: 1.234, + PositiveSpans: []Span{{1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 42, + Sum: 3.579, + PositiveSpans: []Span{{0, 4}, {0, 4}}, + PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "different custom bucket layout", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomBounds: []float64{1, 2, 3, 4, 5}, + }, + nil, + "cannot apply this operation on custom buckets histograms with different 
custom bounds", + }, + { + "mix exponential and custom buckets histograms", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 59, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{2, 5, 4, 2, 3, 6, 7, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{4, 10, 1, 4, 14, 7}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 12, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + nil, + "cannot apply this operation on histograms with a mix of exponential and custom bucket schemas", }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - testHistogramAdd(t, c.in1, c.in2, c.expected) - testHistogramAdd(t, c.in2, c.in1, c.expected) + testHistogramAdd(t, c.in1, c.in2, c.expected, c.expErrMsg) + testHistogramAdd(t, c.in2, c.in1, c.expected, c.expErrMsg) }) } } -func testHistogramAdd(t *testing.T, a, b, expected *FloatHistogram) { +func testHistogramAdd(t *testing.T, a, b, expected *FloatHistogram, expErrMsg string) { var ( aCopy = a.Copy() bCopy = b.Copy() - expectedCopy = expected.Copy() + expectedCopy *FloatHistogram ) - res := aCopy.Add(bCopy) + if expected != nil { + expectedCopy = expected.Copy() + } - res.Compact(0) - expectedCopy.Compact(0) + res, err := aCopy.Add(bCopy) + if expErrMsg != "" { + require.EqualError(t, err, expErrMsg) + } else { + require.NoError(t, err) + } - require.Equal(t, expectedCopy, res) + if expected != nil { + res.Compact(0) + expectedCopy.Compact(0) - // Has it also happened in-place? - require.Equal(t, expectedCopy, aCopy) + require.Equal(t, expectedCopy, res) - // Check that the argument was not mutated. - require.Equal(t, b, bCopy) + // Has it also happened in-place? + require.Equal(t, expectedCopy, aCopy) + + // Check that the argument was not mutated. 
+ require.Equal(t, b, bCopy) + } } func TestFloatHistogramSub(t *testing.T) { @@ -1718,6 +2262,7 @@ func TestFloatHistogramSub(t *testing.T) { cases := []struct { name string in1, in2, expected *FloatHistogram + expErrMsg string }{ { "same bucket layout", @@ -1751,6 +2296,7 @@ func TestFloatHistogramSub(t *testing.T) { NegativeSpans: []Span{{3, 2}, {3, 2}}, NegativeBuckets: []float64{2, 0, 1, 2}, }, + "", }, { "schema change", @@ -1786,38 +2332,126 @@ func TestFloatHistogramSub(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{1, 9, 1, 4, 9, 1}, }, + "", + }, + { + "same custom bucket layout", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 23, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 12, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 4, + Sum: 11, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 1, 1, 1}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "different custom bucket layout", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 23, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 12, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomBounds: []float64{1, 2, 3, 4, 5}, + }, + nil, + "cannot apply this operation on custom buckets histograms with different custom bounds", + }, + { + "mix exponential and custom buckets histograms", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 59, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{2, 5, 4, 2, 3, 6, 7, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{4, 10, 1, 4, 14, 7}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 12, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + nil, + "cannot apply this operation on histograms with a mix of exponential and custom bucket schemas", }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - testFloatHistogramSub(t, c.in1, c.in2, c.expected) + testFloatHistogramSub(t, c.in1, c.in2, c.expected, c.expErrMsg) - expectedNegative := c.expected.Copy().Mul(-1) - testFloatHistogramSub(t, c.in2, c.in1, expectedNegative) + var expectedNegative *FloatHistogram + if c.expected != nil { + expectedNegative = c.expected.Copy().Mul(-1) + } + testFloatHistogramSub(t, c.in2, c.in1, expectedNegative, c.expErrMsg) }) } } -func testFloatHistogramSub(t *testing.T, a, b, expected *FloatHistogram) { +func testFloatHistogramSub(t *testing.T, a, b, expected *FloatHistogram, expErrMsg string) { var ( aCopy = a.Copy() bCopy = b.Copy() - expectedCopy = expected.Copy() + expectedCopy *FloatHistogram ) - res := aCopy.Sub(bCopy) + if expected != nil { + expectedCopy = expected.Copy() + } - res.Compact(0) - expectedCopy.Compact(0) + res, err := aCopy.Sub(bCopy) + if expErrMsg != "" { + require.EqualError(t, err, expErrMsg) + } else { + require.NoError(t, err) + } - require.Equal(t, expectedCopy, res) + if expected != nil { + 
res.Compact(0) + expectedCopy.Compact(0) - // Has it also happened in-place? - require.Equal(t, expectedCopy, aCopy) + require.Equal(t, expectedCopy, res) - // Check that the argument was not mutated. - require.Equal(t, b, bCopy) + // Has it also happened in-place? + require.Equal(t, expectedCopy, aCopy) + + // Check that the argument was not mutated. + require.Equal(t, b, bCopy) + } } func TestFloatHistogramCopyToSchema(t *testing.T) { @@ -1878,6 +2512,26 @@ func TestFloatHistogramCopyToSchema(t *testing.T) { NegativeBuckets: []float64{3, 1, 5, 6}, }, }, + { + "no schema change for custom buckets", + CustomBucketsSchema, + &FloatHistogram{ + Count: 30, + Sum: 2.345, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + CustomBounds: []float64{1, 2, 3, 4, 5, 6, 7}, + }, + &FloatHistogram{ + Count: 30, + Sum: 2.345, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + CustomBounds: []float64{1, 2, 3, 4, 5, 6, 7}, + }, + }, } for _, c := range cases { @@ -2450,6 +3104,110 @@ func TestFloatBucketIteratorTargetSchema(t *testing.T) { require.False(t, it.Next(), "negative iterator not exhausted") } +func TestFloatCustomBucketsIterators(t *testing.T) { + cases := []struct { + h FloatHistogram + expPositiveBuckets []Bucket[float64] + }{ + { + h: FloatHistogram{ + Count: 622, + Sum: 10008.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{100, 344, 123, 55}, + CustomBounds: []float64{10, 25, 50, 100, 500}, + }, + expPositiveBuckets: []Bucket[float64]{ + {Lower: math.Inf(-1), Upper: 10, LowerInclusive: true, UpperInclusive: true, Count: 100, Index: 0}, + {Lower: 10, Upper: 25, LowerInclusive: false, UpperInclusive: true, Count: 344, Index: 1}, + {Lower: 50, Upper: 100, LowerInclusive: false, UpperInclusive: true, Count: 123, Index: 3}, + {Lower: 500, Upper: math.Inf(1), LowerInclusive: false, UpperInclusive: true, Count: 55, Index: 5}, + }, + }, + { + h: FloatHistogram{ + Count: 622, + Sum: 10008.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{100, 344, 123, 55}, + CustomBounds: []float64{-10, -5, 0, 10, 25}, + }, + expPositiveBuckets: []Bucket[float64]{ + {Lower: math.Inf(-1), Upper: -10, LowerInclusive: true, UpperInclusive: true, Count: 100, Index: 0}, + {Lower: -10, Upper: -5, LowerInclusive: false, UpperInclusive: true, Count: 344, Index: 1}, + {Lower: 0, Upper: 10, LowerInclusive: false, UpperInclusive: true, Count: 123, Index: 3}, + {Lower: 25, Upper: math.Inf(1), LowerInclusive: false, UpperInclusive: true, Count: 55, Index: 5}, + }, + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + { + it := c.h.AllBucketIterator() + for i, b := range c.expPositiveBuckets { + require.True(t, it.Next(), "all bucket iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "all bucket iterator not exhausted") + + it = c.h.AllReverseBucketIterator() + length := len(c.expPositiveBuckets) + for j := 0; j < length; j++ { + i := length - j - 1 + b := c.expPositiveBuckets[i] + require.True(t, it.Next(), "all reverse bucket iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), 
"all reverse bucket iterator not exhausted") + + it = c.h.PositiveBucketIterator() + for i, b := range c.expPositiveBuckets { + require.True(t, it.Next(), "positive bucket iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "positive bucket iterator not exhausted") + + it = c.h.PositiveReverseBucketIterator() + for j := 0; j < length; j++ { + i := length - j - 1 + b := c.expPositiveBuckets[i] + require.True(t, it.Next(), "positive reverse bucket iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "positive reverse bucket iterator not exhausted") + + it = c.h.NegativeBucketIterator() + require.False(t, it.Next(), "negative bucket iterator not exhausted") + + it = c.h.NegativeReverseBucketIterator() + require.False(t, it.Next(), "negative reverse bucket iterator not exhausted") + } + { + it := c.h.floatBucketIterator(true, 0, CustomBucketsSchema) + for i, b := range c.expPositiveBuckets { + require.True(t, it.Next(), "positive iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "positive iterator not exhausted") + + it = c.h.floatBucketIterator(false, 0, CustomBucketsSchema) + require.False(t, it.Next(), "negative iterator not exhausted") + } + }) + } +} + // TestFloatHistogramEquals tests FloatHistogram with float-specific cases that // cannot be covered by TestHistogramEquals. func TestFloatHistogramEquals(t *testing.T) { @@ -2498,6 +3256,42 @@ func TestFloatHistogramEquals(t *testing.T) { hNegBucketNaN.NegativeBuckets[0] = math.NaN() notEquals(h1, *hNegBucketNaN) equals(*hNegBucketNaN, *hNegBucketNaN) + + // Custom bounds are defined for exponential schema. + hCustom := h1.Copy() + hCustom.CustomBounds = []float64{1, 2, 3} + equals(h1, *hCustom) + + cbh1 := FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 2.2, + Sum: 9.7, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{3}, + CustomBounds: []float64{1, 2, 3}, + } + + require.NoError(t, cbh1.Validate()) + + cbh2 := cbh1.Copy() + equals(cbh1, *cbh2) + + // Has different custom bounds for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.CustomBounds = []float64{1, 2, 3, 4} + notEquals(cbh1, *cbh2) + + // Has non-empty negative spans and buckets for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.NegativeSpans = []Span{{Offset: 0, Length: 1}} + cbh2.NegativeBuckets = []float64{1} + notEquals(cbh1, *cbh2) + + // Has non-zero zero count and threshold for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.ZeroThreshold = 0.1 + cbh2.ZeroCount = 10 + notEquals(cbh1, *cbh2) } func TestFloatHistogramSize(t *testing.T) { @@ -2519,8 +3313,9 @@ func TestFloatHistogramSize(t *testing.T) { PositiveBuckets: nil, // 24 bytes. NegativeSpans: nil, // 24 bytes. NegativeBuckets: nil, // 24 bytes. + CustomBounds: nil, // 24 bytes. }, - 8 + 4 + 4 + 8 + 8 + 8 + 8 + 24 + 24 + 24 + 24, + 8 + 4 + 4 + 8 + 8 + 8 + 8 + 24 + 24 + 24 + 24 + 24, }, { "complete struct", @@ -2540,8 +3335,29 @@ func TestFloatHistogramSize(t *testing.T) { {3, 2}, // 2 * 4 bytes. {3, 2}}, // 2 * 4 bytes. NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, // 24 bytes + 4 * 8 bytes. + CustomBounds: nil, // 24 bytes. }, - 8 + 4 + 4 + 8 + 8 + 8 + 8 + (24 + 2*4 + 2*4) + (24 + 2*4 + 2*4) + (24 + 4*8) + (24 + 4*8), + 8 + 4 + 4 + 8 + 8 + 8 + 8 + (24 + 2*4 + 2*4) + (24 + 2*4 + 2*4) + (24 + 4*8) + (24 + 4*8) + 24, + }, + { + "complete struct with custom buckets", + &FloatHistogram{ // 8 bytes. 
+ CounterResetHint: 0, // 1 byte. + Schema: CustomBucketsSchema, // 4 bytes. + ZeroThreshold: 0, // 8 bytes. + ZeroCount: 0, // 8 bytes. + Count: 3493.3, // 8 bytes. + Sum: 2349209.324, // 8 bytes. + PositiveSpans: []Span{ // 24 bytes. + {0, 1}, // 2 * 4 bytes. + {2, 3}, // 2 * 4 bytes. + }, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, // 24 bytes + 4 * 8 bytes. + NegativeSpans: nil, // 24 bytes. + NegativeBuckets: nil, // 24 bytes. + CustomBounds: []float64{1, 2, 3}, // 24 bytes + 3 * 8 bytes. + }, + 8 + 4 + 4 + 8 + 8 + 8 + 8 + (24 + 2*4 + 2*4) + (24 + 4*8) + 24 + 24 + (24 + 3*8), }, } @@ -2552,6 +3368,58 @@ func TestFloatHistogramSize(t *testing.T) { } } +func TestFloatHistogramString(t *testing.T) { + cases := []struct { + name string + fh *FloatHistogram + expected string + }{ + { + "exponential histogram", + &FloatHistogram{ + Schema: 1, + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{ + {-2, 1}, + {2, 3}, + }, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{ + {3, 2}, + {3, 2}, + }, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + `{count:3493.3, sum:2.349209324e+06, [-22.62741699796952,-16):1000, [-16,-11.31370849898476):123400, [-4,-2.82842712474619):3, [-2.82842712474619,-2):3.1, [-0.01,0.01]:5.5, (0.35355339059327373,0.5]:1, (1,1.414213562373095]:3.3, (1.414213562373095,2]:4.2, (2,2.82842712474619]:0.1}`, + }, + { + "custom buckets histogram", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{ + {0, 1}, + {2, 4}, + }, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 5}, + CustomBounds: []float64{1, 2, 5, 10, 15, 20}, + }, + `{count:3493.3, sum:2.349209324e+06, [-Inf,1]:1, (5,10]:3.3, (10,15]:4.2, (15,20]:0.1, (20,+Inf]:5}`, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.NoError(t, c.fh.Validate()) + require.Equal(t, c.expected, c.fh.String()) + }) + } +} + func BenchmarkFloatHistogramAllBucketIterator(b *testing.B) { rng := rand.New(rand.NewSource(0)) diff --git a/model/histogram/generic.go b/model/histogram/generic.go index 7e1cc4b605..c6780c2003 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -20,14 +20,32 @@ import ( "strings" ) -var ( - ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") - ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") - ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") - ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") - ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") +const ( + ExponentialSchemaMax int32 = 8 + ExponentialSchemaMin int32 = -4 + CustomBucketsSchema int32 = 127 ) +var ( + ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") + ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") + ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") + ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") + 
ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") + ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few") + ErrHistogramCustomBucketsInvalid = errors.New("histogram custom bounds must be in strictly increasing order") + ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas") + ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds") +) + +func IsCustomBucketsSchema(s int32) bool { + return s == CustomBucketsSchema +} + +func IsExponentialSchema(s int32) bool { + return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax +} + // BucketCount is a type constraint for the count in a bucket, which can be // float64 (for type FloatHistogram) or uint64 (for type Histogram). type BucketCount interface { @@ -115,6 +133,8 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct { currCount IBC // Count in the current bucket. currIdx int32 // The actual bucket index. + + customBounds []float64 // Bounds (usually upper) for histograms with custom buckets. } func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] { @@ -128,14 +148,19 @@ func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] { Index: b.currIdx, } if b.positive { - bucket.Upper = getBound(b.currIdx, schema) - bucket.Lower = getBound(b.currIdx-1, schema) + bucket.Upper = getBound(b.currIdx, schema, b.customBounds) + bucket.Lower = getBound(b.currIdx-1, schema, b.customBounds) } else { - bucket.Lower = -getBound(b.currIdx, schema) - bucket.Upper = -getBound(b.currIdx-1, schema) + bucket.Lower = -getBound(b.currIdx, schema, b.customBounds) + bucket.Upper = -getBound(b.currIdx-1, schema, b.customBounds) + } + if IsCustomBucketsSchema(schema) { + bucket.LowerInclusive = b.currIdx == 0 + bucket.UpperInclusive = true + } else { + bucket.LowerInclusive = bucket.Lower < 0 + bucket.UpperInclusive = bucket.Upper > 0 } - bucket.LowerInclusive = bucket.Lower < 0 - bucket.UpperInclusive = bucket.Upper > 0 return bucket } @@ -393,7 +418,52 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB return nil } -func getBound(idx, schema int32) float64 { +func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error { + prev := math.Inf(-1) + for _, curr := range bounds { + if curr <= prev { + return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid) + } + prev = curr + } + + var spanBuckets int + var totalSpanLength int + for n, span := range spans { + if span.Offset < 0 { + return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset) + } + spanBuckets += int(span.Length) + totalSpanLength += int(span.Length) + int(span.Offset) + } + if spanBuckets != numBuckets { + return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch) + } + if (len(bounds) + 1) < totalSpanLength { + return fmt.Errorf("only %d custom bounds defined which is insufficient to cover total span length of %d: %w", len(bounds), totalSpanLength, ErrHistogramCustomBucketsMismatch) + } + + return nil +} + +func getBound(idx, schema int32, customBounds []float64) float64 { + if IsCustomBucketsSchema(schema) { + length := int32(len(customBounds)) + switch { + case idx > length || idx < -1: + 
panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length)) + case idx == length: + return math.Inf(1) + case idx == -1: + return math.Inf(-1) + default: + return customBounds[idx] + } + } + return getBoundExponential(idx, schema) +} + +func getBoundExponential(idx, schema int32) float64 { // Here a bit of context about the behavior for the last bucket counting // regular numbers (called simply "last bucket" below) and the bucket // counting observations of ±Inf (called "inf bucket" below, with an idx diff --git a/model/histogram/generic_test.go b/model/histogram/generic_test.go index 6a22b6f193..28c9b6bf39 100644 --- a/model/histogram/generic_test.go +++ b/model/histogram/generic_test.go @@ -21,7 +21,7 @@ import ( "golang.org/x/exp/slices" ) -func TestGetBound(t *testing.T) { +func TestGetBoundExponential(t *testing.T) { scenarios := []struct { idx int32 schema int32 @@ -105,7 +105,7 @@ func TestGetBound(t *testing.T) { } for _, s := range scenarios { - got := getBound(s.idx, s.schema) + got := getBoundExponential(s.idx, s.schema) if s.want != got { require.Equal(t, s.want, got, "idx %d, schema %d", s.idx, s.schema) } diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index d40adeb620..98a8a606c9 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -50,11 +50,12 @@ const ( type Histogram struct { // Counter reset information. CounterResetHint CounterResetHint - // Currently valid schema numbers are -4 <= n <= 8. They are all for - // base-2 bucket schemas, where 1 is a bucket boundary in each case, and - // then each power of two is divided into 2^n logarithmic buckets. Or - // in other words, each bucket boundary is the previous boundary times - // 2^(2^-n). + // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets, + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in + // each case, and then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). Another valid schema number is 127 for custom buckets, defined by + // the CustomBounds field. Schema int32 // Width of the zero bucket. ZeroThreshold float64 @@ -70,6 +71,12 @@ type Histogram struct { // count. All following ones are deltas relative to the previous // element. PositiveBuckets, NegativeBuckets []int64 + // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil. + // This slice is interned, to be treated as immutable and copied by reference. + // These numbers should be strictly increasing. This field is only used when the + // schema is 127, and the ZeroThreshold, ZeroCount, NegativeSpans and NegativeBuckets + // fields are not used. + CustomBounds []float64 } // A Span defines a continuous sequence of buckets. @@ -81,33 +88,43 @@ type Span struct { Length uint32 } +func (h *Histogram) UsesCustomBuckets() bool { + return IsCustomBucketsSchema(h.Schema) +} + // Copy returns a deep copy of the Histogram. 
func (h *Histogram) Copy() *Histogram { c := Histogram{ CounterResetHint: h.CounterResetHint, Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: h.ZeroCount, Count: h.Count, Sum: h.Sum, } + if h.UsesCustomBuckets() { + c.CustomBounds = h.CustomBounds + } else { + c.ZeroThreshold = h.ZeroThreshold + c.ZeroCount = h.ZeroCount + + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]int64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + } + if len(h.PositiveSpans) != 0 { c.PositiveSpans = make([]Span, len(h.PositiveSpans)) copy(c.PositiveSpans, h.PositiveSpans) } - if len(h.NegativeSpans) != 0 { - c.NegativeSpans = make([]Span, len(h.NegativeSpans)) - copy(c.NegativeSpans, h.NegativeSpans) - } if len(h.PositiveBuckets) != 0 { c.PositiveBuckets = make([]int64, len(h.PositiveBuckets)) copy(c.PositiveBuckets, h.PositiveBuckets) } - if len(h.NegativeBuckets) != 0 { - c.NegativeBuckets = make([]int64, len(h.NegativeBuckets)) - copy(c.NegativeBuckets, h.NegativeBuckets) - } return &c } @@ -117,22 +134,35 @@ func (h *Histogram) Copy() *Histogram { func (h *Histogram) CopyTo(to *Histogram) { to.CounterResetHint = h.CounterResetHint to.Schema = h.Schema - to.ZeroThreshold = h.ZeroThreshold - to.ZeroCount = h.ZeroCount to.Count = h.Count to.Sum = h.Sum + if h.UsesCustomBuckets() { + to.ZeroThreshold = 0 + to.ZeroCount = 0 + + to.NegativeSpans = clearIfNotNil(to.NegativeSpans) + to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) + + to.CustomBounds = h.CustomBounds + } else { + to.ZeroThreshold = h.ZeroThreshold + to.ZeroCount = h.ZeroCount + + to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) + copy(to.NegativeSpans, h.NegativeSpans) + + to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) + copy(to.NegativeBuckets, h.NegativeBuckets) + + to.CustomBounds = clearIfNotNil(to.CustomBounds) + } + to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) copy(to.PositiveSpans, h.PositiveSpans) - to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) - copy(to.NegativeSpans, h.NegativeSpans) - to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) copy(to.PositiveBuckets, h.PositiveBuckets) - - to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) - copy(to.NegativeBuckets, h.NegativeBuckets) } // String returns a string representation of the Histogram. @@ -168,6 +198,9 @@ func (h *Histogram) String() string { // ZeroBucket returns the zero bucket. func (h *Histogram) ZeroBucket() Bucket[uint64] { + if h.UsesCustomBuckets() { + panic("histograms with custom buckets have no zero bucket") + } return Bucket[uint64]{ Lower: -h.ZeroThreshold, Upper: h.ZeroThreshold, @@ -180,14 +213,14 @@ func (h *Histogram) ZeroBucket() Bucket[uint64] { // PositiveBucketIterator returns a BucketIterator to iterate over all positive // buckets in ascending order (starting next to the zero bucket and going up). func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] { - it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) + it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomBounds) return &it } // NegativeBucketIterator returns a BucketIterator to iterate over all negative // buckets in descending order (starting next to the zero bucket and going down). 
func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] { - it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) + it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil) return &it } @@ -208,30 +241,42 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] { // but they must represent the same bucket layout to match. // Sum is compared based on its bit pattern because this method // is about data equality rather than mathematical equality. +// We ignore fields that are not used based on the exponential / custom buckets schema, +// but check fields where differences may cause unintended behaviour even if they are not +// supposed to be used according to the schema. func (h *Histogram) Equals(h2 *Histogram) bool { if h2 == nil { return false } - if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || - h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || + if h.Schema != h2.Schema || h.Count != h2.Count || math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { return false } + if h.UsesCustomBuckets() { + if !floatBucketsMatch(h.CustomBounds, h2.CustomBounds) { + return false + } + } + + if h.ZeroThreshold != h2.ZeroThreshold || h.ZeroCount != h2.ZeroCount { + return false + } + + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { + return false + } + if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { return false } - if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { - return false - } - if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) { return false } - if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) { - return false - } return true } @@ -322,17 +367,34 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { } fh.CounterResetHint = h.CounterResetHint fh.Schema = h.Schema - fh.ZeroThreshold = h.ZeroThreshold - fh.ZeroCount = float64(h.ZeroCount) fh.Count = float64(h.Count) fh.Sum = h.Sum + if h.UsesCustomBuckets() { + fh.ZeroThreshold = 0 + fh.ZeroCount = 0 + fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans) + fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets) + fh.CustomBounds = h.CustomBounds + } else { + fh.ZeroThreshold = h.ZeroThreshold + fh.ZeroCount = float64(h.ZeroCount) + + fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans)) + copy(fh.NegativeSpans, h.NegativeSpans) + + fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets)) + var currentNegative float64 + for i, b := range h.NegativeBuckets { + currentNegative += float64(b) + fh.NegativeBuckets[i] = currentNegative + } + fh.CustomBounds = clearIfNotNil(fh.CustomBounds) + } + fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans)) copy(fh.PositiveSpans, h.PositiveSpans) - fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans)) - copy(fh.NegativeSpans, h.NegativeSpans) - fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets)) var currentPositive float64 for i, b := range h.PositiveBuckets { @@ -340,13 +402,6 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { fh.PositiveBuckets[i] = currentPositive } - fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets)) - var currentNegative float64 - for i, b := range h.NegativeBuckets { - currentNegative += float64(b) - fh.NegativeBuckets[i] = currentNegative - } - return fh } @@ -357,26 +412,55 @@ func resize[T any](items []T, n int) []T { return items[:n] } +func 
clearIfNotNil[T any](items []T) []T { + if items == nil { + return nil + } + return items[:0] +} + // Validate validates consistency between span and bucket slices. Also, buckets are checked -// against negative values. +// against negative values. We check to make sure there are no unexpected fields or field values +// based on the exponential / custom buckets schema. // For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a // strict h.Count = nCount + pCount + h.ZeroCount check is performed. // Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount), // because NaN observations do not increment the values of buckets (but they do increment // the total h.Count). func (h *Histogram) Validate() error { - if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { - return fmt.Errorf("negative side: %w", err) - } - if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { - return fmt.Errorf("positive side: %w", err) - } var nCount, pCount uint64 - err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) - if err != nil { - return fmt.Errorf("negative side: %w", err) + if h.UsesCustomBuckets() { + if err := checkHistogramCustomBounds(h.CustomBounds, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("custom buckets: %w", err) + } + if h.ZeroCount != 0 { + return fmt.Errorf("custom buckets: must have zero count of 0") + } + if h.ZeroThreshold != 0 { + return fmt.Errorf("custom buckets: must have zero threshold of 0") + } + if len(h.NegativeSpans) > 0 { + return fmt.Errorf("custom buckets: must not have negative spans") + } + if len(h.NegativeBuckets) > 0 { + return fmt.Errorf("custom buckets: must not have negative buckets") + } + } else { + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("positive side: %w", err) + } + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return fmt.Errorf("negative side: %w", err) + } + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) + if err != nil { + return fmt.Errorf("negative side: %w", err) + } + if h.CustomBounds != nil { + return fmt.Errorf("histogram with exponential schema must not have custom bounds") + } } - err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true) + err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true) if err != nil { return fmt.Errorf("positive side: %w", err) } @@ -399,12 +483,13 @@ type regularBucketIterator struct { baseBucketIterator[uint64, int64] } -func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) regularBucketIterator { +func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customBounds []float64) regularBucketIterator { i := baseBucketIterator[uint64, int64]{ - schema: schema, - spans: spans, - buckets: buckets, - positive: positive, + schema: schema, + spans: spans, + buckets: buckets, + positive: positive, + customBounds: customBounds, } return regularBucketIterator{i} } @@ -478,7 +563,7 @@ func (c *cumulativeBucketIterator) Next() bool { if c.emptyBucketCount > 0 { // We are traversing through empty buckets at the moment. 
- c.currUpper = getBound(c.currIdx, c.h.Schema) + c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomBounds) c.currIdx++ c.emptyBucketCount-- return true @@ -495,7 +580,7 @@ func (c *cumulativeBucketIterator) Next() bool { c.currCount += c.h.PositiveBuckets[c.posBucketsIdx] c.currCumulativeCount += uint64(c.currCount) - c.currUpper = getBound(c.currIdx, c.h.Schema) + c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomBounds) c.posBucketsIdx++ c.idxInSpan++ @@ -526,6 +611,12 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] { // ReduceResolution reduces the histogram's spans, buckets into target schema. // The target schema must be smaller than the current histogram's schema. func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram { + if h.UsesCustomBuckets() { + panic("cannot reduce resolution when there are custom buckets") + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } if targetSchema >= h.Schema { panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) } diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go index 14a948e644..e63819debc 100644 --- a/model/histogram/histogram_test.go +++ b/model/histogram/histogram_test.go @@ -69,6 +69,21 @@ func TestHistogramString(t *testing.T) { }, expectedString: "{count:19, sum:2.7, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, (0.5,1]:1, (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:1, (16,32]:1, (32,64]:1}", }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + Count: 19, + Sum: 2.7, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + CustomBounds: []float64{1, 2, 5, 10, 15, 20, 25, 50}, + }, + expectedString: "{count:19, sum:2.7, [-Inf,1]:1, (1,2]:3, (2,5]:1, (5,10]:2, (10,15]:1, (15,20]:1, (20,25]:1}", + }, } for i, c := range cases { @@ -208,6 +223,26 @@ func TestCumulativeBucketIterator(t *testing.T) { {Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2}, }, }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomBounds: []float64{5, 10, 20, 50}, + }, + expectedBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: math.Inf(-1), Upper: 10, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 1}, + + {Lower: math.Inf(-1), Upper: 20, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 2}, + + {Lower: math.Inf(-1), Upper: 50, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 3}, + {Lower: math.Inf(-1), Upper: math.Inf(1), Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 4}, + }, + }, } for i, c := range cases { @@ -368,6 +403,62 @@ func TestRegularBucketIterator(t *testing.T) { }, expectedNegativeBuckets: []Bucket[uint64]{}, }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomBounds: []float64{5, 10, 20, 50}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: 5, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: 
true, Index: 1}, + + {Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3}, + {Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomBounds: []float64{0, 10, 20, 50}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 0, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: 0, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1}, + + {Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3}, + {Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 5}, + }, + PositiveBuckets: []int64{1, 1, 0, -1, 0}, + CustomBounds: []float64{-5, 0, 20, 50}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: -5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: -5, Upper: 0, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1}, + {Lower: 0, Upper: 20, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 2}, + {Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3}, + {Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, } for i, c := range cases { @@ -461,11 +552,79 @@ func TestHistogramToFloat(t *testing.T) { } } +func TestCustomBucketsHistogramToFloat(t *testing.T) { + h := Histogram{ + Schema: CustomBucketsSchema, + Count: 10, + Sum: 2.7, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + CustomBounds: []float64{5, 10, 20, 50, 100, 500}, + } + cases := []struct { + name string + fh *FloatHistogram + }{ + {name: "without prior float histogram"}, + {name: "prior float histogram with more buckets", fh: &FloatHistogram{ + Schema: 2, + Count: 3, + Sum: 5, + ZeroThreshold: 4, + ZeroCount: 1, + PositiveSpans: []Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + NegativeSpans: []Span{ + {Offset: 20, Length: 6}, + {Offset: 12, Length: 7}, + {Offset: 33, Length: 10}, + }, + NegativeBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }}, + {name: "prior float histogram with fewer buckets", fh: &FloatHistogram{ + Schema: 2, + Count: 3, + Sum: 5, + ZeroThreshold: 4, + ZeroCount: 1, + PositiveSpans: []Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2}, + NegativeSpans: []Span{ + {Offset: 20, Length: 6}, + {Offset: 12, Length: 7}, + {Offset: 33, Length: 10}, + }, + NegativeBuckets: []float64{1, 2}, + }}, + } + + require.NoError(t, h.Validate()) + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + fh := h.ToFloat(c.fh) + require.NoError(t, fh.Validate()) + require.Equal(t, h.String(), fh.String()) + }) + } +} + // TestHistogramEquals tests both Histogram and FloatHistogram. 
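// Illustrative usage sketch, assuming the model/histogram package as modified
// by this patch (Schema set to CustomBucketsSchema plus the CustomBounds
// field, which a later commit in this series renames to CustomValues):
// iterating the positive buckets of a custom buckets histogram yields
// [-Inf, bounds[0]] first and (bounds[last], +Inf] last, as exercised by
// TestRegularBucketIterator above. Not part of the patch itself.
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := histogram.Histogram{
		Schema: histogram.CustomBucketsSchema,
		Count:  5,
		Sum:    19.4,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 1, Length: 2},
		},
		// Delta-encoded counts 1, 1, -1, 0 decode to absolute counts 1, 2, 1, 1.
		PositiveBuckets: []int64{1, 1, -1, 0},
		CustomBounds:    []float64{5, 10, 20, 50},
	}
	it := h.PositiveBucketIterator()
	for it.Next() {
		b := it.At()
		// Expected buckets: [-Inf,5]:1, (5,10]:2, (20,50]:1, (50,+Inf]:1.
		fmt.Printf("index=%d lower=%g upper=%g count=%d\n", b.Index, b.Lower, b.Upper, b.Count)
	}
}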
func TestHistogramEquals(t *testing.T) { h1 := Histogram{ Schema: 3, - Count: 61, + Count: 62, Sum: 2.7, ZeroThreshold: 0.1, ZeroCount: 42, @@ -495,6 +654,15 @@ func TestHistogramEquals(t *testing.T) { require.False(t, h1f.Equals(h2f)) require.False(t, h2f.Equals(h1f)) } + notEqualsUntilFloatConv := func(h1, h2 Histogram) { + require.False(t, h1.Equals(&h2)) + require.False(t, h2.Equals(&h1)) + h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil) + require.True(t, h1f.Equals(h2f)) + require.True(t, h2f.Equals(h1f)) + } + + require.NoError(t, h1.Validate()) h2 := h1.Copy() equals(h1, *h2) @@ -602,6 +770,45 @@ func TestHistogramEquals(t *testing.T) { // Sum StaleNaN vs regular NaN. notEquals(*hStale, *hNaN) + + // Has non-empty custom bounds for exponential schema. + hCustom := h1.Copy() + hCustom.CustomBounds = []float64{1, 2, 3} + equals(h1, *hCustom) + + cbh1 := Histogram{ + Schema: CustomBucketsSchema, + Count: 10, + Sum: 2.7, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 10, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + CustomBounds: []float64{0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 200, 250, 500, 1000}, + } + + require.NoError(t, cbh1.Validate()) + + cbh2 := cbh1.Copy() + equals(cbh1, *cbh2) + + // Has different custom bounds for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.CustomBounds = []float64{0.1, 0.2, 0.5} + notEquals(cbh1, *cbh2) + + // Has non-empty negative spans and buckets for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.NegativeSpans = []Span{{Offset: 0, Length: 1}} + cbh2.NegativeBuckets = []int64{1} + notEqualsUntilFloatConv(cbh1, *cbh2) + + // Has non-zero zero count and threshold for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.ZeroThreshold = 0.1 + cbh2.ZeroCount = 10 + notEqualsUntilFloatConv(cbh1, *cbh2) } func TestHistogramCopy(t *testing.T) { @@ -640,6 +847,21 @@ func TestHistogramCopy(t *testing.T) { }, expected: &Histogram{}, }, + { + name: "with custom buckets", + orig: &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomBounds: []float64{5, 10, 15}, + }, + expected: &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomBounds: []float64{5, 10, 15}, + }, + }, } for _, tcase := range cases { @@ -690,6 +912,21 @@ func TestHistogramCopyTo(t *testing.T) { }, expected: &Histogram{}, }, + { + name: "with custom buckets", + orig: &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomBounds: []float64{5, 10, 15}, + }, + expected: &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomBounds: []float64{5, 10, 15}, + }, + }, } for _, tcase := range cases { @@ -971,6 +1208,86 @@ func TestHistogramCompact(t *testing.T) { NegativeBuckets: []int64{2, 3}, }, }, + { + "nothing should happen with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomBounds: []float64{5, 10, 15}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomBounds: []float64{5, 10, 15}, + }, + }, + { + "eliminate zero offsets with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}, {0, 3}, {0, 1}}, + PositiveBuckets: 
[]int64{1, 3, -3, 42, 3}, + CustomBounds: []float64{5, 10, 15, 20}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 5}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomBounds: []float64{5, 10, 15, 20}, + }, + }, + { + "eliminate zero length with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {2, 0}, {3, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomBounds: []float64{5, 10, 15, 20}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomBounds: []float64{5, 10, 15, 20}, + }, + }, + { + "eliminate multiple zero length spans with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomBounds: []float64{5, 10, 15, 20}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {9, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomBounds: []float64{5, 10, 15, 20}, + }, + }, + { + "cut empty buckets at start or end of spans, even in the middle, with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-4, 6}, {3, 6}}, + PositiveBuckets: []int64{0, 0, 1, 3, -4, 0, 1, 42, 3, -46, 0, 0}, + CustomBounds: []float64{5, 10, 15, 20}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomBounds: []float64{5, 10, 15, 20}, + }, + }, } for _, c := range cases { @@ -1107,6 +1424,145 @@ func TestHistogramValidation(t *testing.T) { errMsg: `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, skipFloat: true, }, + "rejects an exponential histogram with custom buckets schema": { + h: &Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{1, 1, -1, 0}, + }, + errMsg: `custom buckets: only 0 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`, + }, + "rejects a custom buckets histogram with exponential schema": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + errMsg: `histogram with exponential schema must not have custom bounds`, + skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation + }, + "rejects a custom buckets histogram with zero/negative buckets": { + h: &Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{1, 1, -1, 0}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + errMsg: `custom buckets: must have zero count of 0`, + skipFloat: true, // Converting 
to float will remove the wrong fields so only the float version will pass validation + }, + "rejects a custom buckets histogram with negative offset in first span": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: -1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + errMsg: `custom buckets: span number 1 with offset -1: histogram has a span whose offset is negative`, + }, + "rejects a custom buckets histogram with negative offset in subsequent spans": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: -1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + errMsg: `custom buckets: span number 2 with offset -1: histogram has a span whose offset is negative`, + }, + "rejects a custom buckets histogram with non-matching bucket counts": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + errMsg: `custom buckets: spans need 4 buckets, have 3 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects a custom buckets histogram with too few bounds": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomBounds: []float64{1, 2, 3}, + }, + errMsg: `custom buckets: only 3 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`, + }, + "valid custom buckets histogram": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomBounds: []float64{1, 2, 3, 4}, + }, + }, + "valid custom buckets histogram with extra bounds": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomBounds: []float64{1, 2, 3, 4, 5, 6, 7, 8}, + }, + }, } for testName, tc := range tests { diff --git a/promql/engine.go b/promql/engine.go index cd955ff5eb..b2c9a7ac89 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1657,18 +1657,21 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio }, e.LHS, e.RHS) default: return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil + vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh) + return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil + vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), 
Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh) + return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil + vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh) + return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } @@ -2303,12 +2306,12 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi } // VectorBinop evaluates a binary operation between two Vectors, excluding set operators. -func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { +func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) { if matching.Card == parser.CardManyToMany { panic("many-to-many only allowed for set operators") } if len(lhs) == 0 || len(rhs) == 0 { - return nil // Short-circuit: nothing is going to match. + return nil, nil // Short-circuit: nothing is going to match. } // The control flow below handles one-to-one or many-to-one matching. @@ -2361,6 +2364,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * // For all lhs samples find a respective rhs sample and perform // the binary operation. + var lastErr error for i, ls := range lhs { sig := lhsh[i].signature @@ -2376,7 +2380,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * fl, fr = fr, fl hl, hr = hr, hl } - floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr) + floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr) + if err != nil { + lastErr = err + } switch { case returnBool: if keep { @@ -2418,7 +2425,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * H: histogramValue, }) } - return enh.Out + return enh.Out, lastErr } func signatureFunc(on bool, b []byte, names ...string) func(labels.Labels) string { @@ -2481,7 +2488,8 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. -func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { +func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) { + var lastErr error for _, lhsSample := range lhs { lf, rf := lhsSample.F, rhs.V var rh *histogram.FloatHistogram @@ -2492,7 +2500,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala lf, rf = rf, lf lh, rh = rh, lh } - float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh) + float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh) + if err != nil { + lastErr = err + } // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // We want to always keep the vector element value as the output value, even if it's on the RHS. 
if op.IsComparisonOperator() && swap { @@ -2516,7 +2527,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala enh.Out = append(enh.Out, lhsSample) } } - return enh.Out + return enh.Out, lastErr } // scalarBinop evaluates a binary operation between two Scalars. @@ -2553,49 +2564,57 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { } // vectorElemBinop evaluates a binary operation between two Vector elements. -func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool) { +func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { switch op { case parser.ADD: if hlhs != nil && hrhs != nil { - return 0, hlhs.Copy().Add(hrhs).Compact(0), true + res, err := hlhs.Copy().Add(hrhs) + if err != nil { + return 0, nil, false, err + } + return 0, res.Compact(0), true, nil } - return lhs + rhs, nil, true + return lhs + rhs, nil, true, nil case parser.SUB: if hlhs != nil && hrhs != nil { - return 0, hlhs.Copy().Sub(hrhs).Compact(0), true + res, err := hlhs.Copy().Sub(hrhs) + if err != nil { + return 0, nil, false, err + } + return 0, res.Compact(0), true, nil } - return lhs - rhs, nil, true + return lhs - rhs, nil, true, nil case parser.MUL: if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Mul(rhs), true + return 0, hlhs.Copy().Mul(rhs), true, nil } if hlhs == nil && hrhs != nil { - return 0, hrhs.Copy().Mul(lhs), true + return 0, hrhs.Copy().Mul(lhs), true, nil } - return lhs * rhs, nil, true + return lhs * rhs, nil, true, nil case parser.DIV: if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Div(rhs), true + return 0, hlhs.Copy().Div(rhs), true, nil } - return lhs / rhs, nil, true + return lhs / rhs, nil, true, nil case parser.POW: - return math.Pow(lhs, rhs), nil, true + return math.Pow(lhs, rhs), nil, true, nil case parser.MOD: - return math.Mod(lhs, rhs), nil, true + return math.Mod(lhs, rhs), nil, true, nil case parser.EQLC: - return lhs, nil, lhs == rhs + return lhs, nil, lhs == rhs, nil case parser.NEQ: - return lhs, nil, lhs != rhs + return lhs, nil, lhs != rhs, nil case parser.GTR: - return lhs, nil, lhs > rhs + return lhs, nil, lhs > rhs, nil case parser.LSS: - return lhs, nil, lhs < rhs + return lhs, nil, lhs < rhs, nil case parser.GTE: - return lhs, nil, lhs >= rhs + return lhs, nil, lhs >= rhs, nil case parser.LTE: - return lhs, nil, lhs <= rhs + return lhs, nil, lhs <= rhs, nil case parser.ATAN2: - return math.Atan2(lhs, rhs), nil, true + return math.Atan2(lhs, rhs), nil, true, nil } panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) } @@ -2747,7 +2766,10 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, par if s.H != nil { group.hasHistogram = true if group.histogramValue != nil { - group.histogramValue.Add(s.H) + _, err := group.histogramValue.Add(s.H) + if err != nil { + handleAggregationError(err, e, group, &annos) + } } // Otherwise the aggregation contained floats // previously and will be invalid anyway. 
No @@ -2764,8 +2786,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, par if group.histogramMean != nil { left := s.H.Copy().Div(float64(group.groupCount)) right := group.histogramMean.Copy().Div(float64(group.groupCount)) - toAdd := left.Sub(right) - group.histogramMean.Add(toAdd) + toAdd, err := left.Sub(right) + if err != nil { + handleAggregationError(err, e, group, &annos) + } + _, err = group.histogramMean.Add(toAdd) + if err != nil { + handleAggregationError(err, e, group, &annos) + } } // Otherwise the aggregation contained floats // previously and will be invalid anyway. No @@ -2941,6 +2969,32 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, par return enh.Out, annos } +// handleAggregationError adds the appropriate annotation based on the aggregation error. +func handleAggregationError(err error, e *parser.AggregateExpr, group *groupedAggregation, annos *annotations.Annotations) { + metricName := group.labels.Get(labels.MetricName) + pos := e.Expr.PositionRange() + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + annos.Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + } +} + +// handleVectorBinopError returns the appropriate annotation based on the vector binary operation error. +func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotations { + if err == nil { + return nil + } + metricName := "" + pos := e.PositionRange() + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + } + return nil +} + // groupingKey builds and returns the grouping key for the given metric and // grouping labels. func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) { diff --git a/promql/functions.go b/promql/functions.go index fe1a5644ec..9f390f3340 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -14,6 +14,7 @@ package promql import ( + "errors" "fmt" "math" "sort" @@ -211,14 +212,28 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra } h := last.CopyToSchema(minSchema) - h.Sub(prev) + _, err := h.Sub(prev) + if err != nil { + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + } + } if isCounter { // Second iteration to deal with counter resets. 
for _, currPoint := range points[1:] { curr := currPoint.H if curr.DetectReset(prev) { - h.Add(prev) + _, err := h.Add(prev) + if err != nil { + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + } + } } prev = curr } @@ -515,10 +530,11 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) return append(enh.Out, Sample{F: aggrFn(el)}) } -func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector { +func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) { el := vals[0].(Matrix)[0] + res, err := aggrFn(el) - return append(enh.Out, Sample{H: aggrFn(el)}) + return append(enh.Out, Sample{H: res}), err } // === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === @@ -530,18 +546,33 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. - return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { + vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) { count := 1 mean := s.Histograms[0].H.Copy() for _, h := range s.Histograms[1:] { count++ left := h.H.Copy().Div(float64(count)) right := mean.Copy().Div(float64(count)) - toAdd := left.Sub(right) - mean.Add(toAdd) + toAdd, err := left.Sub(right) + if err != nil { + return mean, err + } + _, err = mean.Add(toAdd) + if err != nil { + return mean, err + } } - return mean - }), nil + return mean, nil + }) + if err != nil { + metricName := firstSeries.Metric.Get(labels.MetricName) + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange())) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange())) + } + } + return vec, nil } return aggrOverTime(vals, enh, func(s Series) float64 { var mean, count, c float64 @@ -675,13 +706,25 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. 
- return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { + vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) { sum := s.Histograms[0].H.Copy() for _, h := range s.Histograms[1:] { - sum.Add(h.H) + _, err := sum.Add(h.H) + if err != nil { + return sum, err + } } - return sum - }), nil + return sum, nil + }) + if err != nil { + metricName := firstSeries.Metric.Get(labels.MetricName) + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange())) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange())) + } + } + return vec, nil } return aggrOverTime(vals, enh, func(s Series) float64 { var sum, c float64 diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 6a05e4a792..c15b85306f 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -482,19 +482,19 @@ func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string } func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) { - return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram { + return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) { return a.Add(b) }) } func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) { - return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram { + return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) { return a.Sub(b) }) } func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64, - combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) *histogram.FloatHistogram, + combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) (*histogram.FloatHistogram, error), ) ([]SequenceValue, error) { ret := make([]SequenceValue, times+1) // Add an additional value (the base) for time 0, which we ignore in tests. 
@@ -505,7 +505,11 @@ func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uin return nil, fmt.Errorf("error combining histograms: cannot merge from schema %d to %d", inc.Schema, cur.Schema) } - cur = combine(cur.Copy(), inc) + var err error + cur, err = combine(cur.Copy(), inc) + if err != nil { + return ret, err + } ret[i] = SequenceValue{Histogram: cur} } diff --git a/rules/manager_test.go b/rules/manager_test.go index 4215ca4e43..482a6e4bdc 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -1399,7 +1399,7 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) { expHist := hists[0].ToFloat(nil) for _, h := range hists[1:] { - expHist = expHist.Add(h.ToFloat(nil)) + expHist, _ = expHist.Add(h.ToFloat(nil)) } it := s.Iterator(nil) diff --git a/scrape/scrape.go b/scrape/scrape.go index aa2d5538b1..c8eac298ed 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -660,7 +660,7 @@ func appender(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int3 } } - if maxSchema < nativeHistogramMaxSchema { + if maxSchema < histogram.ExponentialSchemaMax { app = &maxSchemaAppender{ Appender: app, maxSchema: maxSchema, @@ -1956,10 +1956,10 @@ func pickSchema(bucketFactor float64) int32 { } floor := math.Floor(-math.Log2(math.Log2(bucketFactor))) switch { - case floor >= float64(nativeHistogramMaxSchema): - return nativeHistogramMaxSchema - case floor <= float64(nativeHistogramMinSchema): - return nativeHistogramMinSchema + case floor >= float64(histogram.ExponentialSchemaMax): + return histogram.ExponentialSchemaMax + case floor <= float64(histogram.ExponentialSchemaMin): + return histogram.ExponentialSchemaMin default: return int32(floor) } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index bcaeb460e2..38055695fa 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -510,7 +510,7 @@ func TestScrapePoolAppender(t *testing.T) { appl, ok := loop.(*scrapeLoop) require.True(t, ok, "Expected scrapeLoop but got %T", loop) - wrapped := appender(appl.appender(context.Background()), 0, 0, nativeHistogramMaxSchema) + wrapped := appender(appl.appender(context.Background()), 0, 0, histogram.ExponentialSchemaMax) tl, ok := wrapped.(*timeLimitAppender) require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped) @@ -526,7 +526,7 @@ func TestScrapePoolAppender(t *testing.T) { appl, ok = loop.(*scrapeLoop) require.True(t, ok, "Expected scrapeLoop but got %T", loop) - wrapped = appender(appl.appender(context.Background()), sampleLimit, 0, nativeHistogramMaxSchema) + wrapped = appender(appl.appender(context.Background()), sampleLimit, 0, histogram.ExponentialSchemaMax) sl, ok := wrapped.(*limitAppender) require.True(t, ok, "Expected limitAppender but got %T", wrapped) @@ -537,7 +537,7 @@ func TestScrapePoolAppender(t *testing.T) { _, ok = tl.Appender.(nopAppender) require.True(t, ok, "Expected base appender but got %T", tl.Appender) - wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, nativeHistogramMaxSchema) + wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, histogram.ExponentialSchemaMax) bl, ok := wrapped.(*bucketLimitAppender) require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped) @@ -669,7 +669,7 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app true, false, true, - 0, 0, nativeHistogramMaxSchema, + 0, 0, histogram.ExponentialSchemaMax, nil, interval, time.Hour, @@ -810,7 +810,7 @@ func TestScrapeLoopRun(t *testing.T) { true, 
false, true, - 0, 0, nativeHistogramMaxSchema, + 0, 0, histogram.ExponentialSchemaMax, nil, time.Second, time.Hour, @@ -953,7 +953,7 @@ func TestScrapeLoopMetadata(t *testing.T) { true, false, true, - 0, 0, nativeHistogramMaxSchema, + 0, 0, histogram.ExponentialSchemaMax, nil, 0, 0, diff --git a/scrape/target.go b/scrape/target.go index ad4b4f6857..b796b4cd02 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -366,7 +366,7 @@ type bucketLimitAppender struct { func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { - if h.Schema == -4 { + if h.Schema <= histogram.ExponentialSchemaMin || h.Schema > histogram.ExponentialSchemaMax { return 0, errBucketLimit } h = h.ReduceResolution(h.Schema - 1) @@ -374,7 +374,7 @@ func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labe } if fh != nil { for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { - if fh.Schema == -4 { + if fh.Schema <= histogram.ExponentialSchemaMin || fh.Schema > histogram.ExponentialSchemaMax { return 0, errBucketLimit } fh = fh.ReduceResolution(fh.Schema - 1) @@ -387,11 +387,6 @@ func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labe return ref, nil } -const ( - nativeHistogramMaxSchema int32 = 8 - nativeHistogramMinSchema int32 = -4 -) - type maxSchemaAppender struct { storage.Appender @@ -400,12 +395,12 @@ type maxSchemaAppender struct { func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { - if h.Schema > app.maxSchema { + if histogram.IsExponentialSchema(h.Schema) && h.Schema > app.maxSchema { h = h.ReduceResolution(app.maxSchema) } } if fh != nil { - if fh.Schema > app.maxSchema { + if histogram.IsExponentialSchema(fh.Schema) && fh.Schema > app.maxSchema { fh = fh.ReduceResolution(app.maxSchema) } } diff --git a/scrape/target_test.go b/scrape/target_test.go index 413fbc1b81..dead3c6fc4 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -473,6 +473,17 @@ func TestBucketLimitAppender(t *testing.T) { PositiveBuckets: []int64{1, 0}, // 1, 1 } + customBuckets := histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 9, + Sum: 33, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{3, 0, 0}, + CustomBounds: []float64{1, 2, 3}, + } + cases := []struct { h histogram.Histogram limit int @@ -506,6 +517,18 @@ func TestBucketLimitAppender(t *testing.T) { expectBucketCount: 1, expectSchema: -2, }, + { + h: customBuckets, + limit: 2, + expectError: true, + }, + { + h: customBuckets, + limit: 3, + expectError: false, + expectBucketCount: 3, + expectSchema: histogram.CustomBucketsSchema, + }, } resApp := &collectResultAppender{} @@ -561,6 +584,17 @@ func TestMaxSchemaAppender(t *testing.T) { NegativeBuckets: []int64{3, 0, 0}, } + customBuckets := histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 9, + Sum: 33, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{3, 0, 0}, + CustomBounds: []float64{1, 2, 3}, + } + cases := []struct { h histogram.Histogram maxSchema int32 @@ -576,6 +610,11 @@ func TestMaxSchemaAppender(t *testing.T) { maxSchema: 0, expectSchema: 0, }, + { + h: customBuckets, + maxSchema: 
-1, + expectSchema: histogram.CustomBucketsSchema, + }, } resApp := &collectResultAppender{} diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index 16a920d57f..f5f60ba874 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -103,12 +103,14 @@ var ( PromQLInfo = errors.New("PromQL info") PromQLWarning = errors.New("PromQL warning") - InvalidQuantileWarning = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning) - BadBucketLabelWarning = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel) - MixedFloatsHistogramsWarning = fmt.Errorf("%w: encountered a mix of histograms and floats for", PromQLWarning) - MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning) - NativeHistogramNotCounterWarning = fmt.Errorf("%w: this native histogram metric is not a counter:", PromQLWarning) - NativeHistogramNotGaugeWarning = fmt.Errorf("%w: this native histogram metric is not a gauge:", PromQLWarning) + InvalidQuantileWarning = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning) + BadBucketLabelWarning = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel) + MixedFloatsHistogramsWarning = fmt.Errorf("%w: encountered a mix of histograms and floats for", PromQLWarning) + MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning) + NativeHistogramNotCounterWarning = fmt.Errorf("%w: this native histogram metric is not a counter:", PromQLWarning) + NativeHistogramNotGaugeWarning = fmt.Errorf("%w: this native histogram metric is not a gauge:", PromQLWarning) + MixedExponentialCustomHistogramsWarning = fmt.Errorf("%w: vector contains a mix of histograms with exponential and custom buckets schemas for metric name", PromQLWarning) + IncompatibleCustomBucketsHistogramsWarning = fmt.Errorf("%w: vector contains histograms with incompatible custom buckets for metric name", PromQLWarning) PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) @@ -195,6 +197,24 @@ func NewNativeHistogramNotGaugeWarning(metricName string, pos posrange.PositionR } } +// NewMixedExponentialCustomHistogramsWarning is used when the queried series includes +// histograms with both exponential and custom buckets schemas. +func NewMixedExponentialCustomHistogramsWarning(metricName string, pos posrange.PositionRange) error { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %q", MixedExponentialCustomHistogramsWarning, metricName), + } +} + +// NewIncompatibleCustomBucketsHistogramsWarning is used when the queried series includes +// custom buckets histograms with incompatible custom bounds. 
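// Illustrative sketch (not part of the patch) of how a caller can translate
// the histogram arithmetic errors introduced above into the new PromQL
// annotations, mirroring the handleVectorBinopError/handleAggregationError
// helpers in promql/engine.go. annotationForHistogramError and the metric name
// are hypothetical; the error values and constructors are the ones added here.
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/promql/parser/posrange"
	"github.com/prometheus/prometheus/util/annotations"
)

func annotationForHistogramError(err error, metricName string, pos posrange.PositionRange) annotations.Annotations {
	switch {
	case errors.Is(err, histogram.ErrHistogramsIncompatibleSchema):
		return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
	case errors.Is(err, histogram.ErrHistogramsIncompatibleBounds):
		return annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
	default:
		return nil
	}
}

func main() {
	for _, err := range []error{
		histogram.ErrHistogramsIncompatibleSchema,
		histogram.ErrHistogramsIncompatibleBounds,
	} {
		fmt.Println(annotationForHistogramError(err, "example_metric", posrange.PositionRange{}))
	}
}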
+func NewIncompatibleCustomBucketsHistogramsWarning(metricName string, pos posrange.PositionRange) error { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %q", IncompatibleCustomBucketsHistogramsWarning, metricName), + } +} + // NewPossibleNonCounterInfo is used when a named counter metric with only float samples does not // have the suffixes _total, _sum, _count, or _bucket. func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) error { From 4acbb7dea64f0ec326de953e553f6d6d28a0b8c0 Mon Sep 17 00:00:00 2001 From: zenador Date: Fri, 22 Mar 2024 21:36:39 +0800 Subject: [PATCH 02/30] Add custom buckets to native histogram chunks encoding (#13706) * add custom bounds to chunks encoding * change custom buckets schema number * rename custom bounds to custom values Signed-off-by: Jeanette Tan --- model/histogram/float_histogram.go | 52 ++-- model/histogram/float_histogram_test.go | 144 ++++----- model/histogram/generic.go | 18 +- model/histogram/histogram.go | 42 +-- model/histogram/histogram_test.go | 62 ++-- scrape/target_test.go | 4 +- tsdb/chunkenc/bstream.go | 16 + tsdb/chunkenc/float_histogram.go | 52 +++- tsdb/chunkenc/float_histogram_test.go | 328 ++++++++++++++++---- tsdb/chunkenc/histogram.go | 34 ++- tsdb/chunkenc/histogram_meta.go | 83 ++++- tsdb/chunkenc/histogram_meta_test.go | 30 +- tsdb/chunkenc/histogram_test.go | 391 ++++++++++++++++++------ tsdb/tsdbutil/histogram.go | 28 ++ 14 files changed, 953 insertions(+), 331 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index ded7dc400d..58f13c8cf3 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -34,8 +34,8 @@ type FloatHistogram struct { // They are all for base-2 bucket schemas, where 1 is a bucket boundary in // each case, and then each power of two is divided into 2^n logarithmic buckets. // Or in other words, each bucket boundary is the previous boundary times - // 2^(2^-n). Another valid schema number is 127 for custom buckets, defined by - // the CustomBounds field. + // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by + // the CustomValues field. Schema int32 // Width of the zero bucket. ZeroThreshold float64 @@ -53,9 +53,9 @@ type FloatHistogram struct { // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil. // This slice is interned, to be treated as immutable and copied by reference. // These numbers should be strictly increasing. This field is only used when the - // schema is 127, and the ZeroThreshold, ZeroCount, NegativeSpans and NegativeBuckets - // fields are not used. - CustomBounds []float64 + // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans + // and NegativeBuckets fields are not used. 
+ CustomValues []float64 } func (h *FloatHistogram) UsesCustomBuckets() bool { @@ -72,7 +72,10 @@ func (h *FloatHistogram) Copy() *FloatHistogram { } if h.UsesCustomBuckets() { - c.CustomBounds = h.CustomBounds + if len(h.CustomValues) != 0 { + c.CustomValues = make([]float64, len(h.CustomValues)) + copy(c.CustomValues, h.CustomValues) + } } else { c.ZeroThreshold = h.ZeroThreshold c.ZeroCount = h.ZeroCount @@ -114,7 +117,8 @@ func (h *FloatHistogram) CopyTo(to *FloatHistogram) { to.NegativeSpans = clearIfNotNil(to.NegativeSpans) to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) - to.CustomBounds = h.CustomBounds + to.CustomValues = resize(to.CustomValues, len(h.CustomValues)) + copy(to.CustomValues, h.CustomValues) } else { to.ZeroThreshold = h.ZeroThreshold to.ZeroCount = h.ZeroCount @@ -125,7 +129,7 @@ func (h *FloatHistogram) CopyTo(to *FloatHistogram) { to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) copy(to.NegativeBuckets, h.NegativeBuckets) - to.CustomBounds = clearIfNotNil(to.CustomBounds) + to.CustomValues = clearIfNotNil(to.CustomValues) } to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) @@ -311,7 +315,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) { if h.UsesCustomBuckets() != other.UsesCustomBuckets() { return nil, ErrHistogramsIncompatibleSchema } - if h.UsesCustomBuckets() && !floatBucketsMatch(h.CustomBounds, other.CustomBounds) { + if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) { return nil, ErrHistogramsIncompatibleBounds } @@ -387,7 +391,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) { if h.UsesCustomBuckets() != other.UsesCustomBuckets() { return nil, ErrHistogramsIncompatibleSchema } - if h.UsesCustomBuckets() && !floatBucketsMatch(h.CustomBounds, other.CustomBounds) { + if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) { return nil, ErrHistogramsIncompatibleBounds } @@ -454,7 +458,7 @@ func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { } if h.UsesCustomBuckets() { - if !floatBucketsMatch(h.CustomBounds, h2.CustomBounds) { + if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) { return false } } @@ -467,14 +471,14 @@ func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { return false } - if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + if !FloatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { return false } if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { return false } - if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { + if !FloatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { return false } @@ -490,7 +494,7 @@ func (h *FloatHistogram) Size() int { negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32). posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64). negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64). - customBoundSize := len(h.CustomBounds) * 8 // 8 bytes (float64). + customBoundSize := len(h.CustomValues) * 8 // 8 bytes (float64). // Total size of the struct. @@ -505,7 +509,7 @@ func (h *FloatHistogram) Size() int { // fh.NegativeSpans is 24 bytes. // fh.PositiveBuckets is 24 bytes. // fh.NegativeBuckets is 24 bytes. - // fh.CustomBounds is 24 bytes. + // fh.CustomValues is 24 bytes. 
structSize := 168 return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize @@ -593,7 +597,7 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool { if h.Count < previous.Count { return true } - if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !floatBucketsMatch(h.CustomBounds, previous.CustomBounds)) { + if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, previous.CustomValues)) { // Mark that something has changed or that the application has been restarted. However, this does // not matter so much since the change in schema will be handled directly in the chunks and PromQL // functions. @@ -704,7 +708,7 @@ func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] { // positive buckets in descending order (starting at the highest bucket and // going down towards the zero bucket). func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] { - it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomBounds) + it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues) return &it } @@ -738,7 +742,7 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ h: h, - leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomBounds), + leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues), rightIter: h.floatBucketIterator(false, 0, h.Schema), state: -1, } @@ -753,7 +757,7 @@ func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) Validate() error { var nCount, pCount float64 if h.UsesCustomBuckets() { - if err := checkHistogramCustomBounds(h.CustomBounds, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { return fmt.Errorf("custom buckets: %w", err) } if h.ZeroCount != 0 { @@ -779,7 +783,7 @@ func (h *FloatHistogram) Validate() error { if err != nil { return fmt.Errorf("negative side: %w", err) } - if h.CustomBounds != nil { + if h.CustomValues != nil { return fmt.Errorf("histogram with exponential schema must not have custom bounds") } } @@ -940,7 +944,7 @@ func (h *FloatHistogram) floatBucketIterator( if positive { i.spans = h.PositiveSpans i.buckets = h.PositiveBuckets - i.customBounds = h.CustomBounds + i.customValues = h.CustomValues } else { i.spans = h.NegativeSpans i.buckets = h.NegativeBuckets @@ -950,7 +954,7 @@ func (h *FloatHistogram) floatBucketIterator( // reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators. 
func newReverseFloatBucketIterator( - spans []Span, buckets []float64, schema int32, positive bool, customBounds []float64, + spans []Span, buckets []float64, schema int32, positive bool, customValues []float64, ) reverseFloatBucketIterator { r := reverseFloatBucketIterator{ baseBucketIterator: baseBucketIterator[float64, float64]{ @@ -958,7 +962,7 @@ func newReverseFloatBucketIterator( spans: spans, buckets: buckets, positive: positive, - customBounds: customBounds, + customValues: customValues, }, } @@ -1296,7 +1300,7 @@ func addBuckets( return spansA, bucketsA } -func floatBucketsMatch(b1, b2 []float64) bool { +func FloatBucketsMatch(b1, b2 []float64) bool { if len(b1) != len(b2) { return false } diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 400645cad4..3b69defe0f 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -139,7 +139,7 @@ func TestFloatHistogramMul(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 1}, {2, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, 1, &FloatHistogram{ @@ -148,7 +148,7 @@ func TestFloatHistogramMul(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 1}, {2, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, }, { @@ -159,7 +159,7 @@ func TestFloatHistogramMul(t *testing.T) { Sum: 23, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 3, 4, 7}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, 3, &FloatHistogram{ @@ -168,7 +168,7 @@ func TestFloatHistogramMul(t *testing.T) { Sum: 69, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{3, 0, 9, 12, 21}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, }, } @@ -224,13 +224,13 @@ func TestFloatHistogramCopy(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 1}}, PositiveBuckets: []float64{1, 3, -3, 42}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, expected: &FloatHistogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 1}}, PositiveBuckets: []float64{1, 3, -3, 42}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, }, } @@ -289,13 +289,13 @@ func TestFloatHistogramCopyTo(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 1}}, PositiveBuckets: []float64{1, 3, -3, 42}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, expected: &FloatHistogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 1}}, PositiveBuckets: []float64{1, 3, -3, 42}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, }, } @@ -417,7 +417,7 @@ func TestFloatHistogramDiv(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 1}, {2, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, 1, &FloatHistogram{ @@ -426,7 +426,7 @@ func TestFloatHistogramDiv(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 1}, {2, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, }, { @@ -437,7 +437,7 @@ func TestFloatHistogramDiv(t *testing.T) { Sum: 23, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 3, 4, 7}, - CustomBounds: []float64{1, 2, 3, 4}, + 
CustomValues: []float64{1, 2, 3, 4}, }, 2, &FloatHistogram{ @@ -446,7 +446,7 @@ func TestFloatHistogramDiv(t *testing.T) { Sum: 11.5, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{0.5, 0, 1.5, 2, 3.5}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, }, } @@ -1051,7 +1051,7 @@ func TestFloatHistogramDetectReset(t *testing.T) { "no buckets to some buckets with custom bounds", &FloatHistogram{ Schema: CustomBucketsSchema, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -1059,7 +1059,7 @@ func TestFloatHistogramDetectReset(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 1}, {2, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, false, }, @@ -1071,11 +1071,11 @@ func TestFloatHistogramDetectReset(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 1}, {2, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, &FloatHistogram{ Schema: CustomBucketsSchema, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, true, }, @@ -1087,7 +1087,7 @@ func TestFloatHistogramDetectReset(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 1}, {2, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -1095,7 +1095,7 @@ func TestFloatHistogramDetectReset(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, false, }, @@ -1107,7 +1107,7 @@ func TestFloatHistogramDetectReset(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -1115,7 +1115,7 @@ func TestFloatHistogramDetectReset(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 1}, {2, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, true, }, @@ -1127,7 +1127,7 @@ func TestFloatHistogramDetectReset(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -1135,7 +1135,7 @@ func TestFloatHistogramDetectReset(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 1}, {2, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, false, }, @@ -1147,7 +1147,7 @@ func TestFloatHistogramDetectReset(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -1155,7 +1155,7 @@ func TestFloatHistogramDetectReset(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 3.3, 4.3, 0.1}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, false, }, @@ -1167,7 
+1167,7 @@ func TestFloatHistogramDetectReset(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -1175,7 +1175,7 @@ func TestFloatHistogramDetectReset(t *testing.T) { Sum: 2349209.324, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 3.3, 4.1, 0.1}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, true, }, @@ -1478,14 +1478,14 @@ func TestFloatHistogramCompact(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 1}, {2, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, 0, &FloatHistogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 1}, {2, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, }, { @@ -1494,14 +1494,14 @@ func TestFloatHistogramCompact(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 1}, {0, 3}, {0, 1}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, 0, &FloatHistogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 5}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, }, { @@ -1510,14 +1510,14 @@ func TestFloatHistogramCompact(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, 0, &FloatHistogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 2}, {9, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, }, { @@ -1526,14 +1526,14 @@ func TestFloatHistogramCompact(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 4}, {5, 6}}, PositiveBuckets: []float64{0, 0, 1, 3.3, 4.2, 0.1, 3.3, 0, 0, 0}, - CustomBounds: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, }, 0, &FloatHistogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{2, 2}, {5, 3}}, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, - CustomBounds: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, }, }, } @@ -2009,7 +2009,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 2.345, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 3, 4, 7}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2017,7 +2017,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 1.234, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{0, 0, 2, 3, 6}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2025,7 +2025,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 3.579, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 5, 7, 13}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, "", }, @@ -2037,7 +2037,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 2.345, PositiveSpans: []Span{{0, 2}, 
{1, 1}, {0, 2}}, PositiveBuckets: []float64{1, 0, 3, 4, 7}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2045,7 +2045,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 1.234, PositiveSpans: []Span{{0, 2}, {1, 2}, {0, 1}}, PositiveBuckets: []float64{0, 0, 2, 3, 6}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2053,7 +2053,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 3.579, PositiveSpans: []Span{{0, 2}, {1, 1}, {0, 2}}, PositiveBuckets: []float64{1, 0, 5, 7, 13}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, "", }, @@ -2065,7 +2065,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 2.345, PositiveSpans: []Span{{0, 2}, {2, 3}}, PositiveBuckets: []float64{1, 0, 3, 4, 7}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2073,7 +2073,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 1.234, PositiveSpans: []Span{{2, 2}, {3, 3}}, PositiveBuckets: []float64{5, 4, 2, 3, 6}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2081,7 +2081,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 3.579, PositiveSpans: []Span{{0, 4}, {0, 6}}, PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, "", }, @@ -2093,7 +2093,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 1.234, PositiveSpans: []Span{{2, 2}, {3, 3}}, PositiveBuckets: []float64{5, 4, 2, 3, 6}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2101,7 +2101,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 2.345, PositiveSpans: []Span{{0, 2}, {2, 3}}, PositiveBuckets: []float64{1, 0, 3, 4, 7}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2109,7 +2109,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 3.579, PositiveSpans: []Span{{0, 4}, {0, 6}}, PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, "", }, @@ -2121,7 +2121,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 2.345, PositiveSpans: []Span{{0, 2}, {2, 3}}, PositiveBuckets: []float64{1, 0, 3, 4, 7}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2129,7 +2129,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 1.234, PositiveSpans: []Span{{1, 4}, {0, 3}}, PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2137,7 +2137,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 3.579, PositiveSpans: []Span{{0, 4}, {0, 4}}, PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, "", }, @@ -2149,7 +2149,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 1.234, PositiveSpans: []Span{{1, 4}, {0, 3}}, PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2157,7 +2157,7 @@ 
func TestFloatHistogramAdd(t *testing.T) { Sum: 2.345, PositiveSpans: []Span{{0, 2}, {2, 3}}, PositiveBuckets: []float64{1, 0, 3, 4, 7}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2165,7 +2165,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 3.579, PositiveSpans: []Span{{0, 4}, {0, 4}}, PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, "", }, @@ -2177,7 +2177,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 2.345, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 3, 4, 7}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2185,7 +2185,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 1.234, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{0, 0, 2, 3, 6}, - CustomBounds: []float64{1, 2, 3, 4, 5}, + CustomValues: []float64{1, 2, 3, 4, 5}, }, nil, "cannot apply this operation on custom buckets histograms with different custom bounds", @@ -2209,7 +2209,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 12, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{0, 0, 2, 3, 6}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, nil, "cannot apply this operation on histograms with a mix of exponential and custom bucket schemas", @@ -2342,7 +2342,7 @@ func TestFloatHistogramSub(t *testing.T) { Sum: 23, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 3, 4, 7}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2350,7 +2350,7 @@ func TestFloatHistogramSub(t *testing.T) { Sum: 12, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{0, 0, 2, 3, 6}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2358,7 +2358,7 @@ func TestFloatHistogramSub(t *testing.T) { Sum: 11, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 1, 1, 1}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, "", }, @@ -2370,7 +2370,7 @@ func TestFloatHistogramSub(t *testing.T) { Sum: 23, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{1, 0, 3, 4, 7}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, &FloatHistogram{ Schema: CustomBucketsSchema, @@ -2378,7 +2378,7 @@ func TestFloatHistogramSub(t *testing.T) { Sum: 12, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{0, 0, 2, 3, 6}, - CustomBounds: []float64{1, 2, 3, 4, 5}, + CustomValues: []float64{1, 2, 3, 4, 5}, }, nil, "cannot apply this operation on custom buckets histograms with different custom bounds", @@ -2402,7 +2402,7 @@ func TestFloatHistogramSub(t *testing.T) { Sum: 12, PositiveSpans: []Span{{0, 2}, {1, 3}}, PositiveBuckets: []float64{0, 0, 2, 3, 6}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, nil, "cannot apply this operation on histograms with a mix of exponential and custom bucket schemas", @@ -2521,7 +2521,7 @@ func TestFloatHistogramCopyToSchema(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 3}, {5, 5}}, PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, - CustomBounds: []float64{1, 2, 3, 4, 5, 6, 7}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7}, }, 
&FloatHistogram{ Count: 30, @@ -2529,7 +2529,7 @@ func TestFloatHistogramCopyToSchema(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{0, 3}, {5, 5}}, PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, - CustomBounds: []float64{1, 2, 3, 4, 5, 6, 7}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7}, }, }, } @@ -3120,7 +3120,7 @@ func TestFloatCustomBucketsIterators(t *testing.T) { {Offset: 1, Length: 1}, }, PositiveBuckets: []float64{100, 344, 123, 55}, - CustomBounds: []float64{10, 25, 50, 100, 500}, + CustomValues: []float64{10, 25, 50, 100, 500}, }, expPositiveBuckets: []Bucket[float64]{ {Lower: math.Inf(-1), Upper: 10, LowerInclusive: true, UpperInclusive: true, Count: 100, Index: 0}, @@ -3140,7 +3140,7 @@ func TestFloatCustomBucketsIterators(t *testing.T) { {Offset: 1, Length: 1}, }, PositiveBuckets: []float64{100, 344, 123, 55}, - CustomBounds: []float64{-10, -5, 0, 10, 25}, + CustomValues: []float64{-10, -5, 0, 10, 25}, }, expPositiveBuckets: []Bucket[float64]{ {Lower: math.Inf(-1), Upper: -10, LowerInclusive: true, UpperInclusive: true, Count: 100, Index: 0}, @@ -3259,7 +3259,7 @@ func TestFloatHistogramEquals(t *testing.T) { // Custom bounds are defined for exponential schema. hCustom := h1.Copy() - hCustom.CustomBounds = []float64{1, 2, 3} + hCustom.CustomValues = []float64{1, 2, 3} equals(h1, *hCustom) cbh1 := FloatHistogram{ @@ -3268,7 +3268,7 @@ func TestFloatHistogramEquals(t *testing.T) { Sum: 9.7, PositiveSpans: []Span{{0, 1}}, PositiveBuckets: []float64{3}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, } require.NoError(t, cbh1.Validate()) @@ -3278,7 +3278,7 @@ func TestFloatHistogramEquals(t *testing.T) { // Has different custom bounds for custom buckets schema. cbh2 = cbh1.Copy() - cbh2.CustomBounds = []float64{1, 2, 3, 4} + cbh2.CustomValues = []float64{1, 2, 3, 4} notEquals(cbh1, *cbh2) // Has non-empty negative spans and buckets for custom buckets schema. @@ -3313,7 +3313,7 @@ func TestFloatHistogramSize(t *testing.T) { PositiveBuckets: nil, // 24 bytes. NegativeSpans: nil, // 24 bytes. NegativeBuckets: nil, // 24 bytes. - CustomBounds: nil, // 24 bytes. + CustomValues: nil, // 24 bytes. }, 8 + 4 + 4 + 8 + 8 + 8 + 8 + 24 + 24 + 24 + 24 + 24, }, @@ -3335,7 +3335,7 @@ func TestFloatHistogramSize(t *testing.T) { {3, 2}, // 2 * 4 bytes. {3, 2}}, // 2 * 4 bytes. NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, // 24 bytes + 4 * 8 bytes. - CustomBounds: nil, // 24 bytes. + CustomValues: nil, // 24 bytes. }, 8 + 4 + 4 + 8 + 8 + 8 + 8 + (24 + 2*4 + 2*4) + (24 + 2*4 + 2*4) + (24 + 4*8) + (24 + 4*8) + 24, }, @@ -3355,7 +3355,7 @@ func TestFloatHistogramSize(t *testing.T) { PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, // 24 bytes + 4 * 8 bytes. NegativeSpans: nil, // 24 bytes. NegativeBuckets: nil, // 24 bytes. - CustomBounds: []float64{1, 2, 3}, // 24 bytes + 3 * 8 bytes. + CustomValues: []float64{1, 2, 3}, // 24 bytes + 3 * 8 bytes. 
}, 8 + 4 + 4 + 8 + 8 + 8 + 8 + (24 + 2*4 + 2*4) + (24 + 4*8) + 24 + 24 + (24 + 3*8), }, @@ -3406,7 +3406,7 @@ func TestFloatHistogramString(t *testing.T) { {2, 4}, }, PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 5}, - CustomBounds: []float64{1, 2, 5, 10, 15, 20}, + CustomValues: []float64{1, 2, 5, 10, 15, 20}, }, `{count:3493.3, sum:2.349209324e+06, [-Inf,1]:1, (5,10]:3.3, (10,15]:4.2, (15,20]:0.1, (20,+Inf]:5}`, }, diff --git a/model/histogram/generic.go b/model/histogram/generic.go index c6780c2003..025888ccae 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -23,7 +23,7 @@ import ( const ( ExponentialSchemaMax int32 = 8 ExponentialSchemaMin int32 = -4 - CustomBucketsSchema int32 = 127 + CustomBucketsSchema int32 = -53 ) var ( @@ -134,7 +134,7 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct { currCount IBC // Count in the current bucket. currIdx int32 // The actual bucket index. - customBounds []float64 // Bounds (usually upper) for histograms with custom buckets. + customValues []float64 // Bounds (usually upper) for histograms with custom buckets. } func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] { @@ -148,11 +148,11 @@ func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] { Index: b.currIdx, } if b.positive { - bucket.Upper = getBound(b.currIdx, schema, b.customBounds) - bucket.Lower = getBound(b.currIdx-1, schema, b.customBounds) + bucket.Upper = getBound(b.currIdx, schema, b.customValues) + bucket.Lower = getBound(b.currIdx-1, schema, b.customValues) } else { - bucket.Lower = -getBound(b.currIdx, schema, b.customBounds) - bucket.Upper = -getBound(b.currIdx-1, schema, b.customBounds) + bucket.Lower = -getBound(b.currIdx, schema, b.customValues) + bucket.Upper = -getBound(b.currIdx-1, schema, b.customValues) } if IsCustomBucketsSchema(schema) { bucket.LowerInclusive = b.currIdx == 0 @@ -446,9 +446,9 @@ func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) return nil } -func getBound(idx, schema int32, customBounds []float64) float64 { +func getBound(idx, schema int32, customValues []float64) float64 { if IsCustomBucketsSchema(schema) { - length := int32(len(customBounds)) + length := int32(len(customValues)) switch { case idx > length || idx < -1: panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length)) @@ -457,7 +457,7 @@ func getBound(idx, schema int32, customBounds []float64) float64 { case idx == -1: return math.Inf(-1) default: - return customBounds[idx] + return customValues[idx] } } return getBoundExponential(idx, schema) diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index 98a8a606c9..17cd036c04 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -54,8 +54,8 @@ type Histogram struct { // They are all for base-2 bucket schemas, where 1 is a bucket boundary in // each case, and then each power of two is divided into 2^n logarithmic buckets. // Or in other words, each bucket boundary is the previous boundary times - // 2^(2^-n). Another valid schema number is 127 for custom buckets, defined by - // the CustomBounds field. + // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by + // the CustomValues field. Schema int32 // Width of the zero bucket. ZeroThreshold float64 @@ -74,9 +74,9 @@ type Histogram struct { // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil. // This slice is interned, to be treated as immutable and copied by reference. 
// These numbers should be strictly increasing. This field is only used when the - // schema is 127, and the ZeroThreshold, ZeroCount, NegativeSpans and NegativeBuckets - // fields are not used. - CustomBounds []float64 + // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans + // and NegativeBuckets fields are not used. + CustomValues []float64 } // A Span defines a continuous sequence of buckets. @@ -102,7 +102,10 @@ func (h *Histogram) Copy() *Histogram { } if h.UsesCustomBuckets() { - c.CustomBounds = h.CustomBounds + if len(h.CustomValues) != 0 { + c.CustomValues = make([]float64, len(h.CustomValues)) + copy(c.CustomValues, h.CustomValues) + } } else { c.ZeroThreshold = h.ZeroThreshold c.ZeroCount = h.ZeroCount @@ -144,7 +147,8 @@ func (h *Histogram) CopyTo(to *Histogram) { to.NegativeSpans = clearIfNotNil(to.NegativeSpans) to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) - to.CustomBounds = h.CustomBounds + to.CustomValues = resize(to.CustomValues, len(h.CustomValues)) + copy(to.CustomValues, h.CustomValues) } else { to.ZeroThreshold = h.ZeroThreshold to.ZeroCount = h.ZeroCount @@ -155,7 +159,7 @@ func (h *Histogram) CopyTo(to *Histogram) { to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) copy(to.NegativeBuckets, h.NegativeBuckets) - to.CustomBounds = clearIfNotNil(to.CustomBounds) + to.CustomValues = clearIfNotNil(to.CustomValues) } to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) @@ -213,7 +217,7 @@ func (h *Histogram) ZeroBucket() Bucket[uint64] { // PositiveBucketIterator returns a BucketIterator to iterate over all positive // buckets in ascending order (starting next to the zero bucket and going up). func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] { - it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomBounds) + it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues) return &it } @@ -255,7 +259,7 @@ func (h *Histogram) Equals(h2 *Histogram) bool { } if h.UsesCustomBuckets() { - if !floatBucketsMatch(h.CustomBounds, h2.CustomBounds) { + if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) { return false } } @@ -375,7 +379,9 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { fh.ZeroCount = 0 fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans) fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets) - fh.CustomBounds = h.CustomBounds + + fh.CustomValues = resize(fh.CustomValues, len(h.CustomValues)) + copy(fh.CustomValues, h.CustomValues) } else { fh.ZeroThreshold = h.ZeroThreshold fh.ZeroCount = float64(h.ZeroCount) @@ -389,7 +395,7 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { currentNegative += float64(b) fh.NegativeBuckets[i] = currentNegative } - fh.CustomBounds = clearIfNotNil(fh.CustomBounds) + fh.CustomValues = clearIfNotNil(fh.CustomValues) } fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans)) @@ -430,7 +436,7 @@ func clearIfNotNil[T any](items []T) []T { func (h *Histogram) Validate() error { var nCount, pCount uint64 if h.UsesCustomBuckets() { - if err := checkHistogramCustomBounds(h.CustomBounds, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { return fmt.Errorf("custom buckets: %w", err) } if h.ZeroCount != 0 { @@ -456,7 +462,7 @@ func (h *Histogram) Validate() error { if err != nil { return fmt.Errorf("negative side: %w", err) } 
- if h.CustomBounds != nil { + if h.CustomValues != nil { return fmt.Errorf("histogram with exponential schema must not have custom bounds") } } @@ -483,13 +489,13 @@ type regularBucketIterator struct { baseBucketIterator[uint64, int64] } -func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customBounds []float64) regularBucketIterator { +func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customValues []float64) regularBucketIterator { i := baseBucketIterator[uint64, int64]{ schema: schema, spans: spans, buckets: buckets, positive: positive, - customBounds: customBounds, + customValues: customValues, } return regularBucketIterator{i} } @@ -563,7 +569,7 @@ func (c *cumulativeBucketIterator) Next() bool { if c.emptyBucketCount > 0 { // We are traversing through empty buckets at the moment. - c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomBounds) + c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues) c.currIdx++ c.emptyBucketCount-- return true @@ -580,7 +586,7 @@ func (c *cumulativeBucketIterator) Next() bool { c.currCount += c.h.PositiveBuckets[c.posBucketsIdx] c.currCumulativeCount += uint64(c.currCount) - c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomBounds) + c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues) c.posBucketsIdx++ c.idxInSpan++ diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go index e63819debc..2c276e6c92 100644 --- a/model/histogram/histogram_test.go +++ b/model/histogram/histogram_test.go @@ -80,7 +80,7 @@ func TestHistogramString(t *testing.T) { {Offset: 0, Length: 3}, }, PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - CustomBounds: []float64{1, 2, 5, 10, 15, 20, 25, 50}, + CustomValues: []float64{1, 2, 5, 10, 15, 20, 25, 50}, }, expectedString: "{count:19, sum:2.7, [-Inf,1]:1, (1,2]:3, (2,5]:1, (5,10]:2, (10,15]:1, (15,20]:1, (20,25]:1}", }, @@ -231,7 +231,7 @@ func TestCumulativeBucketIterator(t *testing.T) { {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{1, 1, -1, 0}, - CustomBounds: []float64{5, 10, 20, 50}, + CustomValues: []float64{5, 10, 20, 50}, }, expectedBuckets: []Bucket[uint64]{ {Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, @@ -411,7 +411,7 @@ func TestRegularBucketIterator(t *testing.T) { {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{1, 1, -1, 0}, - CustomBounds: []float64{5, 10, 20, 50}, + CustomValues: []float64{5, 10, 20, 50}, }, expectedPositiveBuckets: []Bucket[uint64]{ {Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, @@ -430,7 +430,7 @@ func TestRegularBucketIterator(t *testing.T) { {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{1, 1, -1, 0}, - CustomBounds: []float64{0, 10, 20, 50}, + CustomValues: []float64{0, 10, 20, 50}, }, expectedPositiveBuckets: []Bucket[uint64]{ {Lower: math.Inf(-1), Upper: 0, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, @@ -448,7 +448,7 @@ func TestRegularBucketIterator(t *testing.T) { {Offset: 0, Length: 5}, }, PositiveBuckets: []int64{1, 1, 0, -1, 0}, - CustomBounds: []float64{-5, 0, 20, 50}, + CustomValues: []float64{-5, 0, 20, 50}, }, expectedPositiveBuckets: []Bucket[uint64]{ {Lower: math.Inf(-1), Upper: -5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, @@ -563,7 +563,7 @@ func TestCustomBucketsHistogramToFloat(t *testing.T) { {Offset: 0, Length: 3}, }, PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - CustomBounds: 
[]float64{5, 10, 20, 50, 100, 500}, + CustomValues: []float64{5, 10, 20, 50, 100, 500}, } cases := []struct { name string @@ -773,7 +773,7 @@ func TestHistogramEquals(t *testing.T) { // Has non-empty custom bounds for exponential schema. hCustom := h1.Copy() - hCustom.CustomBounds = []float64{1, 2, 3} + hCustom.CustomValues = []float64{1, 2, 3} equals(h1, *hCustom) cbh1 := Histogram{ @@ -785,7 +785,7 @@ func TestHistogramEquals(t *testing.T) { {Offset: 10, Length: 3}, }, PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - CustomBounds: []float64{0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 200, 250, 500, 1000}, + CustomValues: []float64{0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 200, 250, 500, 1000}, } require.NoError(t, cbh1.Validate()) @@ -795,7 +795,7 @@ func TestHistogramEquals(t *testing.T) { // Has different custom bounds for custom buckets schema. cbh2 = cbh1.Copy() - cbh2.CustomBounds = []float64{0.1, 0.2, 0.5} + cbh2.CustomValues = []float64{0.1, 0.2, 0.5} notEquals(cbh1, *cbh2) // Has non-empty negative spans and buckets for custom buckets schema. @@ -853,13 +853,13 @@ func TestHistogramCopy(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 1}}, PositiveBuckets: []int64{1, 3, -3, 42}, - CustomBounds: []float64{5, 10, 15}, + CustomValues: []float64{5, 10, 15}, }, expected: &Histogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 1}}, PositiveBuckets: []int64{1, 3, -3, 42}, - CustomBounds: []float64{5, 10, 15}, + CustomValues: []float64{5, 10, 15}, }, }, } @@ -918,13 +918,13 @@ func TestHistogramCopyTo(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 1}}, PositiveBuckets: []int64{1, 3, -3, 42}, - CustomBounds: []float64{5, 10, 15}, + CustomValues: []float64{5, 10, 15}, }, expected: &Histogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 1}}, PositiveBuckets: []int64{1, 3, -3, 42}, - CustomBounds: []float64{5, 10, 15}, + CustomValues: []float64{5, 10, 15}, }, }, } @@ -1214,14 +1214,14 @@ func TestHistogramCompact(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 1}, {2, 3}}, PositiveBuckets: []int64{1, 3, -3, 42}, - CustomBounds: []float64{5, 10, 15}, + CustomValues: []float64{5, 10, 15}, }, 0, &Histogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 1}, {2, 3}}, PositiveBuckets: []int64{1, 3, -3, 42}, - CustomBounds: []float64{5, 10, 15}, + CustomValues: []float64{5, 10, 15}, }, }, { @@ -1230,14 +1230,14 @@ func TestHistogramCompact(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 1}, {0, 3}, {0, 1}}, PositiveBuckets: []int64{1, 3, -3, 42, 3}, - CustomBounds: []float64{5, 10, 15, 20}, + CustomValues: []float64{5, 10, 15, 20}, }, 0, &Histogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 5}}, PositiveBuckets: []int64{1, 3, -3, 42, 3}, - CustomBounds: []float64{5, 10, 15, 20}, + CustomValues: []float64{5, 10, 15, 20}, }, }, { @@ -1246,14 +1246,14 @@ func TestHistogramCompact(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 2}, {2, 0}, {3, 3}}, PositiveBuckets: []int64{1, 3, -3, 42, 3}, - CustomBounds: []float64{5, 10, 15, 20}, + CustomValues: []float64{5, 10, 15, 20}, }, 0, &Histogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 2}, {5, 3}}, PositiveBuckets: []int64{1, 3, -3, 42, 3}, - CustomBounds: []float64{5, 10, 15, 20}, + CustomValues: []float64{5, 10, 15, 20}, }, }, { @@ -1262,14 +1262,14 @@ func TestHistogramCompact(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 2}, 
{2, 0}, {2, 0}, {2, 0}, {3, 3}}, PositiveBuckets: []int64{1, 3, -3, 42, 3}, - CustomBounds: []float64{5, 10, 15, 20}, + CustomValues: []float64{5, 10, 15, 20}, }, 0, &Histogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 2}, {9, 3}}, PositiveBuckets: []int64{1, 3, -3, 42, 3}, - CustomBounds: []float64{5, 10, 15, 20}, + CustomValues: []float64{5, 10, 15, 20}, }, }, { @@ -1278,14 +1278,14 @@ func TestHistogramCompact(t *testing.T) { Schema: CustomBucketsSchema, PositiveSpans: []Span{{-4, 6}, {3, 6}}, PositiveBuckets: []int64{0, 0, 1, 3, -4, 0, 1, 42, 3, -46, 0, 0}, - CustomBounds: []float64{5, 10, 15, 20}, + CustomValues: []float64{5, 10, 15, 20}, }, 0, &Histogram{ Schema: CustomBucketsSchema, PositiveSpans: []Span{{-2, 2}, {5, 3}}, PositiveBuckets: []int64{1, 3, -3, 42, 3}, - CustomBounds: []float64{5, 10, 15, 20}, + CustomValues: []float64{5, 10, 15, 20}, }, }, } @@ -1454,7 +1454,7 @@ func TestHistogramValidation(t *testing.T) { {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{1, 1, -1, 0}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, errMsg: `histogram with exponential schema must not have custom bounds`, skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation @@ -1476,7 +1476,7 @@ func TestHistogramValidation(t *testing.T) { {Offset: 1, Length: 2}, }, NegativeBuckets: []int64{1, 1, -1, 0}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, errMsg: `custom buckets: must have zero count of 0`, skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation @@ -1491,7 +1491,7 @@ func TestHistogramValidation(t *testing.T) { {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{1, 1, -1, 0}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, errMsg: `custom buckets: span number 1 with offset -1: histogram has a span whose offset is negative`, }, @@ -1505,7 +1505,7 @@ func TestHistogramValidation(t *testing.T) { {Offset: -1, Length: 2}, }, PositiveBuckets: []int64{1, 1, -1, 0}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, errMsg: `custom buckets: span number 2 with offset -1: histogram has a span whose offset is negative`, }, @@ -1519,7 +1519,7 @@ func TestHistogramValidation(t *testing.T) { {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{1, 1, -1}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, errMsg: `custom buckets: spans need 4 buckets, have 3 buckets: histogram spans specify different number of buckets than provided`, }, @@ -1533,7 +1533,7 @@ func TestHistogramValidation(t *testing.T) { {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{1, 1, -1, 0}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, }, errMsg: `custom buckets: only 3 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`, }, @@ -1547,7 +1547,7 @@ func TestHistogramValidation(t *testing.T) { {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{1, 1, -1, 0}, - CustomBounds: []float64{1, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4}, }, }, "valid custom buckets histogram with extra bounds": { @@ -1560,7 +1560,7 @@ func TestHistogramValidation(t *testing.T) { {Offset: 1, Length: 2}, }, PositiveBuckets: []int64{1, 1, -1, 0}, - CustomBounds: []float64{1, 2, 3, 4, 5, 6, 7, 8}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8}, }, }, } diff --git 
a/scrape/target_test.go b/scrape/target_test.go index dead3c6fc4..3457af7f0d 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -481,7 +481,7 @@ func TestBucketLimitAppender(t *testing.T) { {Offset: 0, Length: 3}, }, PositiveBuckets: []int64{3, 0, 0}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, } cases := []struct { @@ -592,7 +592,7 @@ func TestMaxSchemaAppender(t *testing.T) { {Offset: 0, Length: 3}, }, PositiveBuckets: []int64{3, 0, 0}, - CustomBounds: []float64{1, 2, 3}, + CustomValues: []float64{1, 2, 3}, } cases := []struct { diff --git a/tsdb/chunkenc/bstream.go b/tsdb/chunkenc/bstream.go index 7b17f4686b..6e13c40464 100644 --- a/tsdb/chunkenc/bstream.go +++ b/tsdb/chunkenc/bstream.go @@ -113,6 +113,16 @@ func (b *bstream) writeBits(u uint64, nbits int) { } } +// wrapper for the standard library's PutUvarint to make it work +// with our bstream. +func (b *bstream) putUvarint(x uint64) { + buf := make([]byte, 2) + l := binary.PutUvarint(buf, x) + for i := 0; i < l; i++ { + b.writeByte(buf[i]) + } +} + type bstreamReader struct { stream []byte streamOffset int // The offset from which read the next byte from the stream. @@ -257,3 +267,9 @@ func (b *bstreamReader) loadNextBuffer(nbits uint8) bool { return true } + +// wrapper for the standard library's ReadUvarint to make it work +// with our bstream. +func (b *bstreamReader) readUvarint() (uint64, error) { + return binary.ReadUvarint(b) +} diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index 88d189254f..b5ac4ae6ed 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -72,6 +72,7 @@ func (c *FloatHistogramChunk) NumSamples() int { func (c *FloatHistogramChunk) Layout() ( schema int32, zeroThreshold float64, negativeSpans, positiveSpans []histogram.Span, + customValues []float64, err error, ) { if c.NumSamples() == 0 { @@ -129,17 +130,18 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) { a := &FloatHistogramAppender{ b: &c.b, - schema: it.schema, - zThreshold: it.zThreshold, - pSpans: it.pSpans, - nSpans: it.nSpans, - t: it.t, - tDelta: it.tDelta, - cnt: it.cnt, - zCnt: it.zCnt, - pBuckets: pBuckets, - nBuckets: nBuckets, - sum: it.sum, + schema: it.schema, + zThreshold: it.zThreshold, + pSpans: it.pSpans, + nSpans: it.nSpans, + customValues: it.customValues, + t: it.t, + tDelta: it.tDelta, + cnt: it.cnt, + zCnt: it.zCnt, + pBuckets: pBuckets, + nBuckets: nBuckets, + sum: it.sum, } if it.numTotal == 0 { a.sum.leading = 0xff @@ -187,6 +189,7 @@ type FloatHistogramAppender struct { schema int32 zThreshold float64 pSpans, nSpans []histogram.Span + customValues []float64 t, tDelta int64 sum, cnt, zCnt xorValue @@ -218,6 +221,7 @@ func (a *FloatHistogramAppender) Append(int64, float64) { // // The chunk is not appendable in the following cases: // - The schema has changed. +// - The custom bounds have changed if the current schema is custom buckets. // - The threshold for the zero bucket has changed. // - Any buckets have disappeared. // - There was a counter reset in the count of observations or in any bucket, including the zero bucket. @@ -259,6 +263,11 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) ( return } + if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) { + counterReset = true + return + } + if h.ZeroCount < a.zCnt.value { // There has been a counter reset since ZeroThreshold didn't change. 
counterReset = true @@ -299,6 +308,7 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) ( // // The chunk is not appendable in the following cases: // - The schema has changed. +// - The custom bounds have changed if the current schema is custom buckets. // - The threshold for the zero bucket has changed. // - The last sample in the chunk was stale while the current sample is not stale. func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) ( @@ -325,6 +335,10 @@ func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) ( return } + if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) { + return + } + positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans) negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans) okToAppend = true @@ -418,7 +432,7 @@ func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.Floa if num == 0 { // The first append gets the privilege to dictate the layout // but it's also responsible for encoding it into the chunk! - writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans) + writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans, h.CustomValues) a.schema = h.Schema a.zThreshold = h.ZeroThreshold @@ -434,6 +448,12 @@ func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.Floa } else { a.nSpans = nil } + if len(h.CustomValues) > 0 { + a.customValues = make([]float64, len(h.CustomValues)) + copy(a.customValues, h.CustomValues) + } else { + a.customValues = nil + } numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans) if numPBuckets > 0 { @@ -689,6 +709,7 @@ type floatHistogramIterator struct { schema int32 zThreshold float64 pSpans, nSpans []histogram.Span + customValues []float64 // For the fields that are tracked as deltas and ultimately dod's. t int64 @@ -749,6 +770,7 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) NegativeSpans: it.nSpans, PositiveBuckets: it.pBuckets, NegativeBuckets: it.nBuckets, + CustomValues: it.customValues, } } @@ -771,6 +793,9 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) fh.NegativeBuckets = resize(fh.NegativeBuckets, len(it.nBuckets)) copy(fh.NegativeBuckets, it.nBuckets) + fh.CustomValues = resize(fh.CustomValues, len(it.customValues)) + copy(fh.CustomValues, it.customValues) + return it.t, fh } @@ -815,7 +840,7 @@ func (it *floatHistogramIterator) Next() ValueType { // The first read is responsible for reading the chunk layout // and for initializing fields that depend on it. We give // counter reset info at chunk level, hence we discard it here. 
- schema, zeroThreshold, posSpans, negSpans, err := readHistogramChunkLayout(&it.br) + schema, zeroThreshold, posSpans, negSpans, customValues, err := readHistogramChunkLayout(&it.br) if err != nil { it.err = err return ValNone @@ -823,6 +848,7 @@ func (it *floatHistogramIterator) Next() ValueType { it.schema = schema it.zThreshold = zeroThreshold it.pSpans, it.nSpans = posSpans, negSpans + it.customValues = customValues numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans) // Allocate bucket slices as needed, recycling existing slices // in case this iterator was reset and already has slices of a diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index 054c17f7d9..2ee4422b91 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -280,7 +280,38 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) { } func TestFloatHistogramChunkAppendable(t *testing.T) { - setup := func() (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) { + eh := &histogram.FloatHistogram{ + Count: 5, + ZeroCount: 2, + Sum: 18.4, + ZeroThreshold: 1e-125, + Schema: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 1}, + {Offset: 3, Length: 2}, + {Offset: 3, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1}, + } + + cbh := &histogram.FloatHistogram{ + Count: 24, + Sum: 18.4, + Schema: histogram.CustomBucketsSchema, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 1}, + {Offset: 3, Length: 2}, + {Offset: 3, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, + } + + setup := func(h *histogram.FloatHistogram) (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) { c := Chunk(NewFloatHistogramChunk()) // Create fresh appender and add the first histogram. @@ -289,32 +320,17 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { require.Equal(t, 0, c.NumSamples()) ts := int64(1234567890) - h1 := &histogram.FloatHistogram{ - Count: 5, - ZeroCount: 2, - Sum: 18.4, - ZeroThreshold: 1e-125, - Schema: 1, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 2, Length: 1}, - {Offset: 3, Length: 2}, - {Offset: 3, Length: 1}, - {Offset: 1, Length: 1}, - }, - PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1}, - } - chk, _, app, err := app.AppendFloatHistogram(nil, ts, h1.Copy(), false) + chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.Copy(), false) require.NoError(t, err) require.Nil(t, chk) require.Equal(t, 1, c.NumSamples()) require.Equal(t, UnknownCounterReset, c.(*FloatHistogramChunk).GetCounterResetHeader()) - return c, app.(*FloatHistogramAppender), ts, h1 + return c, app.(*FloatHistogramAppender), ts, h } { // Schema change. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Schema++ _, _, ok, _ := hApp.appendable(h2) @@ -324,7 +340,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { } { // Zero threshold change. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.ZeroThreshold += 0.1 _, _, ok, _ := hApp.appendable(h2) @@ -334,7 +350,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { } { // New histogram that has more buckets. 
- c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 3}, @@ -357,7 +373,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { } { // New histogram that has a bucket missing. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ @@ -379,7 +395,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { } { // New histogram that has a counter reset while buckets are same. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Sum = 23 h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1} @@ -394,7 +410,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { } { // New histogram that has a counter reset while new buckets were added. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 3}, @@ -415,7 +431,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { } { - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) // New histogram that has a counter reset while new buckets were // added before the first bucket and reset on first bucket. (to // catch the edge case where the new bucket should be forwarded @@ -442,7 +458,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { } { // New histogram that has an explicit counter reset. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.CounterResetHint = histogram.CounterReset @@ -450,7 +466,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { } { // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk. - _, hApp, ts, h1 := setup() + _, hApp, ts, h1 := setup(eh) h2 := h1.Copy() // Identity is appendable. nextChunk := NewFloatHistogramChunk() @@ -466,7 +482,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { } { // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk. - _, hApp, ts, h1 := setup() + _, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Count-- // Make this not appendable due to counter reset. @@ -483,7 +499,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { } { // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk. - _, hApp, ts, h1 := setup() + _, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 3}, @@ -507,6 +523,72 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { assertSampleCount(t, nextChunk, 1, ValFloatHistogram) require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader()) } + + { // Custom buckets, no change. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + _, _, ok, _ := hApp.appendable(h2) + require.True(t, ok) + + assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + } + + { // Custom buckets, increase in bucket counts but no change in layout. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.Count++ + h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 2} + _, _, ok, _ := hApp.appendable(h2) + require.True(t, ok) + + assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + } + + { // Custom buckets, decrease in bucket counts but no change in layout. 
+ c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.Count-- + h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 0} + _, _, ok, _ := hApp.appendable(h2) + require.False(t, ok) + + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + } + + { // Custom buckets, change only in custom bounds. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21} + _, _, ok, _ := hApp.appendable(h2) + require.False(t, ok) + + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + } + + { // Custom buckets, with more buckets. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.PositiveSpans = []histogram.Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + {Offset: 1, Length: 4}, + {Offset: 3, Length: 3}, + } + h2.Count += 6 + h2.Sum = 30 + // Existing histogram should get values converted from the above to: + // 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between) + // so the new histogram should have new counts >= these per-bucket counts, e.g.: + h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} // (total 30) + + posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + require.NotEmpty(t, posInterjections) + require.Empty(t, negInterjections) + require.True(t, ok) // Only new buckets came in. + require.False(t, cr) + + assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + } } func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) { @@ -526,7 +608,7 @@ func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *Fl func assertNoNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) { oldChunkBytes := oldChunk.Bytes() newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false) - require.NotEqual(t, oldChunkBytes, oldChunk.Bytes()) // Sanity check that previous chunk is untouched. + require.Greater(t, len(oldChunk.Bytes()), len(oldChunkBytes)) // Check that current chunk is bigger than previously. 
require.NoError(t, err) require.Nil(t, newChunk) require.False(t, recoded) @@ -715,6 +797,32 @@ func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) { NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2}, }, }, + "empty span in old and new custom buckets histogram": { + h1: &histogram.FloatHistogram{ + Schema: histogram.CustomBucketsSchema, + Count: 7, + Sum: 1234.5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{1, 2, 1, 1, 1, 1, 1}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + h2: &histogram.FloatHistogram{ + Schema: histogram.CustomBucketsSchema, + Count: 10, + Sum: 2345.6, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + }, } for name, tc := range tests { @@ -741,7 +849,40 @@ func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) { } func TestFloatHistogramChunkAppendableGauge(t *testing.T) { - setup := func() (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) { + eh := &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 5, + ZeroCount: 2, + Sum: 18.4, + ZeroThreshold: 1e-125, + Schema: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 1}, + {Offset: 3, Length: 2}, + {Offset: 3, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1}, + } + + cbh := &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 24, + Sum: 18.4, + Schema: histogram.CustomBucketsSchema, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 1}, + {Offset: 3, Length: 2}, + {Offset: 3, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, + } + + setup := func(h *histogram.FloatHistogram) (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) { c := Chunk(NewFloatHistogramChunk()) // Create fresh appender and add the first histogram. @@ -750,33 +891,17 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { require.Equal(t, 0, c.NumSamples()) ts := int64(1234567890) - h1 := &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Count: 5, - ZeroCount: 2, - Sum: 18.4, - ZeroThreshold: 1e-125, - Schema: 1, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 2, Length: 1}, - {Offset: 3, Length: 2}, - {Offset: 3, Length: 1}, - {Offset: 1, Length: 1}, - }, - PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1}, - } - chk, _, app, err := app.AppendFloatHistogram(nil, ts, h1.Copy(), false) + chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.Copy(), false) require.NoError(t, err) require.Nil(t, chk) require.Equal(t, 1, c.NumSamples()) require.Equal(t, GaugeType, c.(*FloatHistogramChunk).GetCounterResetHeader()) - return c, app.(*FloatHistogramAppender), ts, h1 + return c, app.(*FloatHistogramAppender), ts, h } { // Schema change. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Schema++ _, _, _, _, _, _, ok := hApp.appendableGauge(h2) @@ -786,7 +911,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { } { // Zero threshold change. 
- c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.ZeroThreshold += 0.1 _, _, _, _, _, _, ok := hApp.appendableGauge(h2) @@ -796,7 +921,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { } { // New histogram that has more buckets. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 3}, @@ -820,7 +945,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { } { // New histogram that has buckets missing. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 2}, @@ -844,7 +969,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { } { // New histogram that has a bucket missing and new buckets. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 2}, @@ -866,7 +991,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { } { // New histogram that has a counter reset while buckets are same. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Sum = 23 h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1} @@ -882,7 +1007,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { } { // New histogram that has a counter reset while new buckets were added. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 3}, @@ -906,7 +1031,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { { // New histogram that has a counter reset while new buckets were // added before the first bucket and reset on first bucket. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: -3, Length: 2}, @@ -928,6 +1053,73 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) } + + { // Custom buckets, no change. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + _, _, _, _, _, _, ok := hApp.appendableGauge(h2) + require.True(t, ok) + + assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + } + + { // Custom buckets, increase in bucket counts but no change in layout. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.Count++ + h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 2} + _, _, _, _, _, _, ok := hApp.appendableGauge(h2) + require.True(t, ok) + + assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + } + + { // Custom buckets, decrease in bucket counts but no change in layout. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.Count-- + h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 0} + _, _, _, _, _, _, ok := hApp.appendableGauge(h2) + require.True(t, ok) + + assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + } + + { // Custom buckets, change only in custom bounds. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21} + _, _, _, _, _, _, ok := hApp.appendableGauge(h2) + require.False(t, ok) + + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + } + + { // Custom buckets, with more buckets. 
+ c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.PositiveSpans = []histogram.Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + {Offset: 1, Length: 4}, + {Offset: 3, Length: 3}, + } + h2.Count += 6 + h2.Sum = 30 + // Existing histogram should get values converted from the above to: + // 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between) + // so the new histogram should have new counts >= these per-bucket counts, e.g.: + h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} // (total 30) + + posInterjections, negInterjections, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) + require.NotEmpty(t, posInterjections) + require.Empty(t, negInterjections) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) + require.True(t, ok) // Only new buckets came in. + + assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + } } func TestFloatHistogramAppendOnlyErrors(t *testing.T) { @@ -975,4 +1167,26 @@ func TestFloatHistogramAppendOnlyErrors(t *testing.T) { require.False(t, isRecoded) require.EqualError(t, err, "float histogram counter reset") }) + t.Run("counter reset error with custom buckets", func(t *testing.T) { + c := Chunk(NewFloatHistogramChunk()) + + // Create fresh appender and add the first histogram. + app, err := c.Appender() + require.NoError(t, err) + + h := tsdbutil.GenerateTestCustomBucketsFloatHistogram(0) + var isRecoded bool + c, isRecoded, app, err = app.AppendFloatHistogram(nil, 1, h, true) + require.Nil(t, c) + require.False(t, isRecoded) + require.NoError(t, err) + + // Add erroring histogram. + h2 := h.Copy() + h2.CustomValues = []float64{0, 1, 2, 3, 4, 5, 6, 7} + c, isRecoded, _, err = app.AppendFloatHistogram(nil, 2, h2, true) + require.Nil(t, c) + require.False(t, isRecoded) + require.EqualError(t, err, "float histogram counter reset") + }) } diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index cb09eda26d..48c13a3ac4 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -65,6 +65,7 @@ func (c *HistogramChunk) NumSamples() int { func (c *HistogramChunk) Layout() ( schema int32, zeroThreshold float64, negativeSpans, positiveSpans []histogram.Span, + customValues []float64, err error, ) { if c.NumSamples() == 0 { @@ -127,6 +128,7 @@ func (c *HistogramChunk) Appender() (Appender, error) { zThreshold: it.zThreshold, pSpans: it.pSpans, nSpans: it.nSpans, + customValues: it.customValues, t: it.t, cnt: it.cnt, zCnt: it.zCnt, @@ -194,6 +196,7 @@ type HistogramAppender struct { schema int32 zThreshold float64 pSpans, nSpans []histogram.Span + customValues []float64 // Although we intend to start new chunks on counter resets, we still // have to handle negative deltas for gauge histograms. Therefore, even @@ -237,6 +240,7 @@ func (a *HistogramAppender) Append(int64, float64) { // The chunk is not appendable in the following cases: // // - The schema has changed. +// - The custom bounds have changed if the current schema is custom buckets. // - The threshold for the zero bucket has changed. // - Any buckets have disappeared. // - There was a counter reset in the count of observations or in any bucket, @@ -279,6 +283,11 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) ( return } + if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) { + counterReset = true + return + } + if h.ZeroCount < a.zCnt { // There has been a counter reset since ZeroThreshold didn't change. 
counterReset = true @@ -319,6 +328,7 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) ( // // The chunk is not appendable in the following cases: // - The schema has changed. +// - The custom bounds have changed if the current schema is custom buckets. // - The threshold for the zero bucket has changed. // - The last sample in the chunk was stale while the current sample is not stale. func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) ( @@ -345,6 +355,10 @@ func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) ( return } + if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) { + return + } + positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans) negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans) okToAppend = true @@ -438,7 +452,7 @@ func (a *HistogramAppender) appendHistogram(t int64, h *histogram.Histogram) { if num == 0 { // The first append gets the privilege to dictate the layout // but it's also responsible for encoding it into the chunk! - writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans) + writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans, h.CustomValues) a.schema = h.Schema a.zThreshold = h.ZeroThreshold @@ -454,6 +468,12 @@ func (a *HistogramAppender) appendHistogram(t int64, h *histogram.Histogram) { } else { a.nSpans = nil } + if len(h.CustomValues) > 0 { + a.customValues = make([]float64, len(h.CustomValues)) + copy(a.customValues, h.CustomValues) + } else { + a.customValues = nil + } numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans) if numPBuckets > 0 { @@ -737,6 +757,7 @@ type histogramIterator struct { schema int32 zThreshold float64 pSpans, nSpans []histogram.Span + customValues []float64 // For the fields that are tracked as deltas and ultimately dod's. t int64 @@ -793,6 +814,7 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog NegativeSpans: it.nSpans, PositiveBuckets: it.pBuckets, NegativeBuckets: it.nBuckets, + CustomValues: it.customValues, } } @@ -815,6 +837,9 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog h.NegativeBuckets = resize(h.NegativeBuckets, len(it.nBuckets)) copy(h.NegativeBuckets, it.nBuckets) + h.CustomValues = resize(h.CustomValues, len(it.customValues)) + copy(h.CustomValues, it.customValues) + return it.t, h } @@ -835,6 +860,7 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int NegativeSpans: it.nSpans, PositiveBuckets: it.pFloatBuckets, NegativeBuckets: it.nFloatBuckets, + CustomValues: it.customValues, } } @@ -865,6 +891,9 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int fh.NegativeBuckets[i] = currentNegative } + fh.CustomValues = resize(fh.CustomValues, len(it.customValues)) + copy(fh.CustomValues, it.customValues) + return it.t, fh } @@ -923,7 +952,7 @@ func (it *histogramIterator) Next() ValueType { // The first read is responsible for reading the chunk layout // and for initializing fields that depend on it. We give // counter reset info at chunk level, hence we discard it here. 
- schema, zeroThreshold, posSpans, negSpans, err := readHistogramChunkLayout(&it.br) + schema, zeroThreshold, posSpans, negSpans, customValues, err := readHistogramChunkLayout(&it.br) if err != nil { it.err = err return ValNone @@ -931,6 +960,7 @@ func (it *histogramIterator) Next() ValueType { it.schema = schema it.zThreshold = zeroThreshold it.pSpans, it.nSpans = posSpans, negSpans + it.customValues = customValues numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans) // The code below recycles existing slices in case this iterator // was reset and already has slices of a sufficient capacity. diff --git a/tsdb/chunkenc/histogram_meta.go b/tsdb/chunkenc/histogram_meta.go index 70f129b953..9b65a05ba5 100644 --- a/tsdb/chunkenc/histogram_meta.go +++ b/tsdb/chunkenc/histogram_meta.go @@ -21,17 +21,21 @@ import ( func writeHistogramChunkLayout( b *bstream, schema int32, zeroThreshold float64, - positiveSpans, negativeSpans []histogram.Span, + positiveSpans, negativeSpans []histogram.Span, customValues []float64, ) { putZeroThreshold(b, zeroThreshold) putVarbitInt(b, int64(schema)) putHistogramChunkLayoutSpans(b, positiveSpans) putHistogramChunkLayoutSpans(b, negativeSpans) + if histogram.IsCustomBucketsSchema(schema) { + putHistogramChunkLayoutCustomBounds(b, customValues) + } } func readHistogramChunkLayout(b *bstreamReader) ( schema int32, zeroThreshold float64, positiveSpans, negativeSpans []histogram.Span, + customValues []float64, err error, ) { zeroThreshold, err = readZeroThreshold(b) @@ -55,6 +59,13 @@ func readHistogramChunkLayout(b *bstreamReader) ( return } + if histogram.IsCustomBucketsSchema(schema) { + customValues, err = readHistogramChunkLayoutCustomBounds(b) + if err != nil { + return + } + } + return } @@ -92,6 +103,30 @@ func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) { return spans, nil } +func putHistogramChunkLayoutCustomBounds(b *bstream, customValues []float64) { + putVarbitUint(b, uint64(len(customValues))) + for _, bound := range customValues { + putCustomBound(b, bound) + } +} + +func readHistogramChunkLayoutCustomBounds(b *bstreamReader) ([]float64, error) { + var customValues []float64 + num, err := readVarbitUint(b) + if err != nil { + return nil, err + } + for i := 0; i < int(num); i++ { + bound, err := readCustomBound(b) + if err != nil { + return nil, err + } + + customValues = append(customValues, bound) + } + return customValues, nil +} + // putZeroThreshold writes the zero threshold to the bstream. It stores typical // values in just one byte, but needs 9 bytes for other values. In detail: // - If the threshold is 0, store a single zero byte. @@ -140,6 +175,52 @@ func readZeroThreshold(br *bstreamReader) (float64, error) { } } +// isWholeWhenMultiplied checks to see if the number when multiplied by 1000 can +// be converted into an integer without losing precision. +func isWholeWhenMultiplied(in float64) bool { + i := uint(math.Round(in * 1000)) + out := float64(i) / 1000 + return in == out +} + +// putCustomBound writes the custom bound to the bstream. It stores values from 0 to +// 16.382 (inclusive) that are multiples of 0.001 in an unsigned var int of up to 2 bytes, +// but needs 1 bit + 8 bytes for other values like negative numbers, numbers greater than +// 16.382, or numbers that are not a multiple of 0.001, on the assumption that they are +// less common. In detail: +// - Multiply the bound by 1000, without rounding. 
+// - If the multiplied bound is >= 0, <= 16382 and a whole number, store it as an +// unsigned var int. +// - Otherwise, store 0 as an unsigned var int, followed by the 8 bytes of the original +// bound as a float64. +func putCustomBound(b *bstream, f float64) { + tf := f * 1000 + if tf < 0 || tf > 16382 || !isWholeWhenMultiplied(f) { + b.putUvarint(0) + b.writeBits(math.Float64bits(f), 64) + return + } + b.putUvarint(uint64(math.Round(tf) + 1)) +} + +// readCustomBound reads the custom bound written with putCustomBound. +func readCustomBound(br *bstreamReader) (float64, error) { + b, err := br.readUvarint() + if err != nil { + return 0, err + } + switch b { + case 0: + v, err := br.readBits(64) + if err != nil { + return 0, err + } + return math.Float64frombits(v), nil + default: + return float64(b-1) / 1000, nil + } +} + type bucketIterator struct { spans []histogram.Span span int // Span position of last yielded bucket. diff --git a/tsdb/chunkenc/histogram_meta_test.go b/tsdb/chunkenc/histogram_meta_test.go index 0b2b187475..c3ff4aabc2 100644 --- a/tsdb/chunkenc/histogram_meta_test.go +++ b/tsdb/chunkenc/histogram_meta_test.go @@ -373,6 +373,7 @@ func TestWriteReadHistogramChunkLayout(t *testing.T) { schema int32 zeroThreshold float64 positiveSpans, negativeSpans []histogram.Span + customValues []float64 }{ { schema: 3, @@ -422,23 +423,48 @@ func TestWriteReadHistogramChunkLayout(t *testing.T) { positiveSpans: nil, negativeSpans: nil, }, + { + schema: histogram.CustomBucketsSchema, + positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}}, + negativeSpans: nil, + customValues: []float64{-5, -2.5, 0, 0.1, 0.25, 0.5, 1, 2, 5, 10, 25, 50, 100, 255, 500, 1000}, + }, + { + schema: histogram.CustomBucketsSchema, + positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}}, + negativeSpans: nil, + customValues: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 25.0, 50.0, 100.0}, + }, + { + schema: histogram.CustomBucketsSchema, + positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}}, + negativeSpans: nil, + customValues: []float64{0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192}, + }, + { + schema: histogram.CustomBucketsSchema, + positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}}, + negativeSpans: nil, + customValues: []float64{1.001, 1.023, 2.01, 4.007, 4.095, 8.001, 8.19, 16.24}, + }, } bs := bstream{} for _, l := range layouts { - writeHistogramChunkLayout(&bs, l.schema, l.zeroThreshold, l.positiveSpans, l.negativeSpans) + writeHistogramChunkLayout(&bs, l.schema, l.zeroThreshold, l.positiveSpans, l.negativeSpans, l.customValues) } bsr := newBReader(bs.bytes()) for _, want := range layouts { - gotSchema, gotZeroThreshold, gotPositiveSpans, gotNegativeSpans, err := readHistogramChunkLayout(&bsr) + gotSchema, gotZeroThreshold, gotPositiveSpans, gotNegativeSpans, gotCustomBounds, err := readHistogramChunkLayout(&bsr) require.NoError(t, err) require.Equal(t, want.schema, gotSchema) require.Equal(t, want.zeroThreshold, gotZeroThreshold) require.Equal(t, want.positiveSpans, gotPositiveSpans) require.Equal(t, want.negativeSpans, gotNegativeSpans) + require.Equal(t, want.customValues, gotCustomBounds) } } diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index f7609c1936..d029aaefcb 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -294,7 +294,38 @@ func 
TestHistogramChunkBucketChanges(t *testing.T) { } func TestHistogramChunkAppendable(t *testing.T) { - setup := func() (Chunk, *HistogramAppender, int64, *histogram.Histogram) { + eh := &histogram.Histogram{ + Count: 5, + ZeroCount: 2, + Sum: 18.4, + ZeroThreshold: 1e-125, + Schema: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 1}, + {Offset: 3, Length: 2}, + {Offset: 3, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24) + } + + cbh := &histogram.Histogram{ + Count: 24, + Sum: 18.4, + Schema: histogram.CustomBucketsSchema, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 1}, + {Offset: 3, Length: 2}, + {Offset: 3, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24) + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, + } + + setup := func(h *histogram.Histogram) (Chunk, *HistogramAppender, int64, *histogram.Histogram) { c := Chunk(NewHistogramChunk()) // Create fresh appender and add the first histogram. @@ -303,32 +334,17 @@ func TestHistogramChunkAppendable(t *testing.T) { require.Equal(t, 0, c.NumSamples()) ts := int64(1234567890) - h1 := &histogram.Histogram{ - Count: 5, - ZeroCount: 2, - Sum: 18.4, - ZeroThreshold: 1e-125, - Schema: 1, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 2, Length: 1}, - {Offset: 3, Length: 2}, - {Offset: 3, Length: 1}, - {Offset: 1, Length: 1}, - }, - PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24) - } - chk, _, app, err := app.AppendHistogram(nil, ts, h1.Copy(), false) + chk, _, app, err := app.AppendHistogram(nil, ts, h.Copy(), false) require.NoError(t, err) require.Nil(t, chk) require.Equal(t, 1, c.NumSamples()) require.Equal(t, UnknownCounterReset, c.(*HistogramChunk).GetCounterResetHeader()) - return c, app.(*HistogramAppender), ts, h1 + return c, app.(*HistogramAppender), ts, h } { // Schema change. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Schema++ _, _, ok, _ := hApp.appendable(h2) @@ -338,7 +354,7 @@ func TestHistogramChunkAppendable(t *testing.T) { } { // Zero threshold change. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.ZeroThreshold += 0.1 _, _, ok, _ := hApp.appendable(h2) @@ -348,7 +364,7 @@ func TestHistogramChunkAppendable(t *testing.T) { } { // New histogram that has more buckets. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 3}, @@ -374,7 +390,7 @@ func TestHistogramChunkAppendable(t *testing.T) { } { // New histogram that has a bucket missing. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 2}, @@ -395,7 +411,7 @@ func TestHistogramChunkAppendable(t *testing.T) { } { // New histogram that has a counter reset while buckets are same. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Sum = 23 h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23) @@ -410,7 +426,7 @@ func TestHistogramChunkAppendable(t *testing.T) { } { // New histogram that has a counter reset while new buckets were added. 
- c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 3}, @@ -438,7 +454,7 @@ func TestHistogramChunkAppendable(t *testing.T) { // added before the first bucket and reset on first bucket. (to // catch the edge case where the new bucket should be forwarded // ahead until first old bucket at start) - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: -3, Length: 2}, @@ -464,7 +480,7 @@ func TestHistogramChunkAppendable(t *testing.T) { } { // New histogram that has an explicit counter reset. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.CounterResetHint = histogram.CounterReset @@ -472,7 +488,7 @@ func TestHistogramChunkAppendable(t *testing.T) { } { // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk. - _, hApp, ts, h1 := setup() + _, hApp, ts, h1 := setup(eh) h2 := h1.Copy() // Identity is appendable. nextChunk := NewHistogramChunk() @@ -488,7 +504,7 @@ func TestHistogramChunkAppendable(t *testing.T) { } { // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk. - _, hApp, ts, h1 := setup() + _, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Count-- // Make this not appendable due to counter reset. @@ -505,7 +521,7 @@ func TestHistogramChunkAppendable(t *testing.T) { } { // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk. - _, hApp, ts, h1 := setup() + _, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 3}, @@ -532,6 +548,72 @@ func TestHistogramChunkAppendable(t *testing.T) { assertSampleCount(t, nextChunk, 1, ValHistogram) require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader()) } + + { // Custom buckets, no change. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + _, _, ok, _ := hApp.appendable(h2) + require.True(t, ok) + + assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + } + + { // Custom buckets, increase in bucket counts but no change in layout. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.Count++ + h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -3} + _, _, ok, _ := hApp.appendable(h2) + require.True(t, ok) + + assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + } + + { // Custom buckets, decrease in bucket counts but no change in layout. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.Count-- + h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -5} + _, _, ok, _ := hApp.appendable(h2) + require.False(t, ok) + + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + } + + { // Custom buckets, change only in custom bounds. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21} + _, _, ok, _ := hApp.appendable(h2) + require.False(t, ok) + + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + } + + { // Custom buckets, with more buckets. 
+ c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.PositiveSpans = []histogram.Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + {Offset: 1, Length: 4}, + {Offset: 3, Length: 3}, + } + h2.Count += 6 + h2.Sum = 30 + // Existing histogram should get values converted from the above to: + // 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between) + // so the new histogram should have new counts >= these per-bucket counts, e.g.: + h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30) + + posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + require.NotEmpty(t, posInterjections) + require.Empty(t, negInterjections) + require.True(t, ok) // Only new buckets came in. + require.False(t, cr) + + assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + } } func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) { @@ -548,6 +630,19 @@ func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *Histogr assertSampleCount(t, newChunk, 1, ValHistogram) } +func assertNoNewHistogramChunkOnAppend(t *testing.T, currChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) { + prevChunkBytes := currChunk.Bytes() + newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false) + require.Greater(t, len(currChunk.Bytes()), len(prevChunkBytes)) // Check that current chunk is bigger than previously. + require.NoError(t, err) + require.Nil(t, newChunk) + require.False(t, recoded) + require.Equal(t, expectHeader, currChunk.(*HistogramChunk).GetCounterResetHeader()) + require.NotNil(t, newAppender) + require.Equal(t, hApp, newAppender) + assertSampleCount(t, currChunk, 2, ValHistogram) +} + func assertRecodedHistogramChunkOnAppend(t *testing.T, prevChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) { prevChunkBytes := prevChunk.Bytes() newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false) @@ -738,6 +833,32 @@ func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) { NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, }, }, + "empty span in old and new custom buckets histogram": { + h1: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 7, + Sum: 1234.5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 1, -1, 0, 0, 0, 0}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + h2: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 10, + Sum: 2345.6, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + }, } for name, tc := range tests { @@ -905,7 +1026,40 @@ func TestAtFloatHistogram(t *testing.T) { } func TestHistogramChunkAppendableGauge(t *testing.T) { - setup := func() (Chunk, *HistogramAppender, int64, *histogram.Histogram) { + eh := &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 5, + ZeroCount: 2, + Sum: 18.4, + ZeroThreshold: 1e-125, + Schema: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 1}, + {Offset: 3, Length: 2}, + {Offset: 3, Length: 1}, + 
{Offset: 1, Length: 1}, + }, + PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // {6, 3, 3, 2, 4, 5, 1} + } + + cbh := &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 24, + Sum: 18.4, + Schema: histogram.CustomBucketsSchema, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 1}, + {Offset: 3, Length: 2}, + {Offset: 3, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // {6, 3, 3, 2, 4, 5, 1} + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, + } + + setup := func(h *histogram.Histogram) (Chunk, *HistogramAppender, int64, *histogram.Histogram) { c := Chunk(NewHistogramChunk()) // Create fresh appender and add the first histogram. @@ -914,66 +1068,38 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { require.Equal(t, 0, c.NumSamples()) ts := int64(1234567890) - h1 := &histogram.Histogram{ - CounterResetHint: histogram.GaugeType, - Count: 5, - ZeroCount: 2, - Sum: 18.4, - ZeroThreshold: 1e-125, - Schema: 1, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 2, Length: 1}, - {Offset: 3, Length: 2}, - {Offset: 3, Length: 1}, - {Offset: 1, Length: 1}, - }, - PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // {6, 3, 3, 2, 4, 5, 1} - } - chk, _, app, err := app.AppendHistogram(nil, ts, h1.Copy(), false) + chk, _, app, err := app.AppendHistogram(nil, ts, h.Copy(), false) require.NoError(t, err) require.Nil(t, chk) require.Equal(t, 1, c.NumSamples()) require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader()) - return c, app.(*HistogramAppender), ts, h1 + return c, app.(*HistogramAppender), ts, h } { // Schema change. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Schema++ _, _, _, _, _, _, ok := hApp.appendableGauge(h2) require.False(t, ok) - newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) - require.NoError(t, err) - require.NotNil(t, newc) - require.False(t, recoded) - require.NotEqual(t, c, newc) - require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader()) - require.Equal(t, GaugeType, newc.(*HistogramChunk).GetCounterResetHeader()) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) } { // Zero threshold change. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.ZeroThreshold += 0.1 _, _, _, _, _, _, ok := hApp.appendableGauge(h2) require.False(t, ok) - newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) - require.NoError(t, err) - require.NotNil(t, newc) - require.False(t, recoded) - require.NotEqual(t, c, newc) - require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader()) - require.Equal(t, GaugeType, newc.(*HistogramChunk).GetCounterResetHeader()) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) } { // New histogram that has more buckets. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 3}, @@ -993,15 +1119,11 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { require.Empty(t, nBackwardI) require.True(t, ok) - newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) - require.NoError(t, err) - require.NotNil(t, newc) - require.True(t, recoded) - require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader()) + assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) } { // New histogram that has buckets missing. 
- c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 2}, @@ -1021,15 +1143,11 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { require.Empty(t, nBackwardI) require.True(t, ok) - newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) - require.NoError(t, err) - require.Nil(t, newc) - require.False(t, recoded) - require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader()) + assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) } { // New histogram that has a bucket missing and new buckets. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 2}, @@ -1047,15 +1165,11 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { require.Empty(t, nBackwardI) require.True(t, ok) - newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) - require.NoError(t, err) - require.NotNil(t, newc) - require.True(t, recoded) - require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader()) + assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) } { // New histogram that has a counter reset while buckets are same. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Sum = 23 h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // {6, 2, 3, 2, 4, 5, 1} @@ -1067,15 +1181,11 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { require.Empty(t, nBackwardI) require.True(t, ok) - newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) - require.NoError(t, err) - require.Nil(t, newc) - require.False(t, recoded) - require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader()) + assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) } { // New histogram that has a counter reset while new buckets were added. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: 0, Length: 3}, @@ -1093,17 +1203,13 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { require.Empty(t, nBackwardI) require.True(t, ok) - newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) - require.NoError(t, err) - require.NotNil(t, newc) - require.True(t, recoded) - require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader()) + assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) } { // New histogram that has a counter reset while new buckets were // added before the first bucket and reset on first bucket. - c, hApp, ts, h1 := setup() + c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.PositiveSpans = []histogram.Span{ {Offset: -3, Length: 2}, @@ -1123,11 +1229,74 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { require.Empty(t, nBackwardI) require.True(t, ok) - newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) - require.NoError(t, err) - require.NotNil(t, newc) - require.True(t, recoded) - require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader()) + assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + } + + { // Custom buckets, no change. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + _, _, _, _, _, _, ok := hApp.appendableGauge(h2) + require.True(t, ok) + + assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + } + + { // Custom buckets, increase in bucket counts but no change in layout. 
+ c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.Count++ + h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -3} + _, _, _, _, _, _, ok := hApp.appendableGauge(h2) + require.True(t, ok) + + assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + } + + { // Custom buckets, decrease in bucket counts but no change in layout. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.Count-- + h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -5} + _, _, _, _, _, _, ok := hApp.appendableGauge(h2) + require.True(t, ok) + + assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + } + + { // Custom buckets, change only in custom bounds. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21} + _, _, _, _, _, _, ok := hApp.appendableGauge(h2) + require.False(t, ok) + + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + } + + { // Custom buckets, with more buckets. + c, hApp, ts, h1 := setup(cbh) + h2 := h1.Copy() + h2.PositiveSpans = []histogram.Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + {Offset: 1, Length: 4}, + {Offset: 3, Length: 3}, + } + h2.Count += 6 + h2.Sum = 30 + // Existing histogram should get values converted from the above to: + // 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between) + // so the new histogram should have new counts >= these per-bucket counts, e.g.: + h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30) + + posInterjections, negInterjections, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) + require.NotEmpty(t, posInterjections) + require.Empty(t, negInterjections) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) + require.True(t, ok) // Only new buckets came in. + + assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) } } @@ -1176,4 +1345,26 @@ func TestHistogramAppendOnlyErrors(t *testing.T) { require.False(t, isRecoded) require.EqualError(t, err, "histogram counter reset") }) + t.Run("counter reset error with custom buckets", func(t *testing.T) { + c := Chunk(NewHistogramChunk()) + + // Create fresh appender and add the first histogram. + app, err := c.Appender() + require.NoError(t, err) + + h := tsdbutil.GenerateTestCustomBucketsHistogram(0) + var isRecoded bool + c, isRecoded, app, err = app.AppendHistogram(nil, 1, h, true) + require.Nil(t, c) + require.False(t, isRecoded) + require.NoError(t, err) + + // Add erroring histogram. 
+ h2 := h.Copy() + h2.CustomValues = []float64{0, 1, 2, 3, 4, 5, 6, 7} + c, isRecoded, _, err = app.AppendHistogram(nil, 2, h2, true) + require.Nil(t, c) + require.False(t, isRecoded) + require.EqualError(t, err, "histogram counter reset") + }) } diff --git a/tsdb/tsdbutil/histogram.go b/tsdb/tsdbutil/histogram.go index bb8d49b202..3c7349cf72 100644 --- a/tsdb/tsdbutil/histogram.go +++ b/tsdb/tsdbutil/histogram.go @@ -59,6 +59,20 @@ func GenerateTestHistogram(i int) *histogram.Histogram { } } +func GenerateTestCustomBucketsHistogram(i int) *histogram.Histogram { + return &histogram.Histogram{ + Count: 5 + uint64(i*4), + Sum: 18.4 * float64(i+1), + Schema: histogram.CustomBucketsSchema, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{int64(i + 1), 1, -1, 0}, + CustomValues: []float64{0, 1, 2, 3, 4}, + } +} + func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) { for x := 0; x < n; x++ { i := int(math.Sin(float64(x))*100) + 100 @@ -105,6 +119,20 @@ func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { } } +func GenerateTestCustomBucketsFloatHistogram(i int) *histogram.FloatHistogram { + return &histogram.FloatHistogram{ + Count: 5 + float64(i*4), + Sum: 18.4 * float64(i+1), + Schema: histogram.CustomBucketsSchema, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)}, + CustomValues: []float64{0, 1, 2, 3, 4}, + } +} + func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) { for x := 0; x < n; x++ { i := int(math.Sin(float64(x))*100) + 100 From dc7b282d39e79b64ce4da8921700857ae5f457b9 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Wed, 27 Mar 2024 18:19:14 +0100 Subject: [PATCH 03/30] engine_test: adjust and comment histogram sample counts (#13841) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The size of histogram points are now bigger by 24 bytes due to the custom values slice. When histograms are loaded into partial results in vector selectors we use HPoint type where the size is calculated as (size of histogram + 8 for timestamp)/16. https://github.com/prometheus/prometheus/blob/a3d1a46eda682590a80fb1f15959457dad1e5d91/promql/value.go#L176 When histograms are put into Sample type in range evaluations, the Sample has more overhead and the size is calculated differently: (size of histogram / 16) + 1 for time stamp. https://github.com/prometheus/prometheus/blob/a3d1a46eda682590a80fb1f15959457dad1e5d91/promql/engine.go#L1928 When the size of the histogram is 16k, then the first calculation gives k but the second gives k+1 for the sample count. If the histogram size is 16k+8, then both would give k+1. 
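To make the off-by-one concrete, here is a small illustrative Go snippet
(not part of this change; the 208/216-byte sizes are hypothetical, chosen
only so that k = 13 as in the updated tests) that applies the two formulas
quoted above:

	package main

	import "fmt"

	func main() {
		// Hypothetical histogram sizes: 16*13 = 208 bytes and 16*13+8 = 216 bytes.
		for _, histSize := range []int{208, 216} {
			hPointCount := (histSize + 8) / 16 // HPoint: (size of histogram + 8 for timestamp) / 16
			sampleCount := histSize/16 + 1     // Sample: (size of histogram / 16) + 1 for timestamp
			fmt.Printf("size=%d HPoint=%d Sample=%d\n", histSize, hPointCount, sampleCount)
		}
		// Prints: size=208 HPoint=13 Sample=14
		//         size=216 HPoint=14 Sample=14
	}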
Signed-off-by: György Krajcsovits --- promql/engine_test.go | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go index cc5d0ee780..9da29c83ac 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -799,10 +799,10 @@ load 10s { Query: "metricWith1HistogramEvery10Seconds", Start: time.Unix(21, 0), - PeakSamples: 12, - TotalSamples: 12, // 1 histogram sample of size 12 / 10 seconds + PeakSamples: 13, + TotalSamples: 13, // 1 histogram HPoint of size 13 / 10 seconds TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 21000: 12, + 21000: 13, }, }, { @@ -818,7 +818,7 @@ load 10s { Query: "timestamp(metricWith1HistogramEvery10Seconds)", Start: time.Unix(21, 0), - PeakSamples: 13, // histogram size 12 + 1 extra because of timestamp + PeakSamples: 15, // histogram size 13 + 1 extra because Sample overhead + 1 float result TotalSamples: 1, // 1 float sample (because of timestamp) / 10 seconds TotalSamplesPerStep: stats.TotalSamplesPerStep{ 21000: 1, @@ -899,10 +899,10 @@ load 10s { Query: "metricWith1HistogramEvery10Seconds[60s]", Start: time.Unix(201, 0), - PeakSamples: 72, - TotalSamples: 72, // 1 histogram (size 12) / 10 seconds * 60 seconds + PeakSamples: 78, + TotalSamples: 78, // 1 histogram (size 13 HPoint) / 10 seconds * 60 seconds TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 201000: 72, + 201000: 78, }, }, { @@ -929,11 +929,11 @@ load 10s { Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]", Start: time.Unix(201, 0), - PeakSamples: 72, - TotalSamples: 312, // (1 histogram (size 12) / 10 seconds * 60 seconds) * 4 + 2 * 12 as + PeakSamples: 78, + TotalSamples: 338, // (1 histogram (size 13 HPoint) / 10 seconds * 60 seconds) * 4 + 2 * 13 as // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. 
TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 201000: 312, + 201000: 338, }, }, { @@ -948,10 +948,10 @@ load 10s { Query: "metricWith1HistogramEvery10Seconds[60s] @ 30", Start: time.Unix(201, 0), - PeakSamples: 48, - TotalSamples: 48, // @ modifier force the evaluation to at 30 seconds - So it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series + PeakSamples: 52, + TotalSamples: 52, // @ modifier force the evaluation to at 30 seconds - So it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 201000: 48, + 201000: 52, }, }, { @@ -1086,13 +1086,13 @@ load 10s Start: time.Unix(204, 0), End: time.Unix(223, 0), Interval: 5 * time.Second, - PeakSamples: 48, - TotalSamples: 48, // 1 histogram (size 12) per query * 4 steps + PeakSamples: 52, + TotalSamples: 52, // 1 histogram (size 13 HPoint) per query * 4 steps TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 204000: 12, // aligned to the step time, not the sample time - 209000: 12, - 214000: 12, - 219000: 12, + 204000: 13, // aligned to the step time, not the sample time + 209000: 13, + 214000: 13, + 219000: 13, }, }, { @@ -1116,8 +1116,8 @@ load 10s Start: time.Unix(201, 0), End: time.Unix(220, 0), Interval: 5 * time.Second, - PeakSamples: 16, - TotalSamples: 4, // 1 sample per query * 4 steps + PeakSamples: 18, // 13 histogram size + 1 extra because of Sample overhead + 4 float results + TotalSamples: 4, // 1 sample per query * 4 steps TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 1, 206000: 1, From 4eab18abd627fd02136af1bf217c9894a0c0893c Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Wed, 27 Mar 2024 18:40:59 +0100 Subject: [PATCH 04/30] [nhcb branch] Use single bit to differentiate between optimized bounds and floats (#13828) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Use single bit to differentiate between optimized bounds and floats Use one bit to decide what kind of data to read/write. This reduces storage need of floats from 72 bits to 65 bits and makes the integers store in 5 to 32 bits instead of 16. Signed-off-by: György Krajcsovits Signed-off-by: Jeanette Tan Signed-off-by: György Krajcsovits Signed-off-by: Jeanette Tan Signed-off-by: George Krajcsovits Co-authored-by: Jeanette Tan --- tsdb/chunkenc/bstream.go | 16 -------------- tsdb/chunkenc/histogram_meta.go | 31 +++++++++++++++++----------- tsdb/chunkenc/histogram_meta_test.go | 2 +- 3 files changed, 20 insertions(+), 29 deletions(-) diff --git a/tsdb/chunkenc/bstream.go b/tsdb/chunkenc/bstream.go index 6e13c40464..7b17f4686b 100644 --- a/tsdb/chunkenc/bstream.go +++ b/tsdb/chunkenc/bstream.go @@ -113,16 +113,6 @@ func (b *bstream) writeBits(u uint64, nbits int) { } } -// wrapper for the standard library's PutUvarint to make it work -// with our bstream. -func (b *bstream) putUvarint(x uint64) { - buf := make([]byte, 2) - l := binary.PutUvarint(buf, x) - for i := 0; i < l; i++ { - b.writeByte(buf[i]) - } -} - type bstreamReader struct { stream []byte streamOffset int // The offset from which read the next byte from the stream. @@ -267,9 +257,3 @@ func (b *bstreamReader) loadNextBuffer(nbits uint8) bool { return true } - -// wrapper for the standard library's ReadUvarint to make it work -// with our bstream. 
-func (b *bstreamReader) readUvarint() (uint64, error) { - return binary.ReadUvarint(b) -} diff --git a/tsdb/chunkenc/histogram_meta.go b/tsdb/chunkenc/histogram_meta.go index 9b65a05ba5..ba83dabc4c 100644 --- a/tsdb/chunkenc/histogram_meta.go +++ b/tsdb/chunkenc/histogram_meta.go @@ -183,29 +183,36 @@ func isWholeWhenMultiplied(in float64) bool { return in == out } -// putCustomBound writes the custom bound to the bstream. It stores values from 0 to -// 16.382 (inclusive) that are multiples of 0.001 in an unsigned var int of up to 2 bytes, -// but needs 1 bit + 8 bytes for other values like negative numbers, numbers greater than -// 16.382, or numbers that are not a multiple of 0.001, on the assumption that they are -// less common. In detail: +// putCustomBound writes a custom bound to the bstream. It stores values from +// 0 to 33554.430 (inclusive) that are multiples of 0.001 in unsigned varbit +// encoding of up to 4 bytes, but needs 1 bit + 8 bytes for other values like +// negative numbers, numbers greater than 33554.430, or numbers that are not +// a multiple of 0.001, on the assumption that they are less common. In detail: // - Multiply the bound by 1000, without rounding. -// - If the multiplied bound is >= 0, <= 16382 and a whole number, store it as an -// unsigned var int. -// - Otherwise, store 0 as an unsigned var int, followed by the 8 bytes of the original +// - If the multiplied bound is >= 0, <= 33554430 and a whole number, +// add 1 and store it in unsigned varbit encoding. All these numbers are +// greater than 0, so the leading bit of the varbit is always 1! +// - Otherwise, store a 0 bit, followed by the 8 bytes of the original // bound as a float64. +// +// When reading the values, we can first decode a value as unsigned varbit, +// if it's 0, then we read the next 8 bytes as a float64, otherwise +// we can convert the value to a float64 by subtracting 1 and dividing by 1000. func putCustomBound(b *bstream, f float64) { tf := f * 1000 - if tf < 0 || tf > 16382 || !isWholeWhenMultiplied(f) { - b.putUvarint(0) + // 33554431-1 comes from the maximum that can be stored in a varint in 4 + // bytes, other values are stored in 8 bytes anyway. + if tf < 0 || tf > 33554430 || !isWholeWhenMultiplied(f) { + b.writeBit(zero) b.writeBits(math.Float64bits(f), 64) return } - b.putUvarint(uint64(math.Round(tf) + 1)) + putVarbitUint(b, uint64(math.Round(tf))+1) } // readCustomBound reads the custom bound written with putCustomBound. 
func readCustomBound(br *bstreamReader) (float64, error) { - b, err := br.readUvarint() + b, err := readVarbitUint(br) if err != nil { return 0, err } diff --git a/tsdb/chunkenc/histogram_meta_test.go b/tsdb/chunkenc/histogram_meta_test.go index c3ff4aabc2..fdbd1825aa 100644 --- a/tsdb/chunkenc/histogram_meta_test.go +++ b/tsdb/chunkenc/histogram_meta_test.go @@ -427,7 +427,7 @@ func TestWriteReadHistogramChunkLayout(t *testing.T) { schema: histogram.CustomBucketsSchema, positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}}, negativeSpans: nil, - customValues: []float64{-5, -2.5, 0, 0.1, 0.25, 0.5, 1, 2, 5, 10, 25, 50, 100, 255, 500, 1000}, + customValues: []float64{-5, -2.5, 0, 0.1, 0.25, 0.5, 1, 2, 5, 10, 25, 50, 100, 255, 500, 1000, 50000, 1e7}, }, { schema: histogram.CustomBucketsSchema, From 81862aabd7eb972f1a202a896198565fe0251f41 Mon Sep 17 00:00:00 2001 From: zenador Date: Wed, 24 Apr 2024 15:36:05 +0800 Subject: [PATCH 05/30] [nhcb branch] Add basic unit tests for native histograms with custom buckets converted from classic histograms (#13794) * modify unit test framework to automatically generate native histograms with custom buckets from classic histogram series * add very basic tests for classic histogram converted into native histogram with custom bounds * fix histogram_quantile for native histograms with custom buckets * make loading with nhcb explicit * evaluate native histograms with custom buckets on queries with explicit keyword * use regex replacer * use temp histogram struct for automatically loading converted nhcb Signed-off-by: Jeanette Tan Signed-off-by: George Krajcsovits --- model/histogram/generic.go | 4 + promql/quantile.go | 16 +- promql/test.go | 361 +++++++++++++++++++++++++------- promql/testdata/histograms.test | 164 +++++++++++---- 4 files changed, 426 insertions(+), 119 deletions(-) diff --git a/model/histogram/generic.go b/model/histogram/generic.go index 025888ccae..7b770d83dd 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -34,6 +34,7 @@ var ( ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few") ErrHistogramCustomBucketsInvalid = errors.New("histogram custom bounds must be in strictly increasing order") + ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite") ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas") ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds") ) @@ -426,6 +427,9 @@ func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) } prev = curr } + if prev == math.Inf(1) { + return fmt.Errorf("last +Inf bound must not be explicitly defined: %w", ErrHistogramCustomBucketsInfinite) + } var spanBuckets int var totalSpanLength int diff --git a/promql/quantile.go b/promql/quantile.go index 6a225afb11..3bdaed1dd5 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -205,12 +205,15 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { for it.Next() { bucket = it.At() + if bucket.Count == 0 { + continue + } count += bucket.Count if count >= rank { break } } - if bucket.Lower < 0 && bucket.Upper > 0 { + if !h.UsesCustomBuckets() && bucket.Lower < 0 && bucket.Upper > 0 { switch { case 
len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0: // The result is in the zero bucket and the histogram has only @@ -221,6 +224,17 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { // negative buckets. So we consider 0 to be the upper bound. bucket.Upper = 0 } + } else if h.UsesCustomBuckets() { + if bucket.Lower == math.Inf(-1) { + // first bucket, with lower bound -Inf + if bucket.Upper <= 0 { + return bucket.Upper + } + bucket.Lower = 0 + } else if bucket.Upper == math.Inf(1) { + // last bucket, with upper bound +Inf + return bucket.Lower + } } // Due to numerical inaccuracies, we could end up with a higher count // than h.Count. Thus, make sure count is never higher than h.Count. diff --git a/promql/test.go b/promql/test.go index 296b3d3cad..b52043329d 100644 --- a/promql/test.go +++ b/promql/test.go @@ -20,6 +20,7 @@ import ( "fmt" "io/fs" "math" + "sort" "strconv" "strings" "testing" @@ -43,10 +44,35 @@ import ( var ( minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64. - patSpace = regexp.MustCompile("[\t ]+") - patLoad = regexp.MustCompile(`^load\s+(.+?)$`) - patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) - patEvalRange = regexp.MustCompile(`^eval(?:_(fail))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) + patSpace = regexp.MustCompile("[\t ]+") + patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`) + patEvalInstant = regexp.MustCompile(`^eval(?:_(with_nhcb))?(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) + patEvalRange = regexp.MustCompile(`^eval(?:_(fail))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) + histogramBucketReplacements = []struct { + pattern *regexp.Regexp + repl string + }{ + { + pattern: regexp.MustCompile(`_bucket\b`), + repl: "", + }, + { + pattern: regexp.MustCompile(`\s+by\s+\(le\)`), + repl: "", + }, + { + pattern: regexp.MustCompile(`\(le,\s*`), + repl: "(", + }, + { + pattern: regexp.MustCompile(`,\s*le,\s*`), + repl: ", ", + }, + { + pattern: regexp.MustCompile(`,\s*le\)`), + repl: ")", + }, + } ) const ( @@ -163,15 +189,18 @@ func raise(line int, format string, v ...interface{}) error { func parseLoad(lines []string, i int) (int, *loadCmd, error) { if !patLoad.MatchString(lines[i]) { - return i, nil, raise(i, "invalid load command. (load )") + return i, nil, raise(i, "invalid load command. (load[_with_nhcb] )") } parts := patLoad.FindStringSubmatch(lines[i]) - - gap, err := model.ParseDuration(parts[1]) + var ( + withNhcb = parts[1] == "with_nhcb" + step = parts[2] + ) + gap, err := model.ParseDuration(step) if err != nil { - return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err) + return i, nil, raise(i, "invalid step definition %q: %s", step, err) } - cmd := newLoadCmd(time.Duration(gap)) + cmd := newLoadCmd(time.Duration(gap), withNhcb) for i+1 < len(lines) { i++ defLine := lines[i] @@ -204,17 +233,19 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { rangeParts := patEvalRange.FindStringSubmatch(lines[i]) if instantParts == nil && rangeParts == nil { - return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_fail|_ordered] instant [at ] ' or 'eval[_fail] range from to step '") + return i, nil, raise(i, "invalid evaluation command. 
Must be either 'eval[_with_nhcb][_fail|_ordered] instant [at ] ' or 'eval[_fail] range from to step '") } isInstant := instantParts != nil + var withNhcb bool var mod string var expr string if isInstant { - mod = instantParts[1] - expr = instantParts[3] + withNhcb = instantParts[1] == "with_nhcb" + mod = instantParts[2] + expr = instantParts[4] } else { mod = rangeParts[1] expr = rangeParts[5] @@ -242,7 +273,7 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { var cmd *evalCmd if isInstant { - at := instantParts[2] + at := instantParts[3] offset, err := model.ParseDuration(at) if err != nil { return i, nil, formatErr("invalid timestamp definition %q: %s", at, err) @@ -284,6 +315,7 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { case "fail": cmd.fail = true } + cmd.withNhcb = withNhcb for j := 1; i+1 < len(lines); j++ { i++ @@ -338,7 +370,7 @@ func (t *test) parse(input string) error { switch c := strings.ToLower(patSpace.Split(l, 2)[0]); { case c == "clear": cmd = &clearCmd{} - case c == "load": + case strings.HasPrefix(c, "load"): i, cmd, err = parseLoad(lines, i) case strings.HasPrefix(c, "eval"): i, cmd, err = t.parseEval(lines, i) @@ -370,14 +402,16 @@ type loadCmd struct { metrics map[uint64]labels.Labels defs map[uint64][]Sample exemplars map[uint64][]exemplar.Exemplar + withNhcb bool } -func newLoadCmd(gap time.Duration) *loadCmd { +func newLoadCmd(gap time.Duration, withNhcb bool) *loadCmd { return &loadCmd{ gap: gap, metrics: map[uint64]labels.Labels{}, defs: map[uint64][]Sample{}, exemplars: map[uint64][]exemplar.Exemplar{}, + withNhcb: withNhcb, } } @@ -416,6 +450,167 @@ func (cmd *loadCmd) append(a storage.Appender) error { } } } + if cmd.withNhcb { + return cmd.appendCustomHistogram(a) + } + return nil +} + +func getHistogramMetricBase(m labels.Labels, suffix string) (labels.Labels, uint64) { + mName := m.Get(labels.MetricName) + baseM := labels.NewBuilder(m). + Set(labels.MetricName, strings.TrimSuffix(mName, suffix)). + Del(labels.BucketLabel). 
+ Labels() + hash := baseM.Hash() + return baseM, hash +} + +type tempHistogramWrapper struct { + metric labels.Labels + upperBounds []float64 + histByTs map[int64]tempHistogram +} + +func newTempHistogramWrapper() tempHistogramWrapper { + return tempHistogramWrapper{ + upperBounds: []float64{}, + histByTs: map[int64]tempHistogram{}, + } +} + +type tempHistogram struct { + bucketCounts map[float64]float64 + count float64 + sum float64 +} + +func newTempHistogram() tempHistogram { + return tempHistogram{ + bucketCounts: map[float64]float64{}, + } +} + +func processClassicHistogramSeries(m labels.Labels, suffix string, histMap map[uint64]tempHistogramWrapper, smpls []Sample, updateHistWrapper func(*tempHistogramWrapper), updateHist func(*tempHistogram, float64)) { + m2, m2hash := getHistogramMetricBase(m, suffix) + histWrapper, exists := histMap[m2hash] + if !exists { + histWrapper = newTempHistogramWrapper() + } + histWrapper.metric = m2 + if updateHistWrapper != nil { + updateHistWrapper(&histWrapper) + } + for _, s := range smpls { + if s.H != nil { + continue + } + hist, exists := histWrapper.histByTs[s.T] + if !exists { + hist = newTempHistogram() + } + updateHist(&hist, s.F) + histWrapper.histByTs[s.T] = hist + } + histMap[m2hash] = histWrapper +} + +func processUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64) ([]float64, *histogram.FloatHistogram) { + sort.Float64s(upperBounds0) + upperBounds := make([]float64, 0, len(upperBounds0)) + prevLe := math.Inf(-1) + for _, le := range upperBounds0 { + if le != prevLe { // deduplicate + upperBounds = append(upperBounds, le) + prevLe = le + } + } + var customBounds []float64 + if upperBounds[len(upperBounds)-1] == math.Inf(1) { + customBounds = upperBounds[:len(upperBounds)-1] + } else { + customBounds = upperBounds + } + return upperBounds, &histogram.FloatHistogram{ + Count: 0, + Sum: 0, + Schema: histogram.CustomBucketsSchema, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: uint32(len(upperBounds))}, + }, + PositiveBuckets: make([]float64, len(upperBounds)), + CustomValues: customBounds, + } +} + +// If classic histograms are defined, convert them into native histograms with custom +// bounds and append the defined time series to the storage. +func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { + histMap := map[uint64]tempHistogramWrapper{} + + // Go through all the time series to collate classic histogram data + // and organise them by timestamp. + for hash, smpls := range cmd.defs { + m := cmd.metrics[hash] + mName := m.Get(labels.MetricName) + switch { + case strings.HasSuffix(mName, "_bucket") && m.Has(labels.BucketLabel): + le, err := strconv.ParseFloat(m.Get(labels.BucketLabel), 64) + if err != nil || math.IsNaN(le) { + continue + } + processClassicHistogramSeries(m, "_bucket", histMap, smpls, func(histWrapper *tempHistogramWrapper) { + histWrapper.upperBounds = append(histWrapper.upperBounds, le) + }, func(hist *tempHistogram, f float64) { + hist.bucketCounts[le] = f + }) + case strings.HasSuffix(mName, "_count"): + processClassicHistogramSeries(m, "_count", histMap, smpls, nil, func(hist *tempHistogram, f float64) { + hist.count = f + }) + case strings.HasSuffix(mName, "_sum"): + processClassicHistogramSeries(m, "_sum", histMap, smpls, nil, func(hist *tempHistogram, f float64) { + hist.sum = f + }) + } + } + + // Convert the collated classic histogram data into native histograms + // with custom bounds and append them to the storage. 
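	// Editorial worked example (not part of this patch): for one timestamp,
	// cumulative classic buckets le=0.1 => 2, le=1 => 5, le=+Inf => 6 become
	// CustomValues [0.1, 1] (the trailing +Inf bucket stays implicit) and
	// PositiveBuckets [2, 3, 1] (per-bucket deltas). Sum comes from the _sum
	// series, and Count from the _count series when it is non-zero, otherwise
	// from the summed deltas.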
+ for _, histWrapper := range histMap { + upperBounds, fhBase := processUpperBoundsAndCreateBaseHistogram(histWrapper.upperBounds) + samples := make([]Sample, 0, len(histWrapper.histByTs)) + for t, hist := range histWrapper.histByTs { + fh := fhBase.Copy() + var prevCount, total float64 + for i, le := range upperBounds { + currCount, exists := hist.bucketCounts[le] + if !exists { + currCount = 0 + } + count := currCount - prevCount + fh.PositiveBuckets[i] = count + total += count + prevCount = currCount + } + fh.Sum = hist.sum + if hist.count != 0 { + total = hist.count + } + fh.Count = total + s := Sample{T: t, H: fh.Compact(0)} + if err := s.H.Validate(); err != nil { + return err + } + samples = append(samples, s) + } + sort.Slice(samples, func(i, j int) bool { return samples[i].T < samples[j].T }) + for _, s := range samples { + if err := appendSample(a, s, histWrapper.metric); err != nil { + return err + } + } + } return nil } @@ -443,6 +638,7 @@ type evalCmd struct { isRange bool // if false, instant query fail, ordered bool + withNhcb bool metrics map[uint64]labels.Labels expected map[uint64]entry @@ -796,72 +992,89 @@ func (t *test) execInstantEval(cmd *evalCmd, engine QueryEngine) error { } queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...) for _, iq := range queries { - q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime) - if err != nil { + if err := t.runInstantQuery(iq, cmd, engine); err != nil { return err } - defer q.Close() - res := q.Exec(t.context) - if res.Err != nil { - if cmd.fail { - continue + if cmd.withNhcb { + if !strings.Contains(iq.expr, "_bucket") { + return fmt.Errorf("expected _bucket in the expression %q", iq.expr) } - return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err) - } - if res.Err == nil && cmd.fail { - return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) - } - err = cmd.compareResult(res.Value) - if err != nil { - return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err) - } - - // Check query returns same result in range mode, - // by checking against the middle step. - q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute) - if err != nil { - return err - } - rangeRes := q.Exec(t.context) - if rangeRes.Err != nil { - return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err) - } - defer q.Close() - if cmd.ordered { - // Range queries are always sorted by labels, so skip this test case that expects results in a particular order. - continue - } - mat := rangeRes.Value.(Matrix) - if err := assertMatrixSorted(mat); err != nil { - return err - } - - vec := make(Vector, 0, len(mat)) - for _, series := range mat { - // We expect either Floats or Histograms. 
- for _, point := range series.Floats { - if point.T == timeMilliseconds(iq.evalTime) { - vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F}) - break - } + for _, rep := range histogramBucketReplacements { + iq.expr = rep.pattern.ReplaceAllString(iq.expr, rep.repl) } - for _, point := range series.Histograms { - if point.T == timeMilliseconds(iq.evalTime) { - vec = append(vec, Sample{Metric: series.Metric, T: point.T, H: point.H}) - break - } + if err := t.runInstantQuery(iq, cmd, engine); err != nil { + return err } } - if _, ok := res.Value.(Scalar); ok { - err = cmd.compareResult(Scalar{V: vec[0].F}) - } else { - err = cmd.compareResult(vec) - } - if err != nil { - return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err) - } } + return nil +} +func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine QueryEngine) error { + q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime) + if err != nil { + return err + } + defer q.Close() + res := q.Exec(t.context) + if res.Err != nil { + if cmd.fail { + return nil + } + return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err) + } + if res.Err == nil && cmd.fail { + return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) + } + err = cmd.compareResult(res.Value) + if err != nil { + return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err) + } + + // Check query returns same result in range mode, + // by checking against the middle step. + q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute) + if err != nil { + return err + } + rangeRes := q.Exec(t.context) + if rangeRes.Err != nil { + return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err) + } + defer q.Close() + if cmd.ordered { + // Range queries are always sorted by labels, so skip this test case that expects results in a particular order. + return nil + } + mat := rangeRes.Value.(Matrix) + if err := assertMatrixSorted(mat); err != nil { + return err + } + + vec := make(Vector, 0, len(mat)) + for _, series := range mat { + // We expect either Floats or Histograms. + for _, point := range series.Floats { + if point.T == timeMilliseconds(iq.evalTime) { + vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F}) + break + } + } + for _, point := range series.Histograms { + if point.T == timeMilliseconds(iq.evalTime) { + vec = append(vec, Sample{Metric: series.Metric, T: point.T, H: point.H}) + break + } + } + } + if _, ok := res.Value.(Scalar); ok { + err = cmd.compareResult(Scalar{V: vec[0].F}) + } else { + err = cmd.compareResult(vec) + } + if err != nil { + return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err) + } return nil } @@ -975,7 +1188,7 @@ func (ll *LazyLoader) parse(input string) error { if len(l) == 0 { continue } - if strings.ToLower(patSpace.Split(l, 2)[0]) == "load" { + if strings.HasPrefix(strings.ToLower(patSpace.Split(l, 2)[0]), "load") { _, cmd, err := parseLoad(lines, i) if err != nil { return err diff --git a/promql/testdata/histograms.test b/promql/testdata/histograms.test index f30c07e7b3..0aab3117dc 100644 --- a/promql/testdata/histograms.test +++ b/promql/testdata/histograms.test @@ -5,7 +5,7 @@ # server has to cope with it. # Test histogram. 
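# (Editorial note, not part of this patch.) load_with_nhcb loads the classic
# _bucket/_sum/_count series as given and additionally converts each classic
# histogram into a single native histogram with custom buckets, so that
# eval_with_nhcb can run every histogram_quantile query twice: once verbatim
# against the _bucket series and once rewritten against the converted series.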
-load 5m +load_with_nhcb 5m testhistogram_bucket{le="0.1", start="positive"} 0+5x10 testhistogram_bucket{le=".2", start="positive"} 0+7x10 testhistogram_bucket{le="1e0", start="positive"} 0+11x10 @@ -18,15 +18,34 @@ load 5m # Another test histogram, where q(1/6), q(1/2), and q(5/6) are each in # the middle of a bucket and should therefore be 1, 3, and 5, # respectively. -load 5m +load_with_nhcb 5m testhistogram2_bucket{le="0"} 0+0x10 testhistogram2_bucket{le="2"} 0+1x10 testhistogram2_bucket{le="4"} 0+2x10 testhistogram2_bucket{le="6"} 0+3x10 testhistogram2_bucket{le="+Inf"} 0+3x10 +# Another test histogram, where there are 0 counts where there is +# an infinite bound, allowing us to calculate standard deviation +# and variance properly. +load_with_nhcb 5m + testhistogram3_bucket{le="0", start="positive"} 0+0x10 + testhistogram3_bucket{le="0.1", start="positive"} 0+5x10 + testhistogram3_bucket{le=".2", start="positive"} 0+7x10 + testhistogram3_bucket{le="1e0", start="positive"} 0+11x10 + testhistogram3_bucket{le="+Inf", start="positive"} 0+11x10 + testhistogram3_sum{start="positive"} 0+33x10 + testhistogram3_count{start="positive"} 0+11x10 + testhistogram3_bucket{le="-.25", start="negative"} 0+0x10 + testhistogram3_bucket{le="-.2", start="negative"} 0+1x10 + testhistogram3_bucket{le="-0.1", start="negative"} 0+2x10 + testhistogram3_bucket{le="0.3", start="negative"} 0+2x10 + testhistogram3_bucket{le="+Inf", start="negative"} 0+2x10 + testhistogram3_sum{start="negative"} 0+8x10 + testhistogram3_count{start="negative"} 0+2x10 + # Now a more realistic histogram per job and instance to test aggregation. -load 5m +load_with_nhcb 5m request_duration_seconds_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10 request_duration_seconds_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10 request_duration_seconds_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10 @@ -41,7 +60,7 @@ load 5m request_duration_seconds_bucket{job="job2", instance="ins2", le="+Inf"} 0+9x10 # Different le representations in one histogram. -load 5m +load_with_nhcb 5m mixed_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10 mixed_bucket{job="job1", instance="ins1", le="0.2"} 0+1x10 mixed_bucket{job="job1", instance="ins1", le="2e-1"} 0+1x10 @@ -50,133 +69,186 @@ load 5m mixed_bucket{job="job1", instance="ins2", le="+inf"} 0+0x10 mixed_bucket{job="job1", instance="ins2", le="+Inf"} 0+0x10 +# Test histogram_count. +eval instant at 50m histogram_count(testhistogram3) + {start="positive"} 110 + {start="negative"} 20 + +# Test histogram_sum. +eval instant at 50m histogram_sum(testhistogram3) + {start="positive"} 330 + {start="negative"} 80 + +# Test histogram_avg. +eval instant at 50m histogram_avg(testhistogram3) + {start="positive"} 3 + {start="negative"} 4 + +# Test histogram_stddev. +eval instant at 50m histogram_stddev(testhistogram3) + {start="positive"} 2.8189265757336734 + {start="negative"} 4.182715937754936 + +# Test histogram_stdvar. +eval instant at 50m histogram_stdvar(testhistogram3) + {start="positive"} 7.946347039377573 + {start="negative"} 17.495112615949154 + +# Test histogram_fraction. + +eval instant at 50m histogram_fraction(0, 0.2, testhistogram3) + {start="positive"} 0.6363636363636364 + {start="negative"} 0 + +eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m])) + {start="positive"} 0.6363636363636364 + {start="negative"} 0 + +# Test histogram_quantile. 
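# For example (editorial worked example, not part of this patch), the 0.25
# quantile of the "positive" series at 50m is 0.055: the rank 0.25*110 = 27.5
# falls 27.5/50 of the way through the (0, 0.1] bucket, giving 0.1*27.5/50.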
+ +eval_with_nhcb instant at 50m histogram_quantile(0, testhistogram3_bucket) + {start="positive"} 0 + {start="negative"} -0.25 + +eval_with_nhcb instant at 50m histogram_quantile(0.25, testhistogram3_bucket) + {start="positive"} 0.055 + {start="negative"} -0.225 + +eval_with_nhcb instant at 50m histogram_quantile(0.5, testhistogram3_bucket) + {start="positive"} 0.125 + {start="negative"} -0.2 + +eval_with_nhcb instant at 50m histogram_quantile(0.75, testhistogram3_bucket) + {start="positive"} 0.45 + {start="negative"} -0.15 + +eval_with_nhcb instant at 50m histogram_quantile(1, testhistogram3_bucket) + {start="positive"} 1 + {start="negative"} -0.1 + # Quantile too low. -eval instant at 50m histogram_quantile(-0.1, testhistogram_bucket) +eval_with_nhcb instant at 50m histogram_quantile(-0.1, testhistogram_bucket) {start="positive"} -Inf {start="negative"} -Inf # Quantile too high. -eval instant at 50m histogram_quantile(1.01, testhistogram_bucket) +eval_with_nhcb instant at 50m histogram_quantile(1.01, testhistogram_bucket) {start="positive"} +Inf {start="negative"} +Inf # Quantile invalid. -eval instant at 50m histogram_quantile(NaN, testhistogram_bucket) +eval_with_nhcb instant at 50m histogram_quantile(NaN, testhistogram_bucket) {start="positive"} NaN {start="negative"} NaN -# Quantile value in lowest bucket, which is positive. -eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="positive"}) +# Quantile value in lowest bucket. +eval_with_nhcb instant at 50m histogram_quantile(0, testhistogram_bucket) {start="positive"} 0 - -# Quantile value in lowest bucket, which is negative. -eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="negative"}) {start="negative"} -0.2 # Quantile value in highest bucket. -eval instant at 50m histogram_quantile(1, testhistogram_bucket) +eval_with_nhcb instant at 50m histogram_quantile(1, testhistogram_bucket) {start="positive"} 1 {start="negative"} 0.3 # Finally some useful quantiles. -eval instant at 50m histogram_quantile(0.2, testhistogram_bucket) +eval_with_nhcb instant at 50m histogram_quantile(0.2, testhistogram_bucket) {start="positive"} 0.048 {start="negative"} -0.2 - -eval instant at 50m histogram_quantile(0.5, testhistogram_bucket) +eval_with_nhcb instant at 50m histogram_quantile(0.5, testhistogram_bucket) {start="positive"} 0.15 {start="negative"} -0.15 -eval instant at 50m histogram_quantile(0.8, testhistogram_bucket) +eval_with_nhcb instant at 50m histogram_quantile(0.8, testhistogram_bucket) {start="positive"} 0.72 {start="negative"} 0.3 # More realistic with rates. -eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m])) +eval_with_nhcb instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m])) {start="positive"} 0.048 {start="negative"} -0.2 -eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m])) +eval_with_nhcb instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m])) {start="positive"} 0.15 {start="negative"} -0.15 -eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) +eval_with_nhcb instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) {start="positive"} 0.72 {start="negative"} 0.3 # Want results exactly in the middle of the bucket. 
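# Editorial worked example (not part of this patch): at 7m testhistogram2 has
# cumulative counts 0/1/2/3 at le=0/2/4/6, so the 1/6 quantile has rank
# (1/6)*3 = 0.5 and lands halfway through the (0,2] bucket, i.e. at 1; 0.5 and
# 5/6 likewise land mid-bucket at 3 and 5. The converted native histogram has
# custom bounds [0 2 4 6] with per-bucket counts [0 1 1 1 0] and gives the
# same results.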
-eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket) +eval_with_nhcb instant at 7m histogram_quantile(1./6., testhistogram2_bucket) {} 1 -eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket) +eval_with_nhcb instant at 7m histogram_quantile(0.5, testhistogram2_bucket) {} 3 -eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket) +eval_with_nhcb instant at 7m histogram_quantile(5./6., testhistogram2_bucket) {} 5 -eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m])) +eval_with_nhcb instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m])) {} 1 -eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m])) +eval_with_nhcb instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m])) {} 3 -eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) +eval_with_nhcb instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) {} 5 # Aggregated histogram: Everything in one. -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +eval_with_nhcb instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.075 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +eval_with_nhcb instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.1277777777777778 # Aggregated histogram: Everything in one. Now with avg, which does not change anything. -eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +eval_with_nhcb instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.075 -eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +eval_with_nhcb instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.12777777777777778 # Aggregated histogram: By instance. -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +eval_with_nhcb instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) {instance="ins1"} 0.075 {instance="ins2"} 0.075 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +eval_with_nhcb instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 # Aggregated histogram: By job. -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +eval_with_nhcb instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) {job="job1"} 0.1 {job="job2"} 0.0642857142857143 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +eval_with_nhcb instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) {job="job1"} 0.14 {job="job2"} 0.1125 # Aggregated histogram: By job and instance. 
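# (Editorial note, not part of this patch.) When re-run under eval_with_nhcb,
# the queries below have the _bucket suffix dropped and le removed from the
# grouping, e.g. histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job, instance)).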
-eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +eval_with_nhcb instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +eval_with_nhcb instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.1166666666666667 # The unaggregated histogram for comparison. Same result as the previous one. -eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m])) +eval_with_nhcb instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m])) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m])) +eval_with_nhcb instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 @@ -209,27 +281,31 @@ eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(mixed[5m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m])) +eval_with_nhcb instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN -load 5m +eval_with_nhcb instant at 50m histogram_quantile(1, rate(mixed_bucket[5m])) + {instance="ins1", job="job1"} 0.2 + {instance="ins2", job="job1"} NaN + +load_with_nhcb 5m empty_bucket{le="0.1", job="job1", instance="ins1"} 0x10 empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10 empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10 -eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m])) +eval_with_nhcb instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m])) {instance="ins1", job="job1"} NaN # Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set # https://github.com/prometheus/prometheus/issues/9910 -load 5m +load_with_nhcb 5m request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10 request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10 request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10 -eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration.*"}) +eval_with_nhcb_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket$"}) From 545a7e48f980ef6249dc5eee321e53c74c5d0693 Mon Sep 17 00:00:00 2001 From: zenador Date: Wed, 8 May 2024 19:58:24 +0800 Subject: [PATCH 06/30] [nhcb branch] Enhance unit test framework for nhcb and warnings (#14009) * process custom values in histogram unit test framework * check for warnings when 
evaluating in unit test framework * add test cases for custom buckets in test framework Signed-off-by: Jeanette Tan --- model/histogram/float_histogram.go | 3 + promql/parser/generated_parser.y | 6 + promql/parser/generated_parser.y.go | 697 +++++++++++++------------ promql/parser/lex.go | 19 +- promql/parser/parse.go | 9 + promql/test.go | 30 +- promql/testdata/aggregators.test | 2 +- promql/testdata/functions.test | 6 +- promql/testdata/histograms.test | 9 +- promql/testdata/native_histograms.test | 15 + util/annotations/annotations.go | 13 + 11 files changed, 443 insertions(+), 366 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 58f13c8cf3..5431892dd1 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -225,6 +225,9 @@ func (h *FloatHistogram) TestExpression() string { if m.ZeroThreshold != 0 { res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold)) } + if m.UsesCustomBuckets() { + res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues)) + } addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { if len(spans) > 1 { diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index 841bd31c19..b39c1150a5 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -84,6 +84,7 @@ BUCKETS_DESC NEGATIVE_BUCKETS_DESC ZERO_BUCKET_DESC ZERO_BUCKET_WIDTH_DESC +CUSTOM_VALUES_DESC %token histogramDescEnd // Operators. @@ -797,6 +798,11 @@ histogram_desc_item $$ = yylex.(*parser).newMap() $$["z_bucket_w"] = $3 } + | CUSTOM_VALUES_DESC COLON bucket_set + { + $$ = yylex.(*parser).newMap() + $$["custom_values"] = $3 + } | BUCKETS_DESC COLON bucket_set { $$ = yylex.(*parser).newMap() diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 3075b9b1b1..d9a312a137 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -67,62 +67,63 @@ const BUCKETS_DESC = 57375 const NEGATIVE_BUCKETS_DESC = 57376 const ZERO_BUCKET_DESC = 57377 const ZERO_BUCKET_WIDTH_DESC = 57378 -const histogramDescEnd = 57379 -const operatorsStart = 57380 -const ADD = 57381 -const DIV = 57382 -const EQLC = 57383 -const EQL_REGEX = 57384 -const GTE = 57385 -const GTR = 57386 -const LAND = 57387 -const LOR = 57388 -const LSS = 57389 -const LTE = 57390 -const LUNLESS = 57391 -const MOD = 57392 -const MUL = 57393 -const NEQ = 57394 -const NEQ_REGEX = 57395 -const POW = 57396 -const SUB = 57397 -const AT = 57398 -const ATAN2 = 57399 -const operatorsEnd = 57400 -const aggregatorsStart = 57401 -const AVG = 57402 -const BOTTOMK = 57403 -const COUNT = 57404 -const COUNT_VALUES = 57405 -const GROUP = 57406 -const MAX = 57407 -const MIN = 57408 -const QUANTILE = 57409 -const STDDEV = 57410 -const STDVAR = 57411 -const SUM = 57412 -const TOPK = 57413 -const aggregatorsEnd = 57414 -const keywordsStart = 57415 -const BOOL = 57416 -const BY = 57417 -const GROUP_LEFT = 57418 -const GROUP_RIGHT = 57419 -const IGNORING = 57420 -const OFFSET = 57421 -const ON = 57422 -const WITHOUT = 57423 -const keywordsEnd = 57424 -const preprocessorStart = 57425 -const START = 57426 -const END = 57427 -const preprocessorEnd = 57428 -const startSymbolsStart = 57429 -const START_METRIC = 57430 -const START_SERIES_DESCRIPTION = 57431 -const START_EXPRESSION = 57432 -const START_METRIC_SELECTOR = 57433 -const startSymbolsEnd = 57434 +const CUSTOM_VALUES_DESC = 57379 +const histogramDescEnd = 
57380 +const operatorsStart = 57381 +const ADD = 57382 +const DIV = 57383 +const EQLC = 57384 +const EQL_REGEX = 57385 +const GTE = 57386 +const GTR = 57387 +const LAND = 57388 +const LOR = 57389 +const LSS = 57390 +const LTE = 57391 +const LUNLESS = 57392 +const MOD = 57393 +const MUL = 57394 +const NEQ = 57395 +const NEQ_REGEX = 57396 +const POW = 57397 +const SUB = 57398 +const AT = 57399 +const ATAN2 = 57400 +const operatorsEnd = 57401 +const aggregatorsStart = 57402 +const AVG = 57403 +const BOTTOMK = 57404 +const COUNT = 57405 +const COUNT_VALUES = 57406 +const GROUP = 57407 +const MAX = 57408 +const MIN = 57409 +const QUANTILE = 57410 +const STDDEV = 57411 +const STDVAR = 57412 +const SUM = 57413 +const TOPK = 57414 +const aggregatorsEnd = 57415 +const keywordsStart = 57416 +const BOOL = 57417 +const BY = 57418 +const GROUP_LEFT = 57419 +const GROUP_RIGHT = 57420 +const IGNORING = 57421 +const OFFSET = 57422 +const ON = 57423 +const WITHOUT = 57424 +const keywordsEnd = 57425 +const preprocessorStart = 57426 +const START = 57427 +const END = 57428 +const preprocessorEnd = 57429 +const startSymbolsStart = 57430 +const START_METRIC = 57431 +const START_SERIES_DESCRIPTION = 57432 +const START_EXPRESSION = 57433 +const START_METRIC_SELECTOR = 57434 +const startSymbolsEnd = 57435 var yyToknames = [...]string{ "$end", @@ -161,6 +162,7 @@ var yyToknames = [...]string{ "NEGATIVE_BUCKETS_DESC", "ZERO_BUCKET_DESC", "ZERO_BUCKET_WIDTH_DESC", + "CUSTOM_VALUES_DESC", "histogramDescEnd", "operatorsStart", "ADD", @@ -235,270 +237,273 @@ var yyExca = [...]int16{ 24, 134, -2, 0, -1, 58, - 2, 171, - 15, 171, - 75, 171, - 81, 171, - -2, 100, - -1, 59, 2, 172, 15, 172, - 75, 172, - 81, 172, - -2, 101, - -1, 60, + 76, 172, + 82, 172, + -2, 100, + -1, 59, 2, 173, 15, 173, - 75, 173, - 81, 173, - -2, 103, - -1, 61, + 76, 173, + 82, 173, + -2, 101, + -1, 60, 2, 174, 15, 174, - 75, 174, - 81, 174, - -2, 104, - -1, 62, + 76, 174, + 82, 174, + -2, 103, + -1, 61, 2, 175, 15, 175, - 75, 175, - 81, 175, - -2, 105, - -1, 63, + 76, 175, + 82, 175, + -2, 104, + -1, 62, 2, 176, 15, 176, - 75, 176, - 81, 176, - -2, 110, - -1, 64, + 76, 176, + 82, 176, + -2, 105, + -1, 63, 2, 177, 15, 177, - 75, 177, - 81, 177, - -2, 112, - -1, 65, + 76, 177, + 82, 177, + -2, 110, + -1, 64, 2, 178, 15, 178, - 75, 178, - 81, 178, - -2, 114, - -1, 66, + 76, 178, + 82, 178, + -2, 112, + -1, 65, 2, 179, 15, 179, - 75, 179, - 81, 179, - -2, 115, - -1, 67, + 76, 179, + 82, 179, + -2, 114, + -1, 66, 2, 180, 15, 180, - 75, 180, - 81, 180, - -2, 116, - -1, 68, + 76, 180, + 82, 180, + -2, 115, + -1, 67, 2, 181, 15, 181, - 75, 181, - 81, 181, - -2, 117, - -1, 69, + 76, 181, + 82, 181, + -2, 116, + -1, 68, 2, 182, 15, 182, - 75, 182, - 81, 182, + 76, 182, + 82, 182, + -2, 117, + -1, 69, + 2, 183, + 15, 183, + 76, 183, + 82, 183, -2, 118, -1, 195, - 12, 230, - 13, 230, - 18, 230, - 19, 230, - 25, 230, - 39, 230, - 45, 230, - 46, 230, - 49, 230, - 55, 230, - 60, 230, - 61, 230, - 62, 230, - 63, 230, - 64, 230, - 65, 230, - 66, 230, - 67, 230, - 68, 230, - 69, 230, - 70, 230, - 71, 230, - 75, 230, - 79, 230, - 81, 230, - 84, 230, - 85, 230, + 12, 231, + 13, 231, + 18, 231, + 19, 231, + 25, 231, + 40, 231, + 46, 231, + 47, 231, + 50, 231, + 56, 231, + 61, 231, + 62, 231, + 63, 231, + 64, 231, + 65, 231, + 66, 231, + 67, 231, + 68, 231, + 69, 231, + 70, 231, + 71, 231, + 72, 231, + 76, 231, + 80, 231, + 82, 231, + 85, 231, + 86, 231, -2, 0, -1, 196, - 12, 230, - 13, 230, - 18, 230, - 19, 230, - 25, 230, - 39, 230, - 45, 230, - 46, 230, - 49, 230, - 55, 
230, - 60, 230, - 61, 230, - 62, 230, - 63, 230, - 64, 230, - 65, 230, - 66, 230, - 67, 230, - 68, 230, - 69, 230, - 70, 230, - 71, 230, - 75, 230, - 79, 230, - 81, 230, - 84, 230, - 85, 230, + 12, 231, + 13, 231, + 18, 231, + 19, 231, + 25, 231, + 40, 231, + 46, 231, + 47, 231, + 50, 231, + 56, 231, + 61, 231, + 62, 231, + 63, 231, + 64, 231, + 65, 231, + 66, 231, + 67, 231, + 68, 231, + 69, 231, + 70, 231, + 71, 231, + 72, 231, + 76, 231, + 80, 231, + 82, 231, + 85, 231, + 86, 231, -2, 0, -1, 217, - 21, 228, - -2, 0, - -1, 285, 21, 229, -2, 0, + -1, 286, + 21, 230, + -2, 0, } const yyPrivate = 57344 -const yyLast = 742 +const yyLast = 778 var yyAct = [...]int16{ - 151, 322, 320, 268, 327, 148, 221, 37, 187, 144, - 281, 280, 152, 113, 77, 173, 104, 102, 101, 6, - 128, 223, 105, 193, 155, 194, 195, 196, 339, 262, - 260, 233, 317, 316, 57, 100, 294, 239, 103, 146, - 300, 313, 263, 156, 156, 283, 147, 338, 259, 123, - 337, 106, 252, 311, 155, 299, 340, 301, 264, 157, - 157, 108, 298, 109, 235, 236, 292, 251, 237, 107, - 155, 292, 174, 191, 175, 96, 250, 99, 258, 224, + 151, 324, 322, 268, 329, 148, 221, 37, 187, 144, + 282, 281, 152, 113, 77, 173, 104, 102, 101, 6, + 223, 193, 105, 194, 195, 196, 128, 262, 260, 155, + 233, 103, 342, 293, 100, 319, 239, 116, 146, 318, + 315, 263, 156, 123, 106, 147, 284, 114, 295, 116, + 156, 341, 175, 259, 340, 253, 57, 264, 157, 114, + 117, 108, 313, 109, 235, 236, 157, 112, 237, 107, + 323, 174, 117, 175, 155, 96, 250, 99, 293, 224, 226, 228, 229, 230, 238, 240, 243, 244, 245, 246, - 247, 110, 145, 225, 227, 231, 232, 234, 241, 242, - 98, 257, 321, 248, 249, 2, 3, 4, 5, 218, - 158, 104, 177, 217, 168, 162, 165, 105, 175, 160, - 164, 161, 176, 178, 189, 213, 106, 328, 216, 256, - 183, 179, 192, 163, 181, 100, 190, 197, 198, 199, + 247, 177, 145, 225, 227, 231, 232, 234, 241, 242, + 98, 176, 178, 248, 249, 104, 2, 3, 4, 5, + 158, 105, 177, 110, 168, 162, 165, 302, 150, 160, + 191, 161, 176, 178, 189, 155, 213, 343, 106, 330, + 72, 179, 192, 33, 181, 155, 190, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 255, 182, 72, 212, 177, 214, 215, 33, - 82, 84, 85, 7, 86, 87, 176, 178, 90, 91, - 223, 93, 94, 95, 116, 96, 97, 99, 83, 147, - 233, 286, 289, 116, 114, 254, 239, 288, 147, 172, - 220, 124, 253, 114, 171, 310, 309, 117, 120, 261, - 98, 112, 287, 119, 278, 279, 117, 170, 282, 10, - 308, 159, 307, 235, 236, 312, 118, 237, 147, 74, - 306, 305, 304, 303, 302, 250, 81, 285, 224, 226, - 228, 229, 230, 238, 240, 243, 244, 245, 246, 247, - 79, 79, 225, 227, 231, 232, 234, 241, 242, 48, - 78, 78, 248, 249, 122, 73, 121, 150, 180, 76, - 290, 291, 293, 56, 295, 8, 9, 9, 34, 35, - 1, 284, 296, 297, 155, 129, 130, 131, 132, 133, + 210, 211, 185, 301, 258, 212, 156, 214, 215, 188, + 256, 183, 290, 191, 252, 164, 155, 289, 300, 218, + 223, 79, 157, 217, 7, 299, 312, 257, 163, 251, + 233, 78, 288, 255, 182, 254, 239, 156, 216, 180, + 220, 124, 172, 120, 147, 311, 314, 171, 119, 261, + 287, 153, 154, 157, 279, 280, 79, 147, 283, 310, + 170, 118, 159, 10, 235, 236, 78, 309, 237, 147, + 308, 307, 306, 74, 76, 305, 250, 286, 304, 224, + 226, 228, 229, 230, 238, 240, 243, 244, 245, 246, + 247, 303, 81, 225, 227, 231, 232, 234, 241, 242, + 48, 34, 1, 248, 249, 122, 73, 121, 285, 47, + 291, 292, 294, 56, 296, 8, 9, 9, 46, 35, + 45, 44, 297, 298, 127, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, - 47, 46, 45, 44, 156, 314, 315, 127, 43, 42, - 41, 185, 319, 125, 166, 324, 325, 326, 188, 323, - 157, 329, 191, 331, 330, 155, 
40, 126, 332, 333, - 100, 51, 72, 334, 53, 39, 38, 22, 52, 336, - 49, 167, 186, 335, 54, 156, 265, 80, 341, 153, - 154, 184, 219, 75, 115, 82, 84, 149, 70, 55, - 222, 157, 50, 111, 18, 19, 93, 94, 20, 0, - 96, 97, 99, 83, 71, 0, 0, 0, 0, 58, + 43, 42, 41, 125, 166, 40, 316, 317, 126, 39, + 38, 49, 186, 321, 338, 265, 326, 327, 328, 80, + 325, 184, 219, 332, 331, 334, 333, 75, 115, 149, + 335, 336, 100, 51, 72, 337, 53, 55, 222, 22, + 52, 339, 50, 167, 111, 0, 54, 0, 0, 0, + 0, 344, 0, 0, 0, 0, 0, 0, 82, 84, + 0, 70, 0, 0, 0, 0, 0, 18, 19, 93, + 94, 20, 0, 96, 97, 99, 83, 71, 0, 0, + 0, 0, 58, 59, 60, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 0, 0, 0, 13, 98, 0, + 0, 24, 0, 30, 0, 0, 31, 32, 36, 100, + 51, 72, 0, 53, 267, 0, 22, 52, 0, 0, + 0, 266, 0, 54, 0, 270, 271, 269, 276, 278, + 275, 277, 272, 273, 274, 0, 84, 0, 70, 0, + 0, 0, 0, 0, 18, 19, 93, 94, 20, 0, + 96, 0, 99, 83, 71, 0, 0, 0, 0, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 0, 0, 0, 13, 98, 0, 0, 24, 0, - 30, 0, 0, 31, 32, 36, 100, 51, 72, 0, - 53, 267, 0, 22, 52, 0, 0, 0, 266, 0, - 54, 0, 270, 271, 269, 275, 277, 274, 276, 272, - 273, 0, 84, 0, 70, 0, 0, 0, 0, 0, - 18, 19, 93, 94, 20, 0, 96, 0, 99, 83, - 71, 0, 0, 0, 0, 58, 59, 60, 61, 62, - 63, 64, 65, 66, 67, 68, 69, 0, 0, 0, - 13, 98, 0, 0, 24, 0, 30, 0, 0, 31, - 32, 51, 72, 0, 53, 318, 0, 22, 52, 0, - 0, 0, 0, 0, 54, 0, 270, 271, 269, 275, - 277, 274, 276, 272, 273, 0, 0, 0, 70, 0, - 0, 0, 0, 0, 18, 19, 0, 0, 20, 0, - 0, 0, 17, 72, 71, 0, 0, 0, 22, 58, - 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, - 69, 0, 0, 0, 13, 0, 0, 0, 24, 0, - 30, 0, 0, 31, 32, 18, 19, 0, 0, 20, - 0, 0, 0, 17, 33, 0, 0, 0, 0, 22, - 11, 12, 14, 15, 16, 21, 23, 25, 26, 27, - 28, 29, 0, 0, 0, 13, 0, 0, 0, 24, - 0, 30, 0, 0, 31, 32, 18, 19, 0, 0, - 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 11, 12, 14, 15, 16, 21, 23, 25, 26, - 27, 28, 29, 100, 0, 0, 13, 0, 0, 0, - 24, 169, 30, 0, 0, 31, 32, 0, 0, 0, - 0, 0, 100, 0, 0, 0, 0, 0, 82, 84, - 85, 0, 86, 87, 88, 89, 90, 91, 92, 93, + 30, 0, 0, 31, 32, 51, 72, 0, 53, 320, + 0, 22, 52, 0, 0, 0, 0, 0, 54, 0, + 270, 271, 269, 276, 278, 275, 277, 272, 273, 274, + 0, 0, 0, 70, 0, 0, 17, 72, 0, 18, + 19, 0, 22, 20, 0, 0, 0, 0, 0, 71, + 0, 0, 0, 0, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 0, 0, 0, 13, + 18, 19, 0, 24, 20, 30, 0, 0, 31, 32, + 0, 0, 0, 0, 0, 11, 12, 14, 15, 16, + 21, 23, 25, 26, 27, 28, 29, 17, 33, 0, + 13, 0, 0, 22, 24, 0, 30, 0, 0, 31, + 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 18, 19, 0, 0, 20, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 12, 14, 15, + 16, 21, 23, 25, 26, 27, 28, 29, 100, 0, + 0, 13, 0, 0, 0, 24, 169, 30, 0, 0, + 31, 32, 0, 0, 0, 0, 0, 100, 0, 0, + 0, 0, 0, 0, 82, 84, 85, 0, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 0, 96, + 97, 99, 83, 82, 84, 85, 0, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 0, 96, 97, + 99, 83, 100, 0, 98, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 100, 0, 98, 0, 0, 0, 0, 82, 84, + 85, 0, 86, 87, 88, 0, 90, 91, 92, 93, 94, 95, 0, 96, 97, 99, 83, 82, 84, 85, - 0, 86, 87, 88, 89, 90, 91, 92, 93, 94, - 95, 0, 96, 97, 99, 83, 100, 0, 98, 0, + 0, 86, 87, 0, 0, 90, 91, 0, 93, 94, + 95, 0, 96, 97, 99, 83, 0, 0, 98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 98, 0, 0, - 0, 82, 84, 85, 0, 86, 87, 88, 0, 90, - 91, 92, 93, 94, 95, 0, 96, 97, 99, 83, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 98, + 0, 0, 0, 0, 0, 0, 0, 98, } var yyPact = [...]int16{ - 17, 153, 541, 541, 385, 500, -1000, -1000, -1000, 146, + 17, 164, 555, 555, 388, 494, -1000, -1000, -1000, 120, 
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 239, -1000, 224, -1000, 618, -1000, -1000, + -1000, -1000, -1000, 204, -1000, 240, -1000, 633, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 36, 111, -1000, 459, -1000, 459, 141, -1000, -1000, -1000, + 29, 113, -1000, 463, -1000, 463, 117, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 181, -1000, -1000, 196, -1000, -1000, 252, -1000, - 25, -1000, -54, -54, -54, -54, -54, -54, -54, -54, - -54, -54, -54, -54, -54, -54, -54, -54, 37, 255, - 209, 111, -59, -1000, 118, 118, 309, -1000, 599, 21, - -1000, 187, -1000, -1000, 70, 114, -1000, -1000, -1000, 238, - -1000, 128, -1000, 296, 459, -1000, -55, -50, -1000, 459, - 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, - 459, 459, 459, 459, -1000, 170, -1000, -1000, -1000, 110, - -1000, -1000, -1000, -1000, -1000, -1000, 51, 51, 107, -1000, - -1000, -1000, -1000, 168, -1000, -1000, 45, -1000, 618, -1000, - -1000, 172, -1000, 127, -1000, -1000, -1000, -1000, -1000, 76, - -1000, -1000, -1000, -1000, -1000, 22, 4, 3, -1000, -1000, - -1000, 384, 382, 118, 118, 118, 118, 21, 21, 306, - 306, 306, 121, 662, 306, 306, 121, 21, 21, 306, - 21, 382, -1000, 23, -1000, -1000, -1000, 179, -1000, 180, + -1000, -1000, 47, -1000, -1000, 191, -1000, -1000, 253, -1000, + 19, -1000, -49, -49, -49, -49, -49, -49, -49, -49, + -49, -49, -49, -49, -49, -49, -49, -49, 36, 116, + 210, 113, -60, -1000, 163, 163, 311, -1000, 614, 20, + -1000, 190, -1000, -1000, 69, 48, -1000, -1000, -1000, 169, + -1000, 159, -1000, 147, 463, -1000, -58, -53, -1000, 463, + 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, + 463, 463, 463, 463, -1000, 185, -1000, -1000, -1000, 111, + -1000, -1000, -1000, -1000, -1000, -1000, 55, 55, 167, -1000, + -1000, -1000, -1000, 168, -1000, -1000, 157, -1000, 633, -1000, + -1000, 35, -1000, 158, -1000, -1000, -1000, -1000, -1000, 152, + -1000, -1000, -1000, -1000, -1000, 27, 2, 1, -1000, -1000, + -1000, 387, 385, 163, 163, 163, 163, 20, 20, 308, + 308, 308, 697, 678, 308, 308, 697, 20, 20, 308, + 20, 385, -1000, 24, -1000, -1000, -1000, 198, -1000, 160, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 459, -1000, -1000, -1000, -1000, -1000, -1000, 52, - 52, 10, 52, 57, 57, 38, 40, -1000, -1000, 218, - 217, 216, 215, 214, 206, 204, 190, 189, -1000, -1000, - -1000, -1000, -1000, -1000, 32, 213, -1000, -1000, 19, -1000, - 618, -1000, -1000, -1000, 52, -1000, 7, 6, 458, -1000, - -1000, -1000, 47, 5, 51, 51, 51, 113, 47, 113, - 47, -1000, -1000, -1000, -1000, -1000, 52, 52, -1000, -1000, - -1000, 52, -1000, -1000, -1000, -1000, -1000, -1000, 51, -1000, - -1000, -1000, -1000, -1000, -1000, 26, -1000, 35, -1000, -1000, - -1000, -1000, + -1000, -1000, 463, -1000, -1000, -1000, -1000, -1000, -1000, 59, + 59, 22, 59, 104, 104, 151, 100, -1000, -1000, 235, + 222, 219, 216, 215, 214, 211, 203, 189, 170, -1000, + -1000, -1000, -1000, -1000, -1000, 41, 194, -1000, -1000, 18, + -1000, 633, -1000, -1000, -1000, 59, -1000, 13, 9, 462, + -1000, -1000, -1000, 14, 10, 55, 55, 55, 115, 115, + 14, 115, 14, -1000, -1000, -1000, -1000, -1000, 59, 59, + -1000, -1000, -1000, 59, -1000, -1000, -1000, -1000, -1000, -1000, + 55, -1000, -1000, -1000, 
-1000, -1000, -1000, -1000, 30, -1000, + 106, -1000, -1000, -1000, -1000, } var yyPgo = [...]int16{ - 0, 353, 13, 352, 6, 15, 350, 263, 349, 347, - 344, 209, 265, 343, 14, 342, 10, 11, 341, 337, - 8, 336, 3, 4, 333, 2, 1, 0, 332, 12, - 5, 330, 326, 18, 191, 325, 317, 7, 316, 304, - 17, 303, 34, 300, 299, 298, 297, 293, 292, 291, - 290, 249, 9, 271, 270, 268, + 0, 334, 13, 332, 6, 15, 328, 263, 327, 319, + 318, 213, 265, 317, 14, 312, 10, 11, 311, 309, + 8, 305, 3, 4, 304, 2, 1, 0, 302, 12, + 5, 301, 300, 18, 191, 299, 298, 7, 295, 294, + 17, 293, 56, 292, 291, 290, 274, 271, 270, 268, + 259, 250, 9, 258, 252, 251, } var yyR1 = [...]int8{ @@ -518,14 +523,14 @@ var yyR1 = [...]int8{ 14, 14, 14, 55, 19, 19, 19, 19, 18, 18, 18, 18, 18, 18, 18, 18, 18, 28, 28, 28, 20, 20, 20, 20, 21, 21, 21, 22, 22, 22, - 22, 22, 22, 22, 22, 22, 23, 23, 24, 24, - 24, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, + 22, 22, 22, 22, 22, 22, 22, 23, 23, 24, + 24, 24, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 8, 8, 5, 5, 5, 5, 44, 27, 29, 29, - 30, 30, 26, 25, 25, 52, 48, 10, 53, 53, - 17, 17, + 6, 8, 8, 5, 5, 5, 5, 44, 27, 29, + 29, 30, 30, 26, 25, 25, 52, 48, 10, 53, + 53, 17, 17, } var yyR2 = [...]int8{ @@ -545,52 +550,52 @@ var yyR2 = [...]int8{ 3, 2, 1, 2, 0, 3, 2, 1, 1, 3, 1, 3, 4, 1, 3, 5, 5, 1, 1, 1, 4, 3, 3, 2, 3, 1, 2, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 4, 3, 3, 1, - 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, + 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, - 1, 1, 1, 2, 1, 1, 1, 1, 0, 1, - 0, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, + 2, 1, 1, 1, 2, 1, 1, 1, 1, 0, + 1, 0, 1, } var yyChk = [...]int16{ - -1000, -54, 88, 89, 90, 91, 2, 10, -12, -7, - -11, 60, 61, 75, 62, 63, 64, 12, 45, 46, - 49, 65, 18, 66, 79, 67, 68, 69, 70, 71, - 81, 84, 85, 13, -55, -12, 10, -37, -32, -35, + -1000, -54, 89, 90, 91, 92, 2, 10, -12, -7, + -11, 61, 62, 76, 63, 64, 65, 12, 46, 47, + 50, 66, 18, 67, 80, 68, 69, 70, 71, 72, + 82, 85, 86, 13, -55, -12, 10, -37, -32, -35, -38, -43, -44, -45, -47, -48, -49, -50, -51, -31, - -3, 12, 19, 15, 25, -8, -7, -42, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 39, 55, 13, -51, -11, -13, 20, -14, 12, 2, - -19, 2, 39, 57, 40, 41, 43, 44, 45, 46, - 47, 48, 49, 50, 51, 52, 54, 55, 79, 56, - 14, -33, -40, 2, 75, 81, 15, -40, -37, -37, + -3, 12, 19, 15, 25, -8, -7, -42, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 40, 56, 13, -51, -11, -13, 20, -14, 12, 2, + -19, 2, 40, 58, 41, 42, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 55, 56, 80, 57, + 14, -33, -40, 2, 76, 82, 15, -40, -37, -37, -42, -1, 20, -2, 12, -10, 2, 25, 20, 7, - 2, 4, 2, 24, -34, -41, -36, -46, 74, -34, + 2, 4, 2, 24, -34, -41, -36, -46, 75, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, - -34, -34, -34, -34, -52, 55, 2, 9, -30, -9, - 2, -27, -29, 84, 85, 19, 39, 55, -52, 2, + -34, -34, -34, -34, -52, 56, 2, 9, -30, -9, + 2, -27, -29, 85, 86, 19, 40, 56, -52, 2, -40, -33, -16, 15, 2, -16, -39, 22, -37, 22, - 20, 7, 2, -5, 2, 4, 52, 42, 53, -5, + 20, 7, 2, -5, 2, 4, 53, 43, 54, -5, 20, -14, 25, 2, -18, 5, -28, -20, 12, -27, - -29, 16, -37, 78, 80, 76, 77, -37, -37, -37, + -29, 16, -37, 79, 81, 77, 78, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -52, 15, -27, -27, 21, 6, 2, -15, - 22, -4, -6, 2, 60, 74, 61, 75, 62, 63, - 64, 76, 77, 12, 78, 45, 46, 49, 65, 18, - 66, 79, 80, 67, 68, 
69, 70, 71, 84, 85, - 57, 22, 7, 20, -2, 25, 2, 25, 2, 26, - 26, -29, 26, 39, 55, -21, 24, 17, -22, 30, - 28, 29, 35, 36, 33, 31, 34, 32, -16, -16, - -17, -16, -17, 22, -53, -52, 2, 22, 7, 2, - -37, -26, 19, -26, 26, -26, -20, -20, 24, 17, - 2, 17, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 21, 2, 22, -4, -26, 26, 26, 17, -22, - -25, 55, -26, -30, -27, -27, -27, -23, 14, -25, - -23, -25, -26, -26, -26, -24, -27, 24, 21, 2, - 21, -27, + 22, -4, -6, 2, 61, 75, 62, 76, 63, 64, + 65, 77, 78, 12, 79, 46, 47, 50, 66, 18, + 67, 80, 81, 68, 69, 70, 71, 72, 85, 86, + 58, 22, 7, 20, -2, 25, 2, 25, 2, 26, + 26, -29, 26, 40, 56, -21, 24, 17, -22, 30, + 28, 29, 35, 36, 37, 33, 31, 34, 32, -16, + -16, -17, -16, -17, 22, -53, -52, 2, 22, 7, + 2, -37, -26, 19, -26, 26, -26, -20, -20, 24, + 17, 2, 17, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 21, 2, 22, -4, -26, 26, 26, + 17, -22, -25, 56, -26, -30, -27, -27, -27, -23, + 14, -23, -25, -23, -25, -26, -26, -26, -24, -27, + 24, 21, 2, 21, -27, } var yyDef = [...]int16{ @@ -599,36 +604,36 @@ var yyDef = [...]int16{ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 0, 106, 216, 0, 226, 0, 83, 84, -2, -2, + 0, 106, 217, 0, 227, 0, 83, 84, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - 210, 211, 0, 5, 98, 0, 124, 127, 0, 132, + 211, 212, 0, 5, 98, 0, 124, 127, 0, 132, 133, 137, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 0, 0, 0, 0, 22, 23, 0, 0, 0, 60, 0, 81, - 82, 0, 87, 89, 0, 93, 97, 227, 122, 0, + 82, 0, 87, 89, 0, 93, 97, 228, 122, 0, 128, 0, 131, 136, 0, 42, 47, 48, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 67, 0, 69, 225, 70, 0, - 72, 220, 221, 73, 74, 217, 0, 0, 0, 80, + 0, 0, 0, 0, 67, 0, 69, 226, 70, 0, + 72, 221, 222, 73, 74, 218, 0, 0, 0, 80, 20, 21, 24, 0, 54, 25, 0, 62, 64, 66, - 85, 0, 90, 0, 96, 212, 213, 214, 215, 0, + 85, 0, 90, 0, 96, 213, 214, 215, 216, 0, 123, 126, 129, 130, 135, 138, 140, 143, 147, 148, 149, 0, 26, 0, 0, -2, -2, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 68, 0, 218, 219, 75, -2, 79, 0, - 53, 56, 58, 59, 183, 184, 185, 186, 187, 188, - 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, - 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, - 209, 61, 65, 86, 88, 91, 95, 92, 94, 0, + 40, 41, 68, 0, 219, 220, 75, -2, 79, 0, + 53, 56, 58, 59, 184, 185, 186, 187, 188, 189, + 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 61, 65, 86, 88, 91, 95, 92, 94, 0, 0, 0, 0, 0, 0, 0, 0, 153, 155, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 45, 46, - 49, 231, 50, 71, 0, -2, 78, 51, 0, 57, - 63, 139, 222, 141, 0, 144, 0, 0, 0, 151, - 156, 152, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 76, 77, 52, 55, 142, 0, 0, 150, 154, - 157, 0, 224, 158, 159, 160, 161, 162, 0, 163, - 164, 165, 145, 146, 223, 0, 169, 0, 167, 170, - 166, 168, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 45, + 46, 49, 232, 50, 71, 0, -2, 78, 51, 0, + 57, 63, 139, 223, 141, 0, 144, 0, 0, 0, + 151, 156, 152, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 76, 77, 52, 55, 142, 0, 0, + 150, 154, 157, 0, 225, 158, 159, 160, 161, 162, + 0, 163, 164, 165, 166, 145, 146, 224, 0, 170, + 0, 168, 171, 167, 169, } var yyTok1 = [...]int8{ @@ -645,7 +650,7 @@ var yyTok2 = [...]int8{ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, + 92, 93, } var yyTok3 = [...]int8{ @@ -1738,47 +1743,53 @@ yydefault: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["buckets"] = 
yyDollar[3].bucket_set + yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set } case 163: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["offset"] = yyDollar[3].int + yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } case 164: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set + yyVAL.descriptors["offset"] = yyDollar[3].int } case 165: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_offset"] = yyDollar[3].int + yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } case 166: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = yyDollar[2].bucket_set + yyVAL.descriptors = yylex.(*parser).newMap() + yyVAL.descriptors["n_offset"] = yyDollar[3].int } case 167: - yyDollar = yyS[yypt-3 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.bucket_set = yyDollar[2].bucket_set } case 168: yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) + yyVAL.bucket_set = yyDollar[2].bucket_set } case 169: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) + } + case 170: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.bucket_set = []float64{yyDollar[1].float} } - case 216: + case 217: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &NumberLiteral{ @@ -1786,22 +1797,22 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 217: + case 218: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 218: + case 219: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = yyDollar[2].float } - case 219: + case 220: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = -yyDollar[2].float } - case 222: + case 223: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1810,17 +1821,17 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 223: + case 224: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.int = -int64(yyDollar[2].uint) } - case 224: + case 225: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.int = int64(yyDollar[1].uint) } - case 225: + case 226: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1829,7 +1840,7 @@ yydefault: yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err) } } - case 226: + case 227: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &StringLiteral{ @@ -1837,7 +1848,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 227: + case 228: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.item = Item{ @@ -1846,12 +1857,12 @@ yydefault: Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), } } - case 228: + case 229: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.duration = 0 } - case 230: + case 231: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 4e3de2a668..6da3fe6e25 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -135,15 +135,16 @@ var key = map[string]ItemType{ } var histogramDesc = map[string]ItemType{ - "sum": SUM_DESC, - "count": COUNT_DESC, - "schema": SCHEMA_DESC, - "offset": OFFSET_DESC, - "n_offset": NEGATIVE_OFFSET_DESC, - "buckets": BUCKETS_DESC, - "n_buckets": NEGATIVE_BUCKETS_DESC, - "z_bucket": ZERO_BUCKET_DESC, - "z_bucket_w": ZERO_BUCKET_WIDTH_DESC, + "sum": SUM_DESC, + "count": COUNT_DESC, + "schema": SCHEMA_DESC, + 
"offset": OFFSET_DESC, + "n_offset": NEGATIVE_OFFSET_DESC, + "buckets": BUCKETS_DESC, + "n_buckets": NEGATIVE_BUCKETS_DESC, + "z_bucket": ZERO_BUCKET_DESC, + "z_bucket_w": ZERO_BUCKET_WIDTH_DESC, + "custom_values": CUSTOM_VALUES_DESC, } // ItemTypeStr is the default string representations for common Items. It does not diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 104b80ce52..f3fa27f84e 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -566,6 +566,15 @@ func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram. p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket_w number: %v", val) } } + val, ok = (*desc)["custom_values"] + if ok { + customValues, ok := val.([]float64) + if ok { + output.CustomValues = customValues + } else { + p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing custom_values: %v", val) + } + } buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset") output.PositiveBuckets = buckets diff --git a/promql/test.go b/promql/test.go index 6fe233b6f4..da42468022 100644 --- a/promql/test.go +++ b/promql/test.go @@ -46,8 +46,8 @@ var ( patSpace = regexp.MustCompile("[\t ]+") patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`) - patEvalInstant = regexp.MustCompile(`^eval(?:_(with_nhcb))?(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) - patEvalRange = regexp.MustCompile(`^eval(?:_(fail))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) + patEvalInstant = regexp.MustCompile(`^eval(?:_(with_nhcb))?(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) + patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) histogramBucketReplacements = []struct { pattern *regexp.Regexp repl string @@ -233,7 +233,7 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { rangeParts := patEvalRange.FindStringSubmatch(lines[i]) if instantParts == nil && rangeParts == nil { - return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_with_nhcb][_fail|_ordered] instant [at ] ' or 'eval[_fail] range from to step '") + return i, nil, raise(i, "invalid evaluation command. 
Must be either 'eval[_with_nhcb][_fail|_warn|_ordered] instant [at ] ' or 'eval[_fail|_warn] range from to step '") } isInstant := instantParts != nil @@ -314,6 +314,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { cmd.ordered = true case "fail": cmd.fail = true + case "warn": + cmd.warn = true } cmd.withNhcb = withNhcb @@ -636,9 +638,9 @@ type evalCmd struct { step time.Duration line int - isRange bool // if false, instant query - fail, ordered bool - withNhcb bool + isRange bool // if false, instant query + fail, warn, ordered bool + withNhcb bool metrics map[uint64]labels.Labels expected map[uint64]entry @@ -965,6 +967,13 @@ func (t *test) execRangeEval(cmd *evalCmd, engine QueryEngine) error { return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err) } res := q.Exec(t.context) + countWarnings, _ := res.Warnings.CountWarningsAndInfo() + if !cmd.warn && countWarnings > 0 { + return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) + } + if cmd.warn && countWarnings == 0 { + return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line) + } if res.Err != nil { if cmd.fail { return nil @@ -996,7 +1005,7 @@ func (t *test) execInstantEval(cmd *evalCmd, engine QueryEngine) error { } if cmd.withNhcb { if !strings.Contains(iq.expr, "_bucket") { - return fmt.Errorf("expected _bucket in the expression %q", iq.expr) + return fmt.Errorf("expected '_bucket' in the expression %q", iq.expr) } for _, rep := range histogramBucketReplacements { iq.expr = rep.pattern.ReplaceAllString(iq.expr, rep.repl) @@ -1016,6 +1025,13 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine Query } defer q.Close() res := q.Exec(t.context) + countWarnings, _ := res.Warnings.CountWarningsAndInfo() + if !cmd.warn && countWarnings > 0 { + return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) + } + if cmd.warn && countWarnings == 0 { + return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) + } if res.Err != nil { if cmd.fail { return nil diff --git a/promql/testdata/aggregators.test b/promql/testdata/aggregators.test index 8709b393b2..86336a5d9d 100644 --- a/promql/testdata/aggregators.test +++ b/promql/testdata/aggregators.test @@ -399,7 +399,7 @@ eval instant at 1m quantile without(point)((scalar(foo)), data) {test="three samples"} 1.6 {test="uneven samples"} 2.8 -eval instant at 1m quantile without(point)(NaN, data) +eval_warn instant at 1m quantile without(point)(NaN, data) {test="two samples"} NaN {test="three samples"} NaN {test="uneven samples"} NaN diff --git a/promql/testdata/functions.test b/promql/testdata/functions.test index e01c75a7f6..d68a1f2f52 100644 --- a/promql/testdata/functions.test +++ b/promql/testdata/functions.test @@ -830,17 +830,17 @@ eval instant at 1m quantile_over_time(1, data[1m]) {test="three samples"} 2 {test="uneven samples"} 4 -eval instant at 1m quantile_over_time(-1, data[1m]) +eval_warn instant at 1m quantile_over_time(-1, data[1m]) {test="two samples"} -Inf {test="three samples"} -Inf {test="uneven samples"} -Inf -eval instant at 1m quantile_over_time(2, data[1m]) +eval_warn instant at 1m quantile_over_time(2, data[1m]) {test="two samples"} +Inf {test="three samples"} +Inf {test="uneven samples"} +Inf -eval instant at 1m (quantile_over_time(2, (data[1m]))) +eval_warn instant at 1m (quantile_over_time(2, 
(data[1m]))) {test="two samples"} +Inf {test="three samples"} +Inf {test="uneven samples"} +Inf diff --git a/promql/testdata/histograms.test b/promql/testdata/histograms.test index 0aab3117dc..b7214cfade 100644 --- a/promql/testdata/histograms.test +++ b/promql/testdata/histograms.test @@ -127,17 +127,17 @@ eval_with_nhcb instant at 50m histogram_quantile(1, testhistogram3_bucket) {start="negative"} -0.1 # Quantile too low. -eval_with_nhcb instant at 50m histogram_quantile(-0.1, testhistogram_bucket) +eval_with_nhcb_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket) {start="positive"} -Inf {start="negative"} -Inf # Quantile too high. -eval_with_nhcb instant at 50m histogram_quantile(1.01, testhistogram_bucket) +eval_with_nhcb_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket) {start="positive"} +Inf {start="negative"} +Inf # Quantile invalid. -eval_with_nhcb instant at 50m histogram_quantile(NaN, testhistogram_bucket) +eval_with_nhcb_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket) {start="positive"} NaN {start="negative"} NaN @@ -254,6 +254,9 @@ eval_with_nhcb instant at 50m histogram_quantile(0.5, rate(request_duration_seco {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.11666666666666667 +eval instant at 50m sum(request_duration_seconds) + {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} + # A histogram with nonmonotonic bucket counts. This may happen when recording # rule evaluation or federation races scrape ingestion, causing some buckets # counts to be derived from fewer samples. diff --git a/promql/testdata/native_histograms.test b/promql/testdata/native_histograms.test index 1da68a385f..a4fc3d8403 100644 --- a/promql/testdata/native_histograms.test +++ b/promql/testdata/native_histograms.test @@ -269,3 +269,18 @@ eval instant at 50m histogram_sum(sum(incr_sum_histogram)) eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m]))) {} 30 + + + +# Test native histograms with custom buckets. +load 5m + custom_buckets_histogram {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}x10 + +eval instant at 5m histogram_fraction(5, 10, custom_buckets_histogram) + {} 0.5 + +eval instant at 5m histogram_quantile(0.5, custom_buckets_histogram) + {} 7.5 + +eval instant at 5m sum(custom_buckets_histogram) + {} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}} diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index f5f60ba874..6415f44744 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -94,6 +94,19 @@ func (a Annotations) AsStrings(query string, maxAnnos int) []string { return arr } +func (a Annotations) CountWarningsAndInfo() (int, int) { + var countWarnings, countInfo int + for _, err := range a { + if errors.Is(err, PromQLWarning) { + countWarnings++ + } + if errors.Is(err, PromQLInfo) { + countInfo++ + } + } + return countWarnings, countInfo +} + //nolint:revive // error-naming. var ( // Currently there are only 2 types, warnings and info. 
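
For readers following the model changes in this patch: the native_histograms.test additions above exercise the new custom-buckets literal. Below is a minimal, hypothetical Go sketch of what such a literal corresponds to when built directly against the histogram model. The -53 schema value and the CustomValues/PositiveBuckets layout are inferred from the testdata and parser changes in this patch, not from a final exported constant, so treat it as an illustration rather than the authoritative API.

```
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Rough programmatic equivalent of the test literal
	//   {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}
	// Assumption: -53 is the schema value used for custom buckets, as seen in
	// the testdata above; field names follow the parser changes in this patch.
	fh := &histogram.FloatHistogram{
		Schema:          -53,
		Count:           4, // 1 + 2 + 1 observations across the three buckets
		Sum:             5,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		PositiveBuckets: []float64{1, 2, 1},
		// Upper bounds of the custom buckets; the final bucket implicitly extends to +Inf.
		CustomValues: []float64{5, 10},
	}
	fmt.Println(fh.UsesCustomBuckets()) // expected to print: true
}
```
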
From 9adc1699c36a024c381d76607f7cf0ba5e27e1f0 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Fri, 7 Jun 2024 18:50:59 +0800 Subject: [PATCH 07/30] fix according to code review Signed-off-by: Jeanette Tan --- model/histogram/float_histogram.go | 14 +++++++++----- model/histogram/generic.go | 7 +++++++ model/histogram/histogram.go | 13 ++++--------- promql/promqltest/test.go | 28 ++++++++++++++-------------- rules/manager_test.go | 3 ++- scrape/target.go | 10 ++++++++-- tsdb/chunkenc/histogram_meta.go | 2 +- 7 files changed, 45 insertions(+), 32 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 5431892dd1..2a37ea66d4 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -30,7 +30,7 @@ import ( type FloatHistogram struct { // Counter reset information. CounterResetHint CounterResetHint - // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets, + // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in // each case, and then each power of two is divided into 2^n logarithmic buckets. // Or in other words, each bucket boundary is the previous boundary times @@ -54,7 +54,7 @@ type FloatHistogram struct { // This slice is interned, to be treated as immutable and copied by reference. // These numbers should be strictly increasing. This field is only used when the // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans - // and NegativeBuckets fields are not used. + // and NegativeBuckets fields are not used in that case. CustomValues []float64 } @@ -141,7 +141,8 @@ func (h *FloatHistogram) CopyTo(to *FloatHistogram) { // CopyToSchema works like Copy, but the returned deep copy has the provided // target schema, which must be ≤ the original schema (i.e. it must have a lower -// resolution). +// resolution). This method panics if a custom buckets schema is used in the +// receiving FloatHistogram or as the provided targetSchema. func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { if targetSchema == h.Schema { // Fast path. @@ -253,7 +254,7 @@ func (h *FloatHistogram) TestExpression() string { return "{{" + strings.Join(res, " ") + "}}" } -// ZeroBucket returns the zero bucket. +// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets. func (h *FloatHistogram) ZeroBucket() Bucket[float64] { if h.UsesCustomBuckets() { panic("histograms with custom buckets have no zero bucket") @@ -922,7 +923,8 @@ func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { // // targetSchema must be ≤ the schema of FloatHistogram (and of course within the // legal values for schemas in general). The buckets are merged to match the -// targetSchema prior to iterating (without mutating FloatHistogram). +// targetSchema prior to iterating (without mutating FloatHistogram), but custom buckets +// schemas cannot be merged with other schemas. func (h *FloatHistogram) floatBucketIterator( positive bool, absoluteStartValue float64, targetSchema int32, ) floatBucketIterator { @@ -1317,6 +1319,8 @@ func FloatBucketsMatch(b1, b2 []float64) bool { // ReduceResolution reduces the float histogram's spans, buckets into target schema. // The target schema must be smaller than the current float histogram's schema. +// This will panic if the histogram has custom buckets or if the target schema is +// a custom buckets schema. 
func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { if h.UsesCustomBuckets() { panic("cannot reduce resolution when there are custom buckets") diff --git a/model/histogram/generic.go b/model/histogram/generic.go index 7b770d83dd..ff45564ce3 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -777,3 +777,10 @@ func reduceResolution[IBC InternalBucketCount]( return targetSpans, targetBuckets } + +func clearIfNotNil[T any](items []T) []T { + if items == nil { + return nil + } + return items[:0] +} diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index f02b4ba1da..e4b99ec420 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -74,7 +74,7 @@ type Histogram struct { // This slice is interned, to be treated as immutable and copied by reference. // These numbers should be strictly increasing. This field is only used when the // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans - // and NegativeBuckets fields are not used. + // and NegativeBuckets fields are not used in that case. CustomValues []float64 } @@ -199,7 +199,7 @@ func (h *Histogram) String() string { return sb.String() } -// ZeroBucket returns the zero bucket. +// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets. func (h *Histogram) ZeroBucket() Bucket[uint64] { if h.UsesCustomBuckets() { panic("histograms with custom buckets have no zero bucket") @@ -417,13 +417,6 @@ func resize[T any](items []T, n int) []T { return items[:n] } -func clearIfNotNil[T any](items []T) []T { - if items == nil { - return nil - } - return items[:0] -} - // Validate validates consistency between span and bucket slices. Also, buckets are checked // against negative values. We check to make sure there are no unexpected fields or field values // based on the exponential / custom buckets schema. @@ -615,6 +608,8 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] { // ReduceResolution reduces the histogram's spans, buckets into target schema. // The target schema must be smaller than the current histogram's schema. +// This will panic if the histogram has custom buckets or if the target schema is +// a custom buckets schema. 
func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram { if h.UsesCustomBuckets() { panic("cannot reduce resolution when there are custom buckets") diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 9754a7fe78..e238ed0783 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -208,14 +208,14 @@ func parseLoad(lines []string, i int) (int, *loadCmd, error) { } parts := patLoad.FindStringSubmatch(lines[i]) var ( - withNhcb = parts[1] == "with_nhcb" + withNHCB = parts[1] == "with_nhcb" step = parts[2] ) gap, err := model.ParseDuration(step) if err != nil { return i, nil, raise(i, "invalid step definition %q: %s", step, err) } - cmd := newLoadCmd(time.Duration(gap), withNhcb) + cmd := newLoadCmd(time.Duration(gap), withNHCB) for i+1 < len(lines) { i++ defLine := lines[i] @@ -253,12 +253,12 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { isInstant := instantParts != nil - var withNhcb bool + var withNHCB bool var mod string var expr string if isInstant { - withNhcb = instantParts[1] == "with_nhcb" + withNHCB = instantParts[1] == "with_nhcb" mod = instantParts[2] expr = instantParts[4] } else { @@ -332,7 +332,7 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { case "warn": cmd.warn = true } - cmd.withNhcb = withNhcb + cmd.withNHCB = withNHCB for j := 1; i+1 < len(lines); j++ { i++ @@ -419,16 +419,16 @@ type loadCmd struct { metrics map[uint64]labels.Labels defs map[uint64][]promql.Sample exemplars map[uint64][]exemplar.Exemplar - withNhcb bool + withNHCB bool } -func newLoadCmd(gap time.Duration, withNhcb bool) *loadCmd { +func newLoadCmd(gap time.Duration, withNHCB bool) *loadCmd { return &loadCmd{ gap: gap, metrics: map[uint64]labels.Labels{}, defs: map[uint64][]promql.Sample{}, exemplars: map[uint64][]exemplar.Exemplar{}, - withNhcb: withNhcb, + withNHCB: withNHCB, } } @@ -467,7 +467,7 @@ func (cmd *loadCmd) append(a storage.Appender) error { } } } - if cmd.withNhcb { + if cmd.withNHCB { return cmd.appendCustomHistogram(a) } return nil @@ -535,11 +535,11 @@ func processClassicHistogramSeries(m labels.Labels, suffix string, histMap map[u func processUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64) ([]float64, *histogram.FloatHistogram) { sort.Float64s(upperBounds0) upperBounds := make([]float64, 0, len(upperBounds0)) - prevLe := math.Inf(-1) + prevLE := math.Inf(-1) for _, le := range upperBounds0 { - if le != prevLe { // deduplicate + if le != prevLE { // deduplicate upperBounds = append(upperBounds, le) - prevLe = le + prevLE = le } } var customBounds []float64 @@ -655,7 +655,7 @@ type evalCmd struct { isRange bool // if false, instant query fail, warn, ordered bool - withNhcb bool + withNHCB bool metrics map[uint64]labels.Labels expected map[uint64]entry @@ -1028,7 +1028,7 @@ func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error { if err := t.runInstantQuery(iq, cmd, engine); err != nil { return err } - if cmd.withNhcb { + if cmd.withNHCB { if !strings.Contains(iq.expr, "_bucket") { return fmt.Errorf("expected '_bucket' in the expression %q", iq.expr) } diff --git a/rules/manager_test.go b/rules/manager_test.go index a70bbe4729..f088407e36 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -1400,7 +1400,8 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) { expHist := hists[0].ToFloat(nil) for _, h := range hists[1:] { - expHist, _ = expHist.Add(h.ToFloat(nil)) + expHist, err = expHist.Add(h.ToFloat(nil)) + require.NoError(t, err) 
} it := s.Iterator(nil) diff --git a/scrape/target.go b/scrape/target.go index b796b4cd02..3afe500a9d 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -365,16 +365,22 @@ type bucketLimitAppender struct { func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { + if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit && h.Schema > histogram.ExponentialSchemaMax { + return 0, errBucketLimit + } for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { - if h.Schema <= histogram.ExponentialSchemaMin || h.Schema > histogram.ExponentialSchemaMax { + if h.Schema <= histogram.ExponentialSchemaMin { return 0, errBucketLimit } h = h.ReduceResolution(h.Schema - 1) } } if fh != nil { + if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit && fh.Schema > histogram.ExponentialSchemaMax { + return 0, errBucketLimit + } for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { - if fh.Schema <= histogram.ExponentialSchemaMin || fh.Schema > histogram.ExponentialSchemaMax { + if fh.Schema <= histogram.ExponentialSchemaMin { return 0, errBucketLimit } fh = fh.ReduceResolution(fh.Schema - 1) diff --git a/tsdb/chunkenc/histogram_meta.go b/tsdb/chunkenc/histogram_meta.go index b03181e0b0..c5381ba2fb 100644 --- a/tsdb/chunkenc/histogram_meta.go +++ b/tsdb/chunkenc/histogram_meta.go @@ -199,7 +199,7 @@ func isWholeWhenMultiplied(in float64) bool { // we can convert the value to a float64 by subtracting 1 and dividing by 1000. func putCustomBound(b *bstream, f float64) { tf := f * 1000 - // 33554431-1 comes from the maximum that can be stored in a varint in 4 + // 33554431-1 comes from the maximum that can be stored in a varbit in 4 // bytes, other values are stored in 8 bytes anyway. 
if tf < 0 || tf > 33554430 || !isWholeWhenMultiplied(f) { b.writeBit(zero) From b0e320425c69fa8d87fc605f634395c04e749b5f Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Fri, 7 Jun 2024 18:50:59 +0800 Subject: [PATCH 08/30] refine test rewriting regex and add validation Signed-off-by: Jeanette Tan --- promql/promqltest/test.go | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index e238ed0783..4efebadfb9 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -48,6 +48,9 @@ var ( patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`) patEvalInstant = regexp.MustCompile(`^eval(?:_(with_nhcb))?(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) + patWhitespace = regexp.MustCompile(`\s+`) + patBucket = regexp.MustCompile(`_bucket\b`) + patLE = regexp.MustCompile(`\ble\b`) histogramBucketReplacements = []struct { pattern *regexp.Regexp repl string @@ -57,19 +60,19 @@ var ( repl: "", }, { - pattern: regexp.MustCompile(`\s+by\s+\(le\)`), + pattern: regexp.MustCompile(`\s+by\s+\(\s*le\s*\)`), repl: "", }, { - pattern: regexp.MustCompile(`\(le,\s*`), + pattern: regexp.MustCompile(`\(\s*le\s*,\s*`), repl: "(", }, { - pattern: regexp.MustCompile(`,\s*le,\s*`), + pattern: regexp.MustCompile(`,\s*le\s*,\s*`), repl: ", ", }, { - pattern: regexp.MustCompile(`,\s*le\)`), + pattern: regexp.MustCompile(`,\s*le\s*\)`), repl: ")", }, } @@ -1030,11 +1033,20 @@ func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error { } if cmd.withNHCB { if !strings.Contains(iq.expr, "_bucket") { - return fmt.Errorf("expected '_bucket' in the expression %q", iq.expr) + return fmt.Errorf("expected '_bucket' in the expression '%q'", iq.expr) } + origExpr := iq.expr for _, rep := range histogramBucketReplacements { iq.expr = rep.pattern.ReplaceAllString(iq.expr, rep.repl) } + switch { + case patWhitespace.ReplaceAllString(iq.expr, "") == patWhitespace.ReplaceAllString(origExpr, ""): + return fmt.Errorf("query rewrite of '%q' had no effect", iq.expr) + case patBucket.MatchString(iq.expr): + return fmt.Errorf("rewritten query '%q' still has '_bucket'", iq.expr) + case patLE.MatchString(iq.expr): + return fmt.Errorf("rewritten query '%q' still has 'le'", iq.expr) + } if err := t.runInstantQuery(iq, cmd, engine); err != nil { return err } From 5e4e93c316a4468d7c447483d4e3efffa210e7c9 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Fri, 7 Jun 2024 19:24:05 +0800 Subject: [PATCH 09/30] fix lint Signed-off-by: Jeanette Tan --- model/histogram/float_histogram_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index ccd98e9942..1558a6d679 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -3152,7 +3152,7 @@ func TestFloatCustomBucketsIterators(t *testing.T) { } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { { it := c.h.AllBucketIterator() for i, b := range c.expPositiveBuckets { From 4c2aa872d49d3a1416964679f9e4a30db55c7db9 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Fri, 7 Jun 2024 20:21:05 +0800 Subject: [PATCH 10/30] update readme for testing framework Signed-off-by: Jeanette Tan --- promql/promqltest/README.md | 15 +++++++++++++++ 
1 file changed, 15 insertions(+) diff --git a/promql/promqltest/README.md b/promql/promqltest/README.md index 82ba92476e..05ead6a72c 100644 --- a/promql/promqltest/README.md +++ b/promql/promqltest/README.md @@ -63,6 +63,10 @@ load 1m Each `load` command is additive - it does not replace any data loaded in a previous `load` command. Use `clear` to remove all loaded data. +### Native histograms with custom buckets (NHCB) + +When loading a batch of classic histogram float series, you can optionally append the suffix `_with_nhcb` to convert them to native histograms with custom buckets and load both the original float series and the new histogram series. + ## `clear` command `clear` removes all data previously loaded with `load` commands. @@ -126,3 +130,14 @@ eval_fail instant at 1m ceil({__name__=~'testmetric1|testmetric2'}) eval_fail instant at 1m ceil({__name__=~'testmetric1|testmetric2'}) expected_fail_regexp (vector cannot contain metrics .*|something else went wrong) ``` + +### Native histograms with custom buckets (NHCB) + +For native histogram with custom buckets (NHCB) series that have been loaded with `load_with_nhcb`, you can use `eval_with_nhcb` instead on the queries of the classic histogram float bucket series to run additional queries on the +NHCB version. We use best effort heuristics to convert the query to its NHCB equivalent, and raise an error if it looks like the conversion was not effective. + +For example, `eval_with_nhcb instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))` is shorthand for running these queries: +``` +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) # Classic histogram +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job)) # NHCB +``` From b8cb12b989aac2bdcb5b443dbd3c5ae364617f31 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Fri, 7 Jun 2024 20:26:41 +0800 Subject: [PATCH 11/30] rename hist to histogram according to code review Signed-off-by: Jeanette Tan --- promql/promqltest/test.go | 68 +++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index ebd67a2176..4ce4b9bcbd 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -502,15 +502,15 @@ func getHistogramMetricBase(m labels.Labels, suffix string) (labels.Labels, uint } type tempHistogramWrapper struct { - metric labels.Labels - upperBounds []float64 - histByTs map[int64]tempHistogram + metric labels.Labels + upperBounds []float64 + histogramByTs map[int64]tempHistogram } func newTempHistogramWrapper() tempHistogramWrapper { return tempHistogramWrapper{ - upperBounds: []float64{}, - histByTs: map[int64]tempHistogram{}, + upperBounds: []float64{}, + histogramByTs: map[int64]tempHistogram{}, } } @@ -526,28 +526,28 @@ func newTempHistogram() tempHistogram { } } -func processClassicHistogramSeries(m labels.Labels, suffix string, histMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistWrapper func(*tempHistogramWrapper), updateHist func(*tempHistogram, float64)) { +func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogramWrapper func(*tempHistogramWrapper), updateHistogram func(*tempHistogram, float64)) { m2, m2hash := getHistogramMetricBase(m, suffix) - histWrapper, exists := histMap[m2hash] + histogramWrapper, exists := 
histogramMap[m2hash] if !exists { - histWrapper = newTempHistogramWrapper() + histogramWrapper = newTempHistogramWrapper() } - histWrapper.metric = m2 - if updateHistWrapper != nil { - updateHistWrapper(&histWrapper) + histogramWrapper.metric = m2 + if updateHistogramWrapper != nil { + updateHistogramWrapper(&histogramWrapper) } for _, s := range smpls { if s.H != nil { continue } - hist, exists := histWrapper.histByTs[s.T] + histogram, exists := histogramWrapper.histogramByTs[s.T] if !exists { - hist = newTempHistogram() + histogram = newTempHistogram() } - updateHist(&hist, s.F) - histWrapper.histByTs[s.T] = hist + updateHistogram(&histogram, s.F) + histogramWrapper.histogramByTs[s.T] = histogram } - histMap[m2hash] = histWrapper + histogramMap[m2hash] = histogramWrapper } func processUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64) ([]float64, *histogram.FloatHistogram) { @@ -581,7 +581,7 @@ func processUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64) ([]float64 // If classic histograms are defined, convert them into native histograms with custom // bounds and append the defined time series to the storage. func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { - histMap := map[uint64]tempHistogramWrapper{} + histogramMap := map[uint64]tempHistogramWrapper{} // Go through all the time series to collate classic histogram data // and organise them by timestamp. @@ -594,32 +594,32 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { if err != nil || math.IsNaN(le) { continue } - processClassicHistogramSeries(m, "_bucket", histMap, smpls, func(histWrapper *tempHistogramWrapper) { - histWrapper.upperBounds = append(histWrapper.upperBounds, le) - }, func(hist *tempHistogram, f float64) { - hist.bucketCounts[le] = f + processClassicHistogramSeries(m, "_bucket", histogramMap, smpls, func(histogramWrapper *tempHistogramWrapper) { + histogramWrapper.upperBounds = append(histogramWrapper.upperBounds, le) + }, func(histogram *tempHistogram, f float64) { + histogram.bucketCounts[le] = f }) case strings.HasSuffix(mName, "_count"): - processClassicHistogramSeries(m, "_count", histMap, smpls, nil, func(hist *tempHistogram, f float64) { - hist.count = f + processClassicHistogramSeries(m, "_count", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) { + histogram.count = f }) case strings.HasSuffix(mName, "_sum"): - processClassicHistogramSeries(m, "_sum", histMap, smpls, nil, func(hist *tempHistogram, f float64) { - hist.sum = f + processClassicHistogramSeries(m, "_sum", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) { + histogram.sum = f }) } } // Convert the collated classic histogram data into native histograms // with custom bounds and append them to the storage. 
- for _, histWrapper := range histMap { - upperBounds, fhBase := processUpperBoundsAndCreateBaseHistogram(histWrapper.upperBounds) - samples := make([]promql.Sample, 0, len(histWrapper.histByTs)) - for t, hist := range histWrapper.histByTs { + for _, histogramWrapper := range histogramMap { + upperBounds, fhBase := processUpperBoundsAndCreateBaseHistogram(histogramWrapper.upperBounds) + samples := make([]promql.Sample, 0, len(histogramWrapper.histogramByTs)) + for t, histogram := range histogramWrapper.histogramByTs { fh := fhBase.Copy() var prevCount, total float64 for i, le := range upperBounds { - currCount, exists := hist.bucketCounts[le] + currCount, exists := histogram.bucketCounts[le] if !exists { currCount = 0 } @@ -628,9 +628,9 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { total += count prevCount = currCount } - fh.Sum = hist.sum - if hist.count != 0 { - total = hist.count + fh.Sum = histogram.sum + if histogram.count != 0 { + total = histogram.count } fh.Count = total s := promql.Sample{T: t, H: fh.Compact(0)} @@ -641,7 +641,7 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { } sort.Slice(samples, func(i, j int) bool { return samples[i].T < samples[j].T }) for _, s := range samples { - if err := appendSample(a, s, histWrapper.metric); err != nil { + if err := appendSample(a, s, histogramWrapper.metric); err != nil { return err } } From 0793a26d96b56ea3110994d763da8c7f0678062a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 11 Jun 2024 11:08:25 +0200 Subject: [PATCH 12/30] native histograms: only reduce resolution for exponential histograms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we can only reduce the resolution of exponential native histograms, so checking the schema for that is slightly more precise than checking against max schema. 
Signed-off-by: György Krajcsovits --- scrape/target.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scrape/target.go b/scrape/target.go index 3afe500a9d..cbc39b38e5 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -365,7 +365,7 @@ type bucketLimitAppender struct { func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { - if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit && h.Schema > histogram.ExponentialSchemaMax { + if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(h.Schema) { return 0, errBucketLimit } for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { @@ -376,7 +376,7 @@ func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labe } } if fh != nil { - if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit && fh.Schema > histogram.ExponentialSchemaMax { + if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(fh.Schema) { return 0, errBucketLimit } for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { From c309f50ee7a00c102cb5009dc9d8761b1a49b0f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 18 Jun 2024 15:21:17 +0200 Subject: [PATCH 13/30] Add comment to state intent of check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- scrape/target.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scrape/target.go b/scrape/target.go index cbc39b38e5..ebe86c156b 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -365,6 +365,8 @@ type bucketLimitAppender struct { func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { + // Return with an early error if the histogram has too many buckets and the + // schema is not exponential, in which case we can't reduce the resolution. if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(h.Schema) { return 0, errBucketLimit } From 79020b1e8565de3578825ca756c4e0b9aea3c7f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 18 Jun 2024 15:22:03 +0200 Subject: [PATCH 14/30] Comment float histogram as well MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- scrape/target.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scrape/target.go b/scrape/target.go index ebe86c156b..9ef4471fbd 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -378,6 +378,8 @@ func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labe } } if fh != nil { + // Return with an early error if the histogram has too many buckets and the + // schema is not exponential, in which case we can't reduce the resolution. 
if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(fh.Schema) { return 0, errBucketLimit } From a6d788b1beabf015d33aab74dbb3b06331aaefb4 Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Thu, 20 Jun 2024 22:49:00 +0800 Subject: [PATCH 15/30] update missed suggested change from code review Signed-off-by: Jeanette Tan --- promql/promqltest/testdata/histograms.test | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test index b7214cfade..9abdaa7f64 100644 --- a/promql/promqltest/testdata/histograms.test +++ b/promql/promqltest/testdata/histograms.test @@ -25,9 +25,8 @@ load_with_nhcb 5m testhistogram2_bucket{le="6"} 0+3x10 testhistogram2_bucket{le="+Inf"} 0+3x10 -# Another test histogram, where there are 0 counts where there is -# an infinite bound, allowing us to calculate standard deviation -# and variance properly. +# Another test histogram, this time without any observations in the +Inf bucket. +# This enables a meaningful calculation of standard deviation and variance. load_with_nhcb 5m testhistogram3_bucket{le="0", start="positive"} 0+0x10 testhistogram3_bucket{le="0.1", start="positive"} 0+5x10 From fc9dc72028f0edd03fb7784e21758024d59ec41d Mon Sep 17 00:00:00 2001 From: Jeanette Tan Date: Thu, 20 Jun 2024 22:49:00 +0800 Subject: [PATCH 16/30] remove eval_with_nhcb Signed-off-by: Jeanette Tan --- promql/promqltest/README.md | 11 ---- promql/promqltest/test.go | 68 +++---------------- promql/promqltest/testdata/histograms.test | 76 +++++++++++----------- 3 files changed, 46 insertions(+), 109 deletions(-) diff --git a/promql/promqltest/README.md b/promql/promqltest/README.md index 05ead6a72c..af34354241 100644 --- a/promql/promqltest/README.md +++ b/promql/promqltest/README.md @@ -130,14 +130,3 @@ eval_fail instant at 1m ceil({__name__=~'testmetric1|testmetric2'}) eval_fail instant at 1m ceil({__name__=~'testmetric1|testmetric2'}) expected_fail_regexp (vector cannot contain metrics .*|something else went wrong) ``` - -### Native histograms with custom buckets (NHCB) - -For native histogram with custom buckets (NHCB) series that have been loaded with `load_with_nhcb`, you can use `eval_with_nhcb` instead on the queries of the classic histogram float bucket series to run additional queries on the -NHCB version. We use best effort heuristics to convert the query to its NHCB equivalent, and raise an error if it looks like the conversion was not effective. 
- -For example, `eval_with_nhcb instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))` is shorthand for running these queries: -``` -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) # Classic histogram -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job)) # NHCB -``` diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 4ce4b9bcbd..f3a773be8d 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -44,38 +44,10 @@ import ( ) var ( - patSpace = regexp.MustCompile("[\t ]+") - patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`) - patEvalInstant = regexp.MustCompile(`^eval(?:_(with_nhcb))?(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) - patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) - patWhitespace = regexp.MustCompile(`\s+`) - patBucket = regexp.MustCompile(`_bucket\b`) - patLE = regexp.MustCompile(`\ble\b`) - histogramBucketReplacements = []struct { - pattern *regexp.Regexp - repl string - }{ - { - pattern: regexp.MustCompile(`_bucket\b`), - repl: "", - }, - { - pattern: regexp.MustCompile(`\s+by\s+\(\s*le\s*\)`), - repl: "", - }, - { - pattern: regexp.MustCompile(`\(\s*le\s*,\s*`), - repl: "(", - }, - { - pattern: regexp.MustCompile(`,\s*le\s*,\s*`), - repl: ", ", - }, - { - pattern: regexp.MustCompile(`,\s*le\s*\)`), - repl: ")", - }, - } + patSpace = regexp.MustCompile("[\t ]+") + patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`) + patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) + patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) ) const ( @@ -251,19 +223,17 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { rangeParts := patEvalRange.FindStringSubmatch(lines[i]) if instantParts == nil && rangeParts == nil { - return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_with_nhcb][_fail|_warn|_ordered] instant [at ] ' or 'eval[_fail|_warn] range from to step '") + return i, nil, raise(i, "invalid evaluation command. 
Must be either 'eval[_fail|_warn|_ordered] instant [at ] ' or 'eval[_fail|_warn] range from to step '") } isInstant := instantParts != nil - var withNHCB bool var mod string var expr string if isInstant { - withNHCB = instantParts[1] == "with_nhcb" - mod = instantParts[2] - expr = instantParts[4] + mod = instantParts[1] + expr = instantParts[3] } else { mod = rangeParts[1] expr = rangeParts[5] @@ -291,7 +261,7 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { var cmd *evalCmd if isInstant { - at := instantParts[3] + at := instantParts[2] offset, err := model.ParseDuration(at) if err != nil { return i, nil, formatErr("invalid timestamp definition %q: %s", at, err) @@ -335,7 +305,6 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { case "warn": cmd.warn = true } - cmd.withNHCB = withNHCB for j := 1; i+1 < len(lines); j++ { i++ @@ -673,7 +642,6 @@ type evalCmd struct { isRange bool // if false, instant query fail, warn, ordered bool - withNHCB bool expectedFailMessage string expectedFailRegexp *regexp.Regexp @@ -1066,26 +1034,6 @@ func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error { if err := t.runInstantQuery(iq, cmd, engine); err != nil { return err } - if cmd.withNHCB { - if !strings.Contains(iq.expr, "_bucket") { - return fmt.Errorf("expected '_bucket' in the expression '%q'", iq.expr) - } - origExpr := iq.expr - for _, rep := range histogramBucketReplacements { - iq.expr = rep.pattern.ReplaceAllString(iq.expr, rep.repl) - } - switch { - case patWhitespace.ReplaceAllString(iq.expr, "") == patWhitespace.ReplaceAllString(origExpr, ""): - return fmt.Errorf("query rewrite of '%q' had no effect", iq.expr) - case patBucket.MatchString(iq.expr): - return fmt.Errorf("rewritten query '%q' still has '_bucket'", iq.expr) - case patLE.MatchString(iq.expr): - return fmt.Errorf("rewritten query '%q' still has 'le'", iq.expr) - } - if err := t.runInstantQuery(iq, cmd, engine); err != nil { - return err - } - } } return nil } diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test index 9abdaa7f64..e1fb1d85ac 100644 --- a/promql/promqltest/testdata/histograms.test +++ b/promql/promqltest/testdata/histograms.test @@ -105,149 +105,149 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m])) # Test histogram_quantile. -eval_with_nhcb instant at 50m histogram_quantile(0, testhistogram3_bucket) +eval instant at 50m histogram_quantile(0, testhistogram3_bucket) {start="positive"} 0 {start="negative"} -0.25 -eval_with_nhcb instant at 50m histogram_quantile(0.25, testhistogram3_bucket) +eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket) {start="positive"} 0.055 {start="negative"} -0.225 -eval_with_nhcb instant at 50m histogram_quantile(0.5, testhistogram3_bucket) +eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket) {start="positive"} 0.125 {start="negative"} -0.2 -eval_with_nhcb instant at 50m histogram_quantile(0.75, testhistogram3_bucket) +eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket) {start="positive"} 0.45 {start="negative"} -0.15 -eval_with_nhcb instant at 50m histogram_quantile(1, testhistogram3_bucket) +eval instant at 50m histogram_quantile(1, testhistogram3_bucket) {start="positive"} 1 {start="negative"} -0.1 # Quantile too low. 
-eval_with_nhcb_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket) +eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket) {start="positive"} -Inf {start="negative"} -Inf # Quantile too high. -eval_with_nhcb_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket) +eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket) {start="positive"} +Inf {start="negative"} +Inf # Quantile invalid. -eval_with_nhcb_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket) +eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket) {start="positive"} NaN {start="negative"} NaN # Quantile value in lowest bucket. -eval_with_nhcb instant at 50m histogram_quantile(0, testhistogram_bucket) +eval instant at 50m histogram_quantile(0, testhistogram_bucket) {start="positive"} 0 {start="negative"} -0.2 # Quantile value in highest bucket. -eval_with_nhcb instant at 50m histogram_quantile(1, testhistogram_bucket) +eval instant at 50m histogram_quantile(1, testhistogram_bucket) {start="positive"} 1 {start="negative"} 0.3 # Finally some useful quantiles. -eval_with_nhcb instant at 50m histogram_quantile(0.2, testhistogram_bucket) +eval instant at 50m histogram_quantile(0.2, testhistogram_bucket) {start="positive"} 0.048 {start="negative"} -0.2 -eval_with_nhcb instant at 50m histogram_quantile(0.5, testhistogram_bucket) +eval instant at 50m histogram_quantile(0.5, testhistogram_bucket) {start="positive"} 0.15 {start="negative"} -0.15 -eval_with_nhcb instant at 50m histogram_quantile(0.8, testhistogram_bucket) +eval instant at 50m histogram_quantile(0.8, testhistogram_bucket) {start="positive"} 0.72 {start="negative"} 0.3 # More realistic with rates. -eval_with_nhcb instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m])) {start="positive"} 0.048 {start="negative"} -0.2 -eval_with_nhcb instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m])) {start="positive"} 0.15 {start="negative"} -0.15 -eval_with_nhcb instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) {start="positive"} 0.72 {start="negative"} 0.3 # Want results exactly in the middle of the bucket. -eval_with_nhcb instant at 7m histogram_quantile(1./6., testhistogram2_bucket) +eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket) {} 1 -eval_with_nhcb instant at 7m histogram_quantile(0.5, testhistogram2_bucket) +eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket) {} 3 -eval_with_nhcb instant at 7m histogram_quantile(5./6., testhistogram2_bucket) +eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket) {} 5 -eval_with_nhcb instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m])) +eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m])) {} 1 -eval_with_nhcb instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m])) +eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m])) {} 3 -eval_with_nhcb instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) +eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) {} 5 # Aggregated histogram: Everything in one. 
-eval_with_nhcb instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.075 -eval_with_nhcb instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.1277777777777778 # Aggregated histogram: Everything in one. Now with avg, which does not change anything. -eval_with_nhcb instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.075 -eval_with_nhcb instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.12777777777777778 # Aggregated histogram: By instance. -eval_with_nhcb instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) {instance="ins1"} 0.075 {instance="ins2"} 0.075 -eval_with_nhcb instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 # Aggregated histogram: By job. -eval_with_nhcb instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) {job="job1"} 0.1 {job="job2"} 0.0642857142857143 -eval_with_nhcb instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) {job="job1"} 0.14 {job="job2"} 0.1125 # Aggregated histogram: By job and instance. -eval_with_nhcb instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval_with_nhcb instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.1166666666666667 # The unaggregated histogram for comparison. Same result as the previous one. 
-eval_with_nhcb instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m])) +eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m])) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval_with_nhcb instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 @@ -287,11 +287,11 @@ eval instant at 50m histogram_quantile(0.5, rate(mixed[5m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN -eval_with_nhcb instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN -eval_with_nhcb instant at 50m histogram_quantile(1, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN @@ -300,7 +300,7 @@ load_with_nhcb 5m empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10 empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10 -eval_with_nhcb instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m])) +eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m])) {instance="ins1", job="job1"} NaN # Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set @@ -310,4 +310,4 @@ load_with_nhcb 5m request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10 request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10 -eval_with_nhcb_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket$"}) +eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket$"}) From 2ced2f6aeca39027ca0b2954d32cd67b014ef745 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 12 May 2024 17:41:07 +0100 Subject: [PATCH 17/30] [PERF] Labels: faster varint for dedupelabels Including tests. Signed-off-by: Bryan Boreham --- model/labels/labels_dedupelabels.go | 64 ++++++++++++------------ model/labels/labels_dedupelabels_test.go | 50 ++++++++++++++++++ 2 files changed, 81 insertions(+), 33 deletions(-) create mode 100644 model/labels/labels_dedupelabels_test.go diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go index 972f5dc164..c0d84d02fd 100644 --- a/model/labels/labels_dedupelabels.go +++ b/model/labels/labels_dedupelabels.go @@ -104,25 +104,27 @@ func (t *nameTable) ToName(num int) string { return t.byNum[num] } +// "Varint" in this file is non-standard: we encode small numbers (up to 32767) in 2 bytes, +// because we expect most Prometheus to have more than 127 unique strings. +// And we don't encode numbers larger than 4 bytes because we don't expect more than 536,870,912 unique strings. func decodeVarint(data string, index int) (int, int) { - // Fast-path for common case of a single byte, value 0..127. 
- b := data[index] + b := int(data[index]) + int(data[index+1])<<8 + index += 2 + if b < 0x8000 { + return b, index + } + + value := int(b & 0x7FFF) + b = int(data[index]) index++ if b < 0x80 { - return int(b), index + return value | (b << 15), index } - value := int(b & 0x7F) - for shift := uint(7); ; shift += 7 { - // Just panic if we go of the end of data, since all Labels strings are constructed internally and - // malformed data indicates a bug, or memory corruption. - b := data[index] - index++ - value |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - return value, index + + value |= (b & 0x7f) << 15 + b = int(data[index]) + index++ + return value | (b << 22), index } func decodeString(t *nameTable, data string, index int) (string, int) { @@ -641,29 +643,24 @@ func marshalNumbersToSizedBuffer(nums []int, data []byte) int { func sizeVarint(x uint64) (n int) { // Most common case first - if x < 1<<7 { - return 1 + if x < 1<<15 { + return 2 } - if x >= 1<<56 { - return 9 + if x < 1<<22 { + return 3 } - if x >= 1<<28 { - x >>= 28 - n = 4 + if x >= 1<<29 { + panic("Number too large to represent") } - if x >= 1<<14 { - x >>= 14 - n += 2 - } - if x >= 1<<7 { - n++ - } - return n + 1 + return 4 } func encodeVarintSlow(data []byte, offset int, v uint64) int { offset -= sizeVarint(v) base := offset + data[offset] = uint8(v) + v >>= 8 + offset++ for v >= 1<<7 { data[offset] = uint8(v&0x7f | 0x80) v >>= 7 @@ -673,11 +670,12 @@ func encodeVarintSlow(data []byte, offset int, v uint64) int { return base } -// Special code for the common case that a value is less than 128 +// Special code for the common case that a value is less than 32768 func encodeVarint(data []byte, offset, v int) int { - if v < 1<<7 { - offset-- + if v < 1<<15 { + offset -= 2 data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) return offset } return encodeVarintSlow(data, offset, uint64(v)) diff --git a/model/labels/labels_dedupelabels_test.go b/model/labels/labels_dedupelabels_test.go new file mode 100644 index 0000000000..5ef9255c21 --- /dev/null +++ b/model/labels/labels_dedupelabels_test.go @@ -0,0 +1,50 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build dedupelabels + +package labels + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestVarint(t *testing.T) { + cases := []struct { + v int + expected []byte + }{ + {0, []byte{0, 0}}, + {1, []byte{1, 0}}, + {2, []byte{2, 0}}, + {0x7FFF, []byte{0xFF, 0x7F}}, + {0x8000, []byte{0x00, 0x80, 0x01}}, + {0x8001, []byte{0x01, 0x80, 0x01}}, + {0x3FFFFF, []byte{0xFF, 0xFF, 0x7F}}, + {0x400000, []byte{0x00, 0x80, 0x80, 0x01}}, + {0x400001, []byte{0x01, 0x80, 0x80, 0x01}}, + {0x1FFFFFFF, []byte{0xFF, 0xFF, 0xFF, 0x7F}}, + } + var buf [16]byte + for _, c := range cases { + n := encodeVarint(buf[:], len(buf), c.v) + require.Equal(t, len(c.expected), len(buf)-n) + require.Equal(t, c.expected, buf[n:]) + got, m := decodeVarint(string(buf[:]), n) + require.Equal(t, c.v, got) + require.Equal(t, len(buf), m) + } + require.Panics(t, func() { encodeVarint(buf[:], len(buf), 1<<29) }) +} From 2ba7bc94460304d3cf114e26537d58640f4a7941 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 5 Jun 2024 18:54:13 +0100 Subject: [PATCH 18/30] Labels: further optimisation for dedupelabels Inline (by copy-paste) the fast path of `decodeVarint` in various places where it gets called a lot. Signed-off-by: Bryan Boreham --- model/labels/labels_dedupelabels.go | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go index c0d84d02fd..0e5bb048be 100644 --- a/model/labels/labels_dedupelabels.go +++ b/model/labels/labels_dedupelabels.go @@ -113,7 +113,10 @@ func decodeVarint(data string, index int) (int, int) { if b < 0x8000 { return b, index } + return decodeVarintRest(b, data, index) +} +func decodeVarintRest(b int, data string, index int) (int, int) { value := int(b & 0x7FFF) b = int(data[index]) index++ @@ -128,8 +131,12 @@ func decodeVarint(data string, index int) (int, int) { } func decodeString(t *nameTable, data string, index int) (string, int) { - var num int - num, index = decodeVarint(data, index) + // Copy decodeVarint here, because the Go compiler says it's too big to inline. + num := int(data[index]) + int(data[index+1])<<8 + index += 2 + if num >= 0x8000 { + num, index = decodeVarintRest(num, data, index) + } return t.ToName(num), index } @@ -323,7 +330,12 @@ func (ls Labels) Get(name string) string { } else if lName[0] > name[0] { // Stop looking if we've gone past. break } - _, i = decodeVarint(ls.data, i) + // Copy decodeVarint here, because the Go compiler says it's too big to inline. + num := int(ls.data[i]) + int(ls.data[i+1])<<8 + i += 2 + if num >= 0x8000 { + _, i = decodeVarintRest(num, ls.data, i) + } } return "" } @@ -341,7 +353,12 @@ func (ls Labels) Has(name string) bool { } else if lName[0] > name[0] { // Stop looking if we've gone past. break } - _, i = decodeVarint(ls.data, i) + // Copy decodeVarint here, because the Go compiler says it's too big to inline. + num := int(ls.data[i]) + int(ls.data[i+1])<<8 + i += 2 + if num >= 0x8000 { + _, i = decodeVarintRest(num, ls.data, i) + } } return false } From 7a82e4b503790c96bcc83311bcd1964d8c3da04d Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 21 Jun 2024 16:49:07 +0100 Subject: [PATCH 19/30] Labels benchmarks: remove artefact of small symbol-tables Symbol tables with fewer than 128 entries, so everything can be represented as a single byte, are not realistic. Stuff the symbol table with fake entries before adding the real ones. 
Signed-off-by: Bryan Boreham --- model/labels/labels_test.go | 60 ++++++++++++++++++++++++++++--------- 1 file changed, 46 insertions(+), 14 deletions(-) diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 6464d007d3..d8910cdc85 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -466,6 +466,38 @@ func TestLabels_DropMetricName(t *testing.T) { require.True(t, Equal(original, check)) } +func ScratchBuilderForBenchmark() ScratchBuilder { + // (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.) + b := NewScratchBuilder(256) + for i := 0; i < 256; i++ { + b.Add(fmt.Sprintf("name%d", i), fmt.Sprintf("value%d", i)) + } + b.Labels() + b.Reset() + return b +} + +func NewForBenchmark(ls ...Label) Labels { + b := ScratchBuilderForBenchmark() + for _, l := range ls { + b.Add(l.Name, l.Value) + } + b.Sort() + return b.Labels() +} + +func FromStringsForBenchmark(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + b := ScratchBuilderForBenchmark() + for i := 0; i < len(ss); i += 2 { + b.Add(ss[i], ss[i+1]) + } + b.Sort() + return b.Labels() +} + // BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation // The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels. // In the following list, `old` is the linear search while `new` is the binary search implementation (without calling sort.Search, which performs even worse here) @@ -488,7 +520,7 @@ func BenchmarkLabels_Get(b *testing.B) { } for _, size := range []int{5, 10, maxLabels} { b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) { - labels := New(allLabels[:size]...) + labels := NewForBenchmark(allLabels[:size]...) 
for _, scenario := range []struct { desc, label string }{ @@ -520,33 +552,33 @@ var comparisonBenchmarkScenarios = []struct { }{ { "equal", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"), }, { "not equal", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"), }, { "different sizes", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value"), }, { "lots", - FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"), - FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"), + FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"), + FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"), }, { "real long equal", - FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), - FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), + FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), + FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), }, { "real long different end", - FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", 
"prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), - FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"), + FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), + FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"), }, } @@ -834,7 +866,7 @@ func BenchmarkBuilder(b *testing.B) { } func BenchmarkLabels_Copy(b *testing.B) { - l := New(benchmarkLabels...) + l := NewForBenchmark(benchmarkLabels...) for i := 0; i < b.N; i++ { l = l.Copy() From f45709e710fef40839db230c61428247ea67536c Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Mon, 24 Jun 2024 07:51:56 +0200 Subject: [PATCH 20/30] Update model/histogram/histogram_test.go Signed-off-by: George Krajcsovits --- model/histogram/histogram_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go index b6a0f209a1..9d8514b04b 100644 --- a/model/histogram/histogram_test.go +++ b/model/histogram/histogram_test.go @@ -613,9 +613,11 @@ func TestCustomBucketsHistogramToFloat(t *testing.T) { require.NoError(t, h.Validate()) for _, c := range cases { t.Run(c.name, func(t *testing.T) { + hStr := h.String() fh := h.ToFloat(c.fh) require.NoError(t, fh.Validate()) - require.Equal(t, h.String(), fh.String()) + require.Equal(t, hStr, h.String()) + require.Equal(t, hStr, fh.String()) }) } } From 505ffd34efa8d4ab6e38c3ff88c84bf6a2e5e77f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 24 Jun 2024 09:33:03 +0200 Subject: [PATCH 21/30] Fix lint error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some weird formatting issue in using comment suggestion Signed-off-by: György Krajcsovits --- model/histogram/histogram_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go index 9d8514b04b..edc8663c94 100644 --- a/model/histogram/histogram_test.go +++ b/model/histogram/histogram_test.go @@ -613,7 +613,7 @@ func TestCustomBucketsHistogramToFloat(t *testing.T) { require.NoError(t, h.Validate()) for _, c := range cases { t.Run(c.name, func(t *testing.T) { - hStr := h.String() + hStr := h.String() fh := h.ToFloat(c.fh) require.NoError(t, fh.Validate()) require.Equal(t, hStr, h.String()) From 3c9e3ee55259abc879e59fb1471afe1aa7ec3238 Mon 
Sep 17 00:00:00 2001 From: Pranshu Srivastava Date: Sun, 10 Sep 2023 04:27:20 +0530 Subject: [PATCH 22/30] parser: support underscores Support underscores in numbers, namely, decimals, hexadecimals, and exponentials. Fixes #12769 Signed-off-by: Pranshu Srivastava Signed-off-by: Pranshu Srivastava --- promql/parser/lex.go | 84 ++++++++++++++++--- promql/parser/lex_test.go | 78 +++++++++++++++++ promql/parser/parse_test.go | 4 +- web/ui/module/lezer-promql/src/promql.grammar | 2 +- 4 files changed, 155 insertions(+), 13 deletions(-) diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 4e3de2a668..98ce370b0f 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -313,6 +313,11 @@ func (l *Lexer) accept(valid string) bool { return false } +// is peeks and returns true if the next rune is contained in the provided string. +func (l *Lexer) is(valid string) bool { + return strings.ContainsRune(valid, l.peek()) +} + // acceptRun consumes a run of runes from the valid set. func (l *Lexer) acceptRun(valid string) { for strings.ContainsRune(valid, l.next()) { @@ -901,19 +906,78 @@ func acceptRemainingDuration(l *Lexer) bool { // scanNumber scans numbers of different formats. The scanned Item is // not necessarily a valid number. This case is caught by the parser. func (l *Lexer) scanNumber() bool { - digits := "0123456789" + // Modify the digit pattern if the number is hexadecimal. + digitPattern := "0123456789" // Disallow hexadecimal in series descriptions as the syntax is ambiguous. - if !l.seriesDesc && l.accept("0") && l.accept("xX") { - digits = "0123456789abcdefABCDEF" + if !l.seriesDesc && + l.accept("0") && l.accept("xX") { + l.accept("_") // eg., 0X_1FFFP-16 == 0.1249847412109375 + digitPattern = "0123456789abcdefABCDEF" } - l.acceptRun(digits) - if l.accept(".") { - l.acceptRun(digits) - } - if l.accept("eE") { - l.accept("+-") - l.acceptRun("0123456789") + const ( + // Define dot, exponent, and underscore patterns. + dotPattern = "." + exponentPattern = "eE" + underscorePattern = "_" + // Anti-patterns are rune sets that cannot follow their respective rune. + dotAntiPattern = "_." + exponentAntiPattern = "._eE" // and EOL. + underscoreAntiPattern = "._eE" // and EOL. + ) + // All numbers follow the prefix: [.][d][d._eE]* + l.accept(dotPattern) + l.accept(digitPattern) + // [d._eE]* hereon. + dotConsumed := false + exponentConsumed := false + for l.is(digitPattern + dotPattern + underscorePattern + exponentPattern) { + // "." cannot repeat. + if l.is(dotPattern) { + if dotConsumed { + l.accept(dotPattern) + return false + } + } + // "eE" cannot repeat. + if l.is(exponentPattern) { + if exponentConsumed { + l.accept(exponentPattern) + return false + } + } + // Handle dots. + if l.accept(dotPattern) { + dotConsumed = true + if l.accept(dotAntiPattern) { + return false + } + // Fractional hexadecimal literals are not allowed. + if len(digitPattern) > 10 /* 0x[\da-fA-F].[\d]+p[\d] */ { + return false + } + continue + } + // Handle exponents. + if l.accept(exponentPattern) { + exponentConsumed = true + l.accept("+-") + if l.accept(exponentAntiPattern) || l.peek() == eof { + return false + } + continue + } + // Handle underscores. + if l.accept(underscorePattern) { + if l.accept(underscoreAntiPattern) || l.peek() == eof { + return false + } + + continue + } + // Handle digits at the end since we already consumed before this loop. + l.acceptRun(digitPattern) } + // Next thing must not be alphanumeric unless it's the times token // for series repetitions. 
if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) { diff --git a/promql/parser/lex_test.go b/promql/parser/lex_test.go index f48c457c0c..ac9aa27625 100644 --- a/promql/parser/lex_test.go +++ b/promql/parser/lex_test.go @@ -132,6 +132,84 @@ var tests = []struct { }, { input: "0x123", expected: []Item{{NUMBER, 0, "0x123"}}, + }, { + input: "1..2", + fail: true, + }, { + input: "1.2.", + fail: true, + }, { + input: "00_1_23_4.56_7_8", + expected: []Item{{NUMBER, 0, "00_1_23_4.56_7_8"}}, + }, { + input: "00_1_23__4.56_7_8", + fail: true, + }, { + input: "00_1_23_4._56_7_8", + fail: true, + }, { + input: "00_1_23_4_.56_7_8", + fail: true, + }, { + input: "0x1_2_34", + expected: []Item{{NUMBER, 0, "0x1_2_34"}}, + }, { + input: "0x1_2__34", + fail: true, + }, { + input: "0x1_2__34.5_6p1", // "0x1.1p1"-based formats are not supported yet. + fail: true, + }, { + input: "0x1_2__34.5_6", + fail: true, + }, { + input: "0x1_2__34.56", + fail: true, + }, { + input: "1_e2", + fail: true, + }, { + input: "1.e2", + expected: []Item{{NUMBER, 0, "1.e2"}}, + }, { + input: "1e.2", + fail: true, + }, { + input: "1e+.2", + fail: true, + }, { + input: "1ee2", + fail: true, + }, { + input: "1e+e2", + fail: true, + }, { + input: "1e", + fail: true, + }, { + input: "1e+", + fail: true, + }, { + input: "1e1_2_34", + expected: []Item{{NUMBER, 0, "1e1_2_34"}}, + }, { + input: "1e_1_2_34", + fail: true, + }, { + input: "1e1_2__34", + fail: true, + }, { + input: "1e+_1_2_34", + fail: true, + }, { + input: "1e-_1_2_34", + fail: true, + }, { + input: "12_", + fail: true, + }, { + input: "_1_2", + expected: []Item{{IDENTIFIER, 0, "_1_2"}}, }, }, }, diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index c56d845947..a4fe28e5b8 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -513,12 +513,12 @@ var testExpr = []struct { { input: "2.5.", fail: true, - errMsg: "unexpected character: '.'", + errMsg: `1:1: parse error: bad number or duration syntax: "2.5."`, }, { input: "100..4", fail: true, - errMsg: `unexpected number ".4"`, + errMsg: `1:1: parse error: bad number or duration syntax: "100.."`, }, { input: "0deadbeef", diff --git a/web/ui/module/lezer-promql/src/promql.grammar b/web/ui/module/lezer-promql/src/promql.grammar index fd4edddf24..89aa23c79a 100644 --- a/web/ui/module/lezer-promql/src/promql.grammar +++ b/web/ui/module/lezer-promql/src/promql.grammar @@ -261,7 +261,7 @@ NumberLiteral { LineComment { "#" ![\n]* } number { - (std.digit+ ("." std.digit*)? | "." std.digit+) (("e" | "E") ("+" | "-")? std.digit+)? | + (std.digit+ (("_")? std.digit)* ("." std.digit+ (("_")? std.digit)*)? | "." std.digit+ (("_")? std.digit)*) (("e" | "E") ("+" | "-")? std.digit+ (("_")? std.digit)*)? | "0x" (std.digit | $[a-fA-F])+ } StringLiteral { // TODO: This is for JS, make this work for PromQL. 
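
To summarize the lex_test.go cases above: underscores are accepted between digits, but never adjacent to a dot, the exponent marker, or the end of the literal. A few examples in PromQL, taken from those test cases (1_000_000 is an extrapolation from the same rule, not one of the listed cases):

    1_000_000      # accepted: underscores separate digits
    0x1_2_34       # accepted: hexadecimal digits may be separated as well
    1e1_2_34       # accepted: underscores allowed between exponent digits
    1_e2           # rejected: underscore adjacent to the exponent marker
    12_            # rejected: trailing underscore
    _1_2           # lexed as an identifier, not a number
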
From 9290d1308dfe5286e8e67cff45460662d6b6b6dc Mon Sep 17 00:00:00 2001 From: Rick Rackow Date: Fri, 28 Jun 2024 22:18:04 +0200 Subject: [PATCH 23/30] fix(docs/querying): explain `ceil` behaviour more explicitly with examples (#11987) * fix(docs/querying): explain `ceil` behaviour more explicitly with examples Signed-off-by: Rick Rackow * fix(docs/querying): explain `floor` behaviour more explicitly with examples Signed-off-by: Rick Rackow --------- Signed-off-by: Rick Rackow Signed-off-by: Rick Rackow Co-authored-by: Rick Rackow --- docs/querying/functions.md | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index c8fda28655..9a552f697a 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -79,7 +79,12 @@ labels of the 1-element output vector from the input vector. ## `ceil()` `ceil(v instant-vector)` rounds the sample values of all elements in `v` up to -the nearest integer. +the nearest integer value greater than or equal to v. + +* `ceil(+Inf) = +Inf` +* `ceil(±0) = ±0` +* `ceil(1.49) = 2.0` +* `ceil(1.78) = 2.0` ## `changes()` @@ -173,7 +178,12 @@ Special cases are: ## `floor()` `floor(v instant-vector)` rounds the sample values of all elements in `v` down -to the nearest integer. +to the nearest integer value smaller than or equal to v. + +* `floor(+Inf) = +Inf` +* `floor(±0) = ±0` +* `floor(1.49) = 1.0` +* `floor(1.78) = 1.0` ## `histogram_avg()` From cd5a7b50209910954ed4dbe7d6fa0836511a6332 Mon Sep 17 00:00:00 2001 From: Raphael Silva Date: Fri, 28 Jun 2024 22:50:54 +0000 Subject: [PATCH 24/30] Make rules Manager Update method no-op after Close This has to be done because Close and Update methods are accessed concurrently. Signed-off-by: Raphael Silva --- rules/manager.go | 8 ++++++++ rules/manager_test.go | 17 +++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/rules/manager.go b/rules/manager.go index 063189e0ab..92675e71d5 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -190,10 +190,18 @@ func (m *Manager) Stop() { // Update the rule manager's state as the config requires. If // loading the new rules failed the old rule set is restored. +// This method will no-op in case the manager is already stopped func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc) error { m.mtx.Lock() defer m.mtx.Unlock() + // We cannot update a stopped manager + select { + case <-m.done: + return nil + default: + } + groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, files...) 
if errs != nil { diff --git a/rules/manager_test.go b/rules/manager_test.go index 3bf5fac32b..ec967df242 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -2099,6 +2099,23 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) { require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount)) } +func TestUpdateWhenStopped(t *testing.T) { + files := []string{"fixtures/rules.yaml"} + ruleManager := NewManager(&ManagerOptions{ + Context: context.Background(), + Logger: log.NewNopLogger(), + }) + ruleManager.start() + err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil) + require.NoError(t, err) + require.NotEmpty(t, ruleManager.groups) + + ruleManager.Stop() + // Updates following a stop are no-op + err = ruleManager.Update(10*time.Second, []string{}, labels.EmptyLabels(), "", nil) + require.NoError(t, err) +} + const artificialDelay = 250 * time.Millisecond func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.Int32, maxConcurrent int64) *ManagerOptions { From e0c9b2ee199b899891cc17b28a145db393ae23ec Mon Sep 17 00:00:00 2001 From: Raphael Silva Date: Fri, 28 Jun 2024 23:43:22 +0000 Subject: [PATCH 25/30] Fix linting errors Signed-off-by: Raphael Silva --- rules/manager.go | 2 +- rules/manager_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/rules/manager.go b/rules/manager.go index 92675e71d5..acc637e718 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -190,7 +190,7 @@ func (m *Manager) Stop() { // Update the rule manager's state as the config requires. If // loading the new rules failed the old rule set is restored. -// This method will no-op in case the manager is already stopped +// This method will no-op in case the manager is already stopped. func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc) error { m.mtx.Lock() defer m.mtx.Unlock() diff --git a/rules/manager_test.go b/rules/manager_test.go index ec967df242..51239e6c90 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -2111,7 +2111,7 @@ func TestUpdateWhenStopped(t *testing.T) { require.NotEmpty(t, ruleManager.groups) ruleManager.Stop() - // Updates following a stop are no-op + // Updates following a stop are no-op. 
err = ruleManager.Update(10*time.Second, []string{}, labels.EmptyLabels(), "", nil) require.NoError(t, err) } From f4b1fcb73e6f9f91518571ff296e258e5d6d0dbd Mon Sep 17 00:00:00 2001 From: Paulo Dias <44772900+paulojmdias@users.noreply.github.com> Date: Sun, 30 Jun 2024 18:18:18 +0100 Subject: [PATCH 26/30] discovery: add support for gathering flavor name in Openstack discovery (#14312) * feat: add support for gathering flavor name in Openstack discovery Signed-off-by: Paulo Dias * Update instance.go Co-authored-by: Ayoub Mrini Signed-off-by: Paulo Dias <44772900+paulojmdias@users.noreply.github.com> * Update configuration.md Co-authored-by: Ayoub Mrini Signed-off-by: Paulo Dias <44772900+paulojmdias@users.noreply.github.com> * fix: fix linting Signed-off-by: Paulo Dias * fix: fix instance type Signed-off-by: Paulo Dias * Update docs/configuration/configuration.md Co-authored-by: Simon Pasquier Signed-off-by: Paulo Dias <44772900+paulojmdias@users.noreply.github.com> --------- Signed-off-by: Paulo Dias Signed-off-by: Paulo Dias <44772900+paulojmdias@users.noreply.github.com> Co-authored-by: Ayoub Mrini Co-authored-by: Simon Pasquier --- discovery/openstack/instance.go | 16 +++++++++---- discovery/openstack/instance_test.go | 6 ++--- discovery/openstack/mock_test.go | 36 +++++++++++++++++----------- docs/configuration/configuration.md | 2 +- 4 files changed, 37 insertions(+), 23 deletions(-) diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index 750d414a2b..78c669e6f7 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -146,12 +146,18 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, openstackLabelUserID: model.LabelValue(s.UserID), } - flavorID, ok := s.Flavor["id"].(string) - if !ok { - level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string") - continue + flavorName, nameOk := s.Flavor["original_name"].(string) + // "original_name" is only available for microversion >= 2.47. It was added in favor of "id". 
+ if !nameOk { + flavorID, idOk := s.Flavor["id"].(string) + if !idOk { + level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string") + continue + } + labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID) + } else { + labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorName) } - labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID) imageID, ok := s.Image["id"].(string) if ok { diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index 9e124b6053..2b5ac1b89e 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -84,7 +84,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { }, { "__address__": model.LabelValue("10.0.0.31:0"), - "__meta_openstack_instance_flavor": model.LabelValue("1"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.medium"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba"), "__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), @@ -96,7 +96,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { }, { "__address__": model.LabelValue("10.0.0.33:0"), - "__meta_openstack_instance_flavor": model.LabelValue("4"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("merp"), @@ -108,7 +108,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { }, { "__address__": model.LabelValue("10.0.0.34:0"), - "__meta_openstack_instance_flavor": model.LabelValue("4"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("merp"), diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go index b1267db90e..4518f41166 100644 --- a/discovery/openstack/mock_test.go +++ b/discovery/openstack/mock_test.go @@ -427,13 +427,17 @@ const serverListBody = ` "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", "flavor": { - "id": "1", - "links": [ - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", - "rel": "bookmark" - } - ] + "vcpus": 2, + "ram": 4096, + "disk": 0, + "ephemeral": 0, + "swap": 0, + "original_name": "m1.medium", + "extra_specs": { + "aggregate_instance_extra_specs:general": "true", + "hw:mem_page_size": "large", + "hw:vif_multiqueue_enabled": "true" + } }, "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba", "security_groups": [ @@ -498,13 +502,17 @@ const serverListBody = ` "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", "flavor": { - "id": "4", - "links": [ - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", - "rel": "bookmark" - } - ] + "vcpus": 2, + "ram": 4096, + "disk": 0, + "ephemeral": 0, + "swap": 0, + "original_name": "m1.small", + "extra_specs": { + "aggregate_instance_extra_specs:general": "true", + "hw:mem_page_size": "large", + "hw:vif_multiqueue_enabled": "true" + } }, "id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", "security_groups": 
[ diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 86599c40e0..c03ed49715 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1363,7 +1363,7 @@ interface. The following meta labels are available on targets during [relabeling](#relabel_config): * `__meta_openstack_address_pool`: the pool of the private IP. -* `__meta_openstack_instance_flavor`: the flavor ID of the OpenStack instance. +* `__meta_openstack_instance_flavor`: the flavor name of the OpenStack instance, or the flavor ID if the flavor name isn't available. * `__meta_openstack_instance_id`: the OpenStack instance ID. * `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using. * `__meta_openstack_instance_name`: the OpenStack instance name. From ec31acaf0241bb061ae2621a0b8ed4131bf1146f Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Mon, 1 Jul 2024 10:12:50 +0200 Subject: [PATCH 27/30] FastRegexMatcher: small optimization for the literal prefix case Signed-off-by: Marco Pracucci --- model/labels/regexp.go | 48 +++++++++++++++++++++++++++---------- model/labels/regexp_test.go | 48 +++++++++++++++++++++---------------- 2 files changed, 63 insertions(+), 33 deletions(-) diff --git a/model/labels/regexp.go b/model/labels/regexp.go index 1e9db882bf..767bd6942f 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -549,11 +549,7 @@ func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher { // Right matcher with 1 fixed set match. case left == nil && len(matches) == 1: - return &literalPrefixStringMatcher{ - prefix: matches[0], - prefixCaseSensitive: matchesCaseSensitive, - right: right, - } + return newLiteralPrefixStringMatcher(matches[0], matchesCaseSensitive, right) // Left matcher with 1 fixed set match. case right == nil && len(matches) == 1: @@ -631,21 +627,47 @@ func (m *containsStringMatcher) Matches(s string) bool { return false } -// literalPrefixStringMatcher matches a string with the given literal prefix and right side matcher. -type literalPrefixStringMatcher struct { - prefix string - prefixCaseSensitive bool +func newLiteralPrefixStringMatcher(prefix string, prefixCaseSensitive bool, right StringMatcher) StringMatcher { + if prefixCaseSensitive { + return &literalPrefixSensitiveStringMatcher{ + prefix: prefix, + right: right, + } + } + + return &literalPrefixInsensitiveStringMatcher{ + prefix: prefix, + right: right, + } +} + +// literalPrefixSensitiveStringMatcher matches a string with the given literal case-sensitive prefix and right side matcher. +type literalPrefixSensitiveStringMatcher struct { + prefix string // The matcher that must match the right side. Can be nil. right StringMatcher } -func (m *literalPrefixStringMatcher) Matches(s string) bool { - // Ensure the prefix matches. - if m.prefixCaseSensitive && !strings.HasPrefix(s, m.prefix) { +func (m *literalPrefixSensitiveStringMatcher) Matches(s string) bool { + if !strings.HasPrefix(s, m.prefix) { return false } - if !m.prefixCaseSensitive && !hasPrefixCaseInsensitive(s, m.prefix) { + + // Ensure the right side matches. + return m.right.Matches(s[len(m.prefix):]) +} + +// literalPrefixInsensitiveStringMatcher matches a string with the given literal case-insensitive prefix and right side matcher. +type literalPrefixInsensitiveStringMatcher struct { + prefix string + + // The matcher that must match the right side. Can be nil. 
+ right StringMatcher +} + +func (m *literalPrefixInsensitiveStringMatcher) Matches(s string) bool { + if !hasPrefixCaseInsensitive(s, m.prefix) { return false } diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go index 008eae702c..fa5c96f420 100644 --- a/model/labels/regexp_test.go +++ b/model/labels/regexp_test.go @@ -376,7 +376,7 @@ func TestStringMatcherFromRegexp(t *testing.T) { {"10\\.0\\.(1|2)\\.+", nil}, {"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}}, {"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{}, suffix: "foo", suffixCaseSensitive: true}}, - {"foo-.*$", &literalPrefixStringMatcher{prefix: "foo-", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}}, + {"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: anyStringWithoutNewlineMatcher{}}}, {"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}}, {"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}, &equalStringMatcher{s: "foo", caseSensitive: true}})}, {"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: false}}})}, @@ -391,15 +391,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { {".*foo.*bar.*", nil}, {`\d*`, nil}, {".", nil}, - {"/|/bar.*", &literalPrefixStringMatcher{prefix: "/", prefixCaseSensitive: true, right: orStringMatcher{emptyStringMatcher{}, &literalPrefixStringMatcher{prefix: "bar", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}}}}, + {"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: anyStringWithoutNewlineMatcher{}}}}}, // This one is not supported because `stringMatcherFromRegexp` is not reentrant for syntax.OpConcat. // It would make the code too complex to handle it. {"(.+)/(foo.*|bar$)", nil}, // Case sensitive alternate with same literal prefix and .* suffix. - {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixStringMatcher{prefix: "xyz-016a-ixb-", prefixCaseSensitive: true, right: orStringMatcher{&literalPrefixStringMatcher{prefix: "dp", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}, &literalPrefixStringMatcher{prefix: "op", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}}}}, + {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: anyStringWithoutNewlineMatcher{}}}}}, // Case insensitive alternate with same literal prefix and .* suffix. 
- {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixStringMatcher{prefix: "XYZ-016A-IXB-", prefixCaseSensitive: false, right: orStringMatcher{&literalPrefixStringMatcher{prefix: "DP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}, &literalPrefixStringMatcher{prefix: "OP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}}}}, - {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixStringMatcher{prefix: "XYZ-016A-IXB-", prefixCaseSensitive: false, right: orStringMatcher{&literalPrefixStringMatcher{prefix: "DP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}, &literalPrefixStringMatcher{prefix: "OP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}}}}, + {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}}, + {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}}, // Concatenated variable length selectors are not supported. {"foo.*.*", nil}, {"foo.+.+", nil}, @@ -408,9 +408,9 @@ func TestStringMatcherFromRegexp(t *testing.T) { {"aaa.?.?", nil}, {"aaa.?.*", nil}, // Regexps with ".?". - {"ext.?|xfs", orStringMatcher{&literalPrefixStringMatcher{prefix: "ext", prefixCaseSensitive: true, right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, - {"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixStringMatcher{prefix: "ext", prefixCaseSensitive: true, right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, - {"foo.?", &literalPrefixStringMatcher{prefix: "foo", prefixCaseSensitive: true, right: &zeroOrOneCharacterStringMatcher{matchNL: false}}}, + {"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, + {"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, + {"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}}, {"f.?o", nil}, } { c := c @@ -480,10 +480,13 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) { re := regexp.MustCompile("^" + c.pattern + "$") - // Pre-condition check: ensure it contains literalPrefixStringMatcher. + // Pre-condition check: ensure it contains literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher. 
numPrefixMatchers := 0 visitStringMatcher(matcher, func(matcher StringMatcher) { - if _, ok := matcher.(*literalPrefixStringMatcher); ok { + if _, ok := matcher.(*literalPrefixSensitiveStringMatcher); ok { + numPrefixMatchers++ + } + if _, ok := matcher.(*literalPrefixInsensitiveStringMatcher); ok { numPrefixMatchers++ } }) @@ -1074,20 +1077,14 @@ func BenchmarkZeroOrOneCharacterStringMatcher(b *testing.B) { } } -func TestLiteralPrefixStringMatcher(t *testing.T) { - m := &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: true, right: &emptyStringMatcher{}} +func TestLiteralPrefixSensitiveStringMatcher(t *testing.T) { + m := &literalPrefixSensitiveStringMatcher{prefix: "mar", right: &emptyStringMatcher{}} require.True(t, m.Matches("mar")) require.False(t, m.Matches("marco")) require.False(t, m.Matches("ma")) require.False(t, m.Matches("mAr")) - m = &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: false, right: &emptyStringMatcher{}} - require.True(t, m.Matches("mar")) - require.False(t, m.Matches("marco")) - require.False(t, m.Matches("ma")) - require.True(t, m.Matches("mAr")) - - m = &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: true, right: &equalStringMatcher{s: "co", caseSensitive: false}} + m = &literalPrefixSensitiveStringMatcher{prefix: "mar", right: &equalStringMatcher{s: "co", caseSensitive: false}} require.True(t, m.Matches("marco")) require.True(t, m.Matches("marCO")) require.False(t, m.Matches("MARco")) @@ -1095,6 +1092,14 @@ func TestLiteralPrefixStringMatcher(t *testing.T) { require.False(t, m.Matches("marcopracucci")) } +func TestLiteralPrefixInsensitiveStringMatcher(t *testing.T) { + m := &literalPrefixInsensitiveStringMatcher{prefix: "mar", right: &emptyStringMatcher{}} + require.True(t, m.Matches("mar")) + require.False(t, m.Matches("marco")) + require.False(t, m.Matches("ma")) + require.True(t, m.Matches("mAr")) +} + func TestLiteralSuffixStringMatcher(t *testing.T) { m := &literalSuffixStringMatcher{left: &emptyStringMatcher{}, suffix: "co", suffixCaseSensitive: true} require.True(t, m.Matches("co")) @@ -1184,7 +1189,10 @@ func visitStringMatcher(matcher StringMatcher, callback func(matcher StringMatch visitStringMatcher(casted.right, callback) } - case *literalPrefixStringMatcher: + case *literalPrefixSensitiveStringMatcher: + visitStringMatcher(casted.right, callback) + + case *literalPrefixInsensitiveStringMatcher: visitStringMatcher(casted.right, callback) case *literalSuffixStringMatcher: From 24b78bef32e83ba0d882fb3ada3819e83d6b387d Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Mon, 1 Jul 2024 11:02:09 +0200 Subject: [PATCH 28/30] otlp: Clean up exponential histogram code slightly Signed-off-by: Arve Knudsen --- .../prometheusremotewrite/helper.go | 7 ++++--- .../prometheusremotewrite/histograms.go | 17 +++++++++-------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 68be82e443..acd400320e 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -182,12 +182,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa if i+1 >= len(extras) { break } - _, found := l[extras[i]] + + name := extras[i] + _, found := l[name] if found && logOnOverwrite { - log.Println("label " + extras[i] + " is overwritten. 
Check if Prometheus reserved labels are used.") + log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.") } // internal labels should be maintained - name := extras[i] if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { name = prometheustranslator.NormalizeLabel(name) } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index 31d343fe4d..e26ce6a575 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -31,9 +31,15 @@ import ( const defaultZeroThreshold = 1e-128 func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice, - resource pcommon.Resource, settings Settings, baseName string) error { + resource pcommon.Resource, settings Settings, promName string) error { for x := 0; x < dataPoints.Len(); x++ { pt := dataPoints.At(x) + + histogram, err := exponentialToNativeHistogram(pt) + if err != nil { + return err + } + lbls := createAttributes( resource, pt.Attributes(), @@ -41,14 +47,9 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr nil, true, model.MetricNameLabel, - baseName, + promName, ) ts, _ := c.getOrCreateTimeSeries(lbls) - - histogram, err := exponentialToNativeHistogram(pt) - if err != nil { - return err - } ts.Histograms = append(ts.Histograms, histogram) exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt) @@ -58,7 +59,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr return nil } -// exponentialToNativeHistogram translates OTel Exponential Histogram data point +// exponentialToNativeHistogram translates OTel Exponential Histogram data point // to Prometheus Native Histogram. func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, error) { scale := p.Scale() From 9595b174e5eebb4e24fc95044abfd9a946fd1ed1 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Mon, 1 Jul 2024 13:35:40 +0200 Subject: [PATCH 29/30] otlp: Document regular and exponential histogram conversions Signed-off-by: Arve Knudsen --- .../remote/otlptranslator/prometheusremotewrite/helper.go | 7 +++++++ .../otlptranslator/prometheusremotewrite/histograms.go | 2 ++ 2 files changed, 9 insertions(+) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index acd400320e..2571338532 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -220,6 +220,13 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { return false } +// addHistogramDataPoints adds OTel histogram data points to the corresponding Prometheus time series +// as classical histogram samples. +// +// Note that we can't convert to native histograms, since these have exponential buckets and don't line up +// with the user defined bucket boundaries of non-exponential OTel histograms. +// However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets: +// https://github.com/prometheus/prometheus/issues/13485. 
func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.HistogramDataPointSlice, resource pcommon.Resource, settings Settings, baseName string) { for x := 0; x < dataPoints.Len(); x++ { diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index e26ce6a575..21b3f5dd9f 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -30,6 +30,8 @@ import ( const defaultZeroThreshold = 1e-128 +// addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series +// as native histogram samples. func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice, resource pcommon.Resource, settings Settings, promName string) error { for x := 0; x < dataPoints.Len(); x++ { From c53fdade42c129c6999bcb5659f28fe04d9c7333 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 23:29:21 +0000 Subject: [PATCH 30/30] build(deps): bump github.com/prometheus/common Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.54.0 to 0.55.0. - [Release notes](https://github.com/prometheus/common/releases) - [Changelog](https://github.com/prometheus/common/blob/main/RELEASE.md) - [Commits](https://github.com/prometheus/common/compare/v0.54.0...v0.55.0) --- updated-dependencies: - dependency-name: github.com/prometheus/common dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 17 +++++---- documentation/examples/remote_storage/go.sum | 40 ++++++++++---------- 2 files changed, 29 insertions(+), 28 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 5278cae096..d4f19749dc 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -9,7 +9,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.5 github.com/prometheus/client_golang v1.19.1 - github.com/prometheus/common v0.54.0 + github.com/prometheus/common v0.55.0 github.com/prometheus/prometheus v0.52.1 github.com/stretchr/testify v1.9.0 ) @@ -41,12 +41,13 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.opentelemetry.io/collector/featuregate v1.5.0 // indirect go.opentelemetry.io/collector/pdata v1.5.0 // indirect @@ -57,15 +58,15 @@ require ( go.opentelemetry.io/otel/trace v1.25.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.22.0 // indirect - 
golang.org/x/net v0.24.0 // indirect - golang.org/x/oauth2 v0.19.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect google.golang.org/grpc v1.63.2 // indirect - google.golang.org/protobuf v1.34.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apimachinery v0.29.3 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 9717fceaed..ec04347111 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -269,16 +269,16 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prometheus v0.52.1 h1:BrQ29YG+mzdGh8DgHPirHbeMGNqtL+INe0rqg7ttBJ4= github.com/prometheus/prometheus v0.52.1/go.mod h1:3z74cVsmVH0iXOR5QBjB7Pa6A0KJeEAK5A6UsmAFb1g= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= @@ -330,8 +330,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.24.0 
h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -351,12 +351,12 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= -golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -380,25 +380,25 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= 
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= -golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -419,8 +419,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4= -google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=