Add custom buckets to native histogram model (#13592)

* add custom buckets to native histogram model
* simple copy for custom bounds
* return errors for unsupported add/sub operations
* add test cases for string and update appendhistogram in scrape to account for new schema
* check fields which are supposed to be unused but may affect results in equals
* allow appending custom buckets histograms regardless of max schema

Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
This commit is contained in:
György Krajcsovits 2024-02-28 14:06:43 +01:00
parent 11932cd345
commit 5d0a0a7542
15 changed files with 2036 additions and 271 deletions
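
Before the per-file diffs, a minimal sketch of what the change enables from the caller's side, assuming the usual github.com/prometheus/prometheus/model/histogram import path and purely illustrative values:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Schema 127 (CustomBucketsSchema) selects custom buckets: boundaries come
	// from CustomBounds instead of the exponential formula, and the zero bucket
	// and the negative side are unused.
	h := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           6,
		Sum:             42,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		PositiveBuckets: []float64{1, 2, 3}, // Buckets [-Inf,1], (1,5] and (5,+Inf].
		CustomBounds:    []float64{1, 5},
	}
	fmt.Println(h.UsesCustomBuckets()) // true
	fmt.Println(h.Validate())          // <nil>
}
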

View file

@ -30,11 +30,12 @@ import (
type FloatHistogram struct { type FloatHistogram struct {
// Counter reset information. // Counter reset information.
CounterResetHint CounterResetHint CounterResetHint CounterResetHint
// Currently valid schema numbers are -4 <= n <= 8. They are all for // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
// base-2 bucket schemas, where 1 is a bucket boundary in each case, and // They are all for base-2 bucket schemas, where 1 is a bucket boundary in
// then each power of two is divided into 2^n logarithmic buckets. Or // each case, and then each power of two is divided into 2^n logarithmic buckets.
// in other words, each bucket boundary is the previous boundary times // Or in other words, each bucket boundary is the previous boundary times
// 2^(2^-n). // 2^(2^-n). Another valid schema number is 127 for custom buckets, defined by
// the CustomBounds field.
Schema int32 Schema int32
// Width of the zero bucket. // Width of the zero bucket.
ZeroThreshold float64 ZeroThreshold float64
@ -49,6 +50,16 @@ type FloatHistogram struct {
// Observation counts in buckets. Each represents an absolute count and // Observation counts in buckets. Each represents an absolute count and
// must be zero or positive. // must be zero or positive.
PositiveBuckets, NegativeBuckets []float64 PositiveBuckets, NegativeBuckets []float64
// Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
// This slice is interned, to be treated as immutable and copied by reference.
// These numbers should be strictly increasing. This field is only used when the
// schema is 127, and the ZeroThreshold, ZeroCount, NegativeSpans and NegativeBuckets
// fields are not used.
CustomBounds []float64
}
func (h *FloatHistogram) UsesCustomBuckets() bool {
return IsCustomBucketsSchema(h.Schema)
} }
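
To make the layout concrete, here is a small sketch that decodes spans, buckets and CustomBounds by hand; boundsFor is a hypothetical helper mirroring what getBound does for schema 127 (illustrative values, usual import path assumed):

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/histogram"
)

// boundsFor mirrors the custom-buckets boundary lookup: index len(bounds) maps
// to +Inf, index -1 to -Inf, everything else to bounds[idx].
func boundsFor(idx int, bounds []float64) (lower, upper float64) {
	at := func(i int) float64 {
		switch {
		case i == len(bounds):
			return math.Inf(1)
		case i == -1:
			return math.Inf(-1)
		default:
			return bounds[i]
		}
	}
	return at(idx - 1), at(idx)
}

func main() {
	fh := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 1}},
		PositiveBuckets: []float64{3, 1, 4},
		CustomBounds:    []float64{0.5, 1, 2.5},
	}
	// Walk the spans exactly like the iterators do: Offset skips bucket indices,
	// Length consumes entries from PositiveBuckets.
	idx, bkt := 0, 0
	for _, s := range fh.PositiveSpans {
		idx += int(s.Offset)
		for i := uint32(0); i < s.Length; i++ {
			lo, up := boundsFor(idx, fh.CustomBounds)
			fmt.Printf("(%g, %g]: %g\n", lo, up, fh.PositiveBuckets[bkt])
			idx++
			bkt++
		}
	}
	// Prints (-Inf, 0.5]: 3, (0.5, 1]: 1 and (2.5, +Inf]: 4; the library
	// additionally marks the very first bucket as lower-inclusive.
}
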
// Copy returns a deep copy of the Histogram. // Copy returns a deep copy of the Histogram.
@ -56,28 +67,34 @@ func (h *FloatHistogram) Copy() *FloatHistogram {
c := FloatHistogram{ c := FloatHistogram{
CounterResetHint: h.CounterResetHint, CounterResetHint: h.CounterResetHint,
Schema: h.Schema, Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.ZeroCount,
Count: h.Count, Count: h.Count,
Sum: h.Sum, Sum: h.Sum,
} }
if h.UsesCustomBuckets() {
c.CustomBounds = h.CustomBounds
} else {
c.ZeroThreshold = h.ZeroThreshold
c.ZeroCount = h.ZeroCount
if len(h.NegativeSpans) != 0 {
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
copy(c.NegativeSpans, h.NegativeSpans)
}
if len(h.NegativeBuckets) != 0 {
c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
copy(c.NegativeBuckets, h.NegativeBuckets)
}
}
if len(h.PositiveSpans) != 0 { if len(h.PositiveSpans) != 0 {
c.PositiveSpans = make([]Span, len(h.PositiveSpans)) c.PositiveSpans = make([]Span, len(h.PositiveSpans))
copy(c.PositiveSpans, h.PositiveSpans) copy(c.PositiveSpans, h.PositiveSpans)
} }
if len(h.NegativeSpans) != 0 {
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
copy(c.NegativeSpans, h.NegativeSpans)
}
if len(h.PositiveBuckets) != 0 { if len(h.PositiveBuckets) != 0 {
c.PositiveBuckets = make([]float64, len(h.PositiveBuckets)) c.PositiveBuckets = make([]float64, len(h.PositiveBuckets))
copy(c.PositiveBuckets, h.PositiveBuckets) copy(c.PositiveBuckets, h.PositiveBuckets)
} }
if len(h.NegativeBuckets) != 0 {
c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
copy(c.NegativeBuckets, h.NegativeBuckets)
}
return &c return &c
} }
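
One design choice in Copy above is worth calling out: the bucket slices are deep-copied, but CustomBounds is treated as interned and shared by reference. A small illustration with made-up values:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	orig := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 2},
		CustomBounds:    []float64{10, 20},
	}
	c := orig.Copy()

	// The bucket slices get their own backing arrays...
	c.PositiveBuckets[0] = 99
	fmt.Println(orig.PositiveBuckets[0]) // still 1

	// ...but CustomBounds is shared, so callers must treat it as immutable.
	fmt.Println(&c.CustomBounds[0] == &orig.CustomBounds[0]) // true
}
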
@ -87,22 +104,35 @@ func (h *FloatHistogram) Copy() *FloatHistogram {
func (h *FloatHistogram) CopyTo(to *FloatHistogram) { func (h *FloatHistogram) CopyTo(to *FloatHistogram) {
to.CounterResetHint = h.CounterResetHint to.CounterResetHint = h.CounterResetHint
to.Schema = h.Schema to.Schema = h.Schema
to.ZeroThreshold = h.ZeroThreshold
to.ZeroCount = h.ZeroCount
to.Count = h.Count to.Count = h.Count
to.Sum = h.Sum to.Sum = h.Sum
if h.UsesCustomBuckets() {
to.ZeroThreshold = 0
to.ZeroCount = 0
to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
to.CustomBounds = h.CustomBounds
} else {
to.ZeroThreshold = h.ZeroThreshold
to.ZeroCount = h.ZeroCount
to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
copy(to.NegativeSpans, h.NegativeSpans)
to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
copy(to.NegativeBuckets, h.NegativeBuckets)
to.CustomBounds = clearIfNotNil(to.CustomBounds)
}
to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
copy(to.PositiveSpans, h.PositiveSpans) copy(to.PositiveSpans, h.PositiveSpans)
to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
copy(to.NegativeSpans, h.NegativeSpans)
to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
copy(to.PositiveBuckets, h.PositiveBuckets) copy(to.PositiveBuckets, h.PositiveBuckets)
to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
copy(to.NegativeBuckets, h.NegativeBuckets)
} }
// CopyToSchema works like Copy, but the returned deep copy has the provided // CopyToSchema works like Copy, but the returned deep copy has the provided
@ -113,6 +143,12 @@ func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram {
// Fast path. // Fast path.
return h.Copy() return h.Copy()
} }
if h.UsesCustomBuckets() {
panic(fmt.Errorf("cannot reduce resolution to %d when there are custom buckets", targetSchema))
}
if IsCustomBucketsSchema(targetSchema) {
panic("cannot reduce resolution to custom buckets schema")
}
if targetSchema > h.Schema { if targetSchema > h.Schema {
panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema)) panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema))
} }
@ -212,12 +248,16 @@ func (h *FloatHistogram) TestExpression() string {
// ZeroBucket returns the zero bucket. // ZeroBucket returns the zero bucket.
func (h *FloatHistogram) ZeroBucket() Bucket[float64] { func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
if h.UsesCustomBuckets() {
panic("histograms with custom buckets have no zero bucket")
}
return Bucket[float64]{ return Bucket[float64]{
Lower: -h.ZeroThreshold, Lower: -h.ZeroThreshold,
Upper: h.ZeroThreshold, Upper: h.ZeroThreshold,
LowerInclusive: true, LowerInclusive: true,
UpperInclusive: true, UpperInclusive: true,
Count: h.ZeroCount, Count: h.ZeroCount,
// Index is irrelevant for the zero bucket.
} }
} }
@ -263,9 +303,18 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
// //
// The method reconciles differences in the zero threshold and in the schema, and // The method reconciles differences in the zero threshold and in the schema, and
// changes them if needed. The other histogram will not be modified in any case. // changes them if needed. The other histogram will not be modified in any case.
// Adding is currently only supported between 2 exponential histograms, or between
// 2 custom buckets histograms with the exact same custom bounds.
// //
// This method returns a pointer to the receiving histogram for convenience. // This method returns a pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
return nil, ErrHistogramsIncompatibleSchema
}
if h.UsesCustomBuckets() && !floatBucketsMatch(h.CustomBounds, other.CustomBounds) {
return nil, ErrHistogramsIncompatibleBounds
}
switch { switch {
case other.CounterResetHint == h.CounterResetHint: case other.CounterResetHint == h.CounterResetHint:
// Adding apples to apples, all good. No need to change anything. // Adding apples to apples, all good. No need to change anything.
@ -290,19 +339,28 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
// TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
} }
otherZeroCount := h.reconcileZeroBuckets(other) if !h.UsesCustomBuckets() {
h.ZeroCount += otherZeroCount otherZeroCount := h.reconcileZeroBuckets(other)
h.ZeroCount += otherZeroCount
}
h.Count += other.Count h.Count += other.Count
h.Sum += other.Sum h.Sum += other.Sum
var ( var (
hPositiveSpans = h.PositiveSpans hPositiveSpans = h.PositiveSpans
hPositiveBuckets = h.PositiveBuckets hPositiveBuckets = h.PositiveBuckets
hNegativeSpans = h.NegativeSpans
hNegativeBuckets = h.NegativeBuckets
otherPositiveSpans = other.PositiveSpans otherPositiveSpans = other.PositiveSpans
otherPositiveBuckets = other.PositiveBuckets otherPositiveBuckets = other.PositiveBuckets
)
if h.UsesCustomBuckets() {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
return h, nil
}
var (
hNegativeSpans = h.NegativeSpans
hNegativeBuckets = h.NegativeBuckets
otherNegativeSpans = other.NegativeSpans otherNegativeSpans = other.NegativeSpans
otherNegativeBuckets = other.NegativeBuckets otherNegativeBuckets = other.NegativeBuckets
) )
@ -321,24 +379,40 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
return h return h, nil
} }
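
Because Add (and Sub below) now return an error instead of a bare pointer, callers have to handle the two new failure modes; a sketch with illustrative values (note that Add still mutates its receiver on success):

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	mk := func(bounds, buckets []float64) *histogram.FloatHistogram {
		return &histogram.FloatHistogram{
			Schema:          histogram.CustomBucketsSchema,
			Count:           buckets[0] + buckets[1],
			PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
			PositiveBuckets: buckets,
			CustomBounds:    bounds,
		}
	}

	a := mk([]float64{1, 2}, []float64{3, 4})
	b := mk([]float64{1, 2}, []float64{1, 1})
	if sum, err := a.Add(b); err == nil {
		fmt.Println(sum.Count) // 9: identical custom bounds, buckets are added.
	}

	// Different custom bounds cannot be reconciled.
	c := mk([]float64{5, 10}, []float64{1, 1})
	_, err := a.Add(c)
	fmt.Println(errors.Is(err, histogram.ErrHistogramsIncompatibleBounds)) // true

	// Mixing custom buckets with an exponential histogram is rejected as well.
	exp := &histogram.FloatHistogram{
		Schema:          0,
		Count:           1,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{1},
	}
	_, err = a.Add(exp)
	fmt.Println(errors.Is(err, histogram.ErrHistogramsIncompatibleSchema)) // true
}
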
// Sub works like Add but subtracts the other histogram. // Sub works like Add but subtracts the other histogram.
func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) {
otherZeroCount := h.reconcileZeroBuckets(other) if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
h.ZeroCount -= otherZeroCount return nil, ErrHistogramsIncompatibleSchema
}
if h.UsesCustomBuckets() && !floatBucketsMatch(h.CustomBounds, other.CustomBounds) {
return nil, ErrHistogramsIncompatibleBounds
}
if !h.UsesCustomBuckets() {
otherZeroCount := h.reconcileZeroBuckets(other)
h.ZeroCount -= otherZeroCount
}
h.Count -= other.Count h.Count -= other.Count
h.Sum -= other.Sum h.Sum -= other.Sum
var ( var (
hPositiveSpans = h.PositiveSpans hPositiveSpans = h.PositiveSpans
hPositiveBuckets = h.PositiveBuckets hPositiveBuckets = h.PositiveBuckets
hNegativeSpans = h.NegativeSpans
hNegativeBuckets = h.NegativeBuckets
otherPositiveSpans = other.PositiveSpans otherPositiveSpans = other.PositiveSpans
otherPositiveBuckets = other.PositiveBuckets otherPositiveBuckets = other.PositiveBuckets
)
if h.UsesCustomBuckets() {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
return h, nil
}
var (
hNegativeSpans = h.NegativeSpans
hNegativeBuckets = h.NegativeBuckets
otherNegativeSpans = other.NegativeSpans otherNegativeSpans = other.NegativeSpans
otherNegativeBuckets = other.NegativeBuckets otherNegativeBuckets = other.NegativeBuckets
) )
@ -356,7 +430,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
return h return h, nil
} }
// Equals returns true if the given float histogram matches exactly. // Equals returns true if the given float histogram matches exactly.
@ -365,31 +439,44 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
// but they must represent the same bucket layout to match. // but they must represent the same bucket layout to match.
// Sum, Count, ZeroCount and bucket values are compared based on their bit patterns // Sum, Count, ZeroCount and bucket values are compared based on their bit patterns
// because this method is about data equality rather than mathematical equality. // because this method is about data equality rather than mathematical equality.
// We ignore fields that are not used based on the exponential / custom buckets schema,
// but check fields where differences may cause unintended behaviour even if they are not
// supposed to be used according to the schema.
func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
if h2 == nil { if h2 == nil {
return false return false
} }
if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || if h.Schema != h2.Schema ||
math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) ||
math.Float64bits(h.Count) != math.Float64bits(h2.Count) || math.Float64bits(h.Count) != math.Float64bits(h2.Count) ||
math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
return false return false
} }
if h.UsesCustomBuckets() {
if !floatBucketsMatch(h.CustomBounds, h2.CustomBounds) {
return false
}
}
if h.ZeroThreshold != h2.ZeroThreshold ||
math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) {
return false
}
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
return false
}
if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
return false return false
} }
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
return false return false
} }
if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
return false
}
return true return true
} }
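
The Equals comment above is subtle: CustomBounds is only compared for custom-buckets histograms, while the zero-bucket fields are compared even when the schema says they are unused, because a stray difference there could leak into results. For example (illustrative values):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	a := &histogram.FloatHistogram{
		Schema:          0, // exponential schema
		Count:           2,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{2},
	}
	b := a.Copy()
	// CustomBounds is ignored for exponential schemas, so a stray value does
	// not break equality...
	b.CustomBounds = []float64{1, 2, 3}
	fmt.Println(a.Equals(b)) // true

	// ...whereas ZeroCount is compared even for custom-buckets histograms,
	// which are not supposed to use it.
	c := &histogram.FloatHistogram{Schema: histogram.CustomBucketsSchema, CustomBounds: []float64{1}}
	d := c.Copy()
	d.ZeroCount = 5
	fmt.Println(c.Equals(d)) // false
}
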
@ -403,6 +490,7 @@ func (h *FloatHistogram) Size() int {
negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32). negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32).
posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64). posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64).
negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64). negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64).
customBoundSize := len(h.CustomBounds) * 8 // 8 bytes (float64).
// Total size of the struct. // Total size of the struct.
@ -417,9 +505,10 @@ func (h *FloatHistogram) Size() int {
// fh.NegativeSpans is 24 bytes. // fh.NegativeSpans is 24 bytes.
// fh.PositiveBuckets is 24 bytes. // fh.PositiveBuckets is 24 bytes.
// fh.NegativeBuckets is 24 bytes. // fh.NegativeBuckets is 24 bytes.
structSize := 144 // fh.CustomBounds is 24 bytes.
structSize := 168
return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize
} }
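
The struct-size bump from 144 to 168 bytes is just the extra 24-byte slice header for CustomBounds; the bounds themselves count 8 bytes each. A quick worked example (illustrative values):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// 168 (struct) + 1*8 (positive span) + 2*8 (positive buckets)
	//             + 3*8 (custom bounds) = 216 bytes.
	fh := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 2},
		CustomBounds:    []float64{1, 2, 3},
	}
	fmt.Println(fh.Size()) // 216
}
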
// Compact eliminates empty buckets at the beginning and end of each span, then // Compact eliminates empty buckets at the beginning and end of each span, then
@ -504,6 +593,12 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
if h.Count < previous.Count { if h.Count < previous.Count {
return true return true
} }
if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !floatBucketsMatch(h.CustomBounds, previous.CustomBounds)) {
// Mark that something has changed or that the application has been restarted. However, this does
// not matter so much since the change in schema will be handled directly in the chunks and PromQL
// functions.
return true
}
if h.Schema > previous.Schema { if h.Schema > previous.Schema {
return true return true
} }
@ -609,7 +704,7 @@ func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] {
// positive buckets in descending order (starting at the highest bucket and // positive buckets in descending order (starting at the highest bucket and
// going down towards the zero bucket). // going down towards the zero bucket).
func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] {
it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomBounds)
return &it return &it
} }
@ -617,7 +712,7 @@ func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64]
// negative buckets in ascending order (starting at the lowest bucket and going // negative buckets in ascending order (starting at the lowest bucket and going
// up towards the zero bucket). // up towards the zero bucket).
func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] {
it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
return &it return &it
} }
@ -629,7 +724,7 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64]
func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
return &allFloatBucketIterator{ return &allFloatBucketIterator{
h: h, h: h,
leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false), leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil),
rightIter: h.floatBucketIterator(true, 0, h.Schema), rightIter: h.floatBucketIterator(true, 0, h.Schema),
state: -1, state: -1,
} }
@ -643,30 +738,52 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] {
return &allFloatBucketIterator{ return &allFloatBucketIterator{
h: h, h: h,
leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true), leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomBounds),
rightIter: h.floatBucketIterator(false, 0, h.Schema), rightIter: h.floatBucketIterator(false, 0, h.Schema),
state: -1, state: -1,
} }
} }
// Validate validates consistency between span and bucket slices. Also, buckets are checked // Validate validates consistency between span and bucket slices. Also, buckets are checked
// against negative values. // against negative values. We check to make sure there are no unexpected fields or field values
// based on the exponential / custom buckets schema.
// We do not check for h.Count being at least as large as the sum of the // We do not check for h.Count being at least as large as the sum of the
// counts in the buckets because floating point precision issues can // counts in the buckets because floating point precision issues can
// create false positives here. // create false positives here.
func (h *FloatHistogram) Validate() error { func (h *FloatHistogram) Validate() error {
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err)
}
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err)
}
var nCount, pCount float64 var nCount, pCount float64
err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) if h.UsesCustomBuckets() {
if err != nil { if err := checkHistogramCustomBounds(h.CustomBounds, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err) return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
return fmt.Errorf("custom buckets: must have zero count of 0")
}
if h.ZeroThreshold != 0 {
return fmt.Errorf("custom buckets: must have zero threshold of 0")
}
if len(h.NegativeSpans) > 0 {
return fmt.Errorf("custom buckets: must not have negative spans")
}
if len(h.NegativeBuckets) > 0 {
return fmt.Errorf("custom buckets: must not have negative buckets")
}
} else {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err)
}
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err)
}
err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false)
if err != nil {
return fmt.Errorf("negative side: %w", err)
}
if h.CustomBounds != nil {
return fmt.Errorf("histogram with exponential schema must not have custom bounds")
}
} }
err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false) err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
if err != nil { if err != nil {
return fmt.Errorf("positive side: %w", err) return fmt.Errorf("positive side: %w", err)
} }
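
A sketch of how the new Validate branch behaves for custom-buckets float histograms (illustrative values, error strings abbreviated in the comments):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           3,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 2},
		CustomBounds:    []float64{1, 5},
	}
	fmt.Println(h.Validate()) // <nil>

	// The zero bucket must stay unused for custom buckets.
	bad := h.Copy()
	bad.ZeroCount = 1
	fmt.Println(bad.Validate()) // custom buckets: must have zero count of 0

	// Bounds must be strictly increasing.
	bad = h.Copy()
	bad.CustomBounds = []float64{5, 5}
	fmt.Println(bad.Validate()) // ...: histogram custom bounds must be in strictly increasing order
}
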
@ -790,10 +907,11 @@ func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
// If positive is true, the returned iterator iterates through the positive // If positive is true, the returned iterator iterates through the positive
// buckets, otherwise through the negative buckets. // buckets, otherwise through the negative buckets.
// //
// If absoluteStartValue is < the lowest absolute value of any upper bucket // Only for exponential schemas, if absoluteStartValue is < the lowest absolute
// boundary, the iterator starts with the first bucket. Otherwise, it will skip // value of any upper bucket boundary, the iterator starts with the first bucket.
// all buckets with an absolute value of their upper boundary ≤ // Otherwise, it will skip all buckets with an absolute value of their upper boundary ≤
// absoluteStartValue. // absoluteStartValue. For custom bucket schemas, absoluteStartValue is ignored and
// no buckets are skipped.
// //
// targetSchema must be ≤ the schema of FloatHistogram (and of course within the // targetSchema must be ≤ the schema of FloatHistogram (and of course within the
// legal values for schemas in general). The buckets are merged to match the // legal values for schemas in general). The buckets are merged to match the
@ -801,6 +919,12 @@ func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
func (h *FloatHistogram) floatBucketIterator( func (h *FloatHistogram) floatBucketIterator(
positive bool, absoluteStartValue float64, targetSchema int32, positive bool, absoluteStartValue float64, targetSchema int32,
) floatBucketIterator { ) floatBucketIterator {
if h.UsesCustomBuckets() && targetSchema != h.Schema {
panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
}
if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
}
if targetSchema > h.Schema { if targetSchema > h.Schema {
panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema)) panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
} }
@ -816,6 +940,7 @@ func (h *FloatHistogram) floatBucketIterator(
if positive { if positive {
i.spans = h.PositiveSpans i.spans = h.PositiveSpans
i.buckets = h.PositiveBuckets i.buckets = h.PositiveBuckets
i.customBounds = h.CustomBounds
} else { } else {
i.spans = h.NegativeSpans i.spans = h.NegativeSpans
i.buckets = h.NegativeBuckets i.buckets = h.NegativeBuckets
@ -825,14 +950,15 @@ func (h *FloatHistogram) floatBucketIterator(
// reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators. // reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators.
func newReverseFloatBucketIterator( func newReverseFloatBucketIterator(
spans []Span, buckets []float64, schema int32, positive bool, spans []Span, buckets []float64, schema int32, positive bool, customBounds []float64,
) reverseFloatBucketIterator { ) reverseFloatBucketIterator {
r := reverseFloatBucketIterator{ r := reverseFloatBucketIterator{
baseBucketIterator: baseBucketIterator[float64, float64]{ baseBucketIterator: baseBucketIterator[float64, float64]{
schema: schema, schema: schema,
spans: spans, spans: spans,
buckets: buckets, buckets: buckets,
positive: positive, positive: positive,
customBounds: customBounds,
}, },
} }
@ -946,9 +1072,9 @@ func (i *floatBucketIterator) Next() bool {
} }
} }
// Skip buckets before absoluteStartValue. // Skip buckets before absoluteStartValue for exponential schemas.
// TODO(beorn7): Maybe do something more efficient than this recursive call. // TODO(beorn7): Maybe do something more efficient than this recursive call.
if !i.boundReachedStartValue && getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue { if !i.boundReachedStartValue && IsExponentialSchema(i.targetSchema) && getBoundExponential(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
return i.Next() return i.Next()
} }
i.boundReachedStartValue = true i.boundReachedStartValue = true
@ -1010,14 +1136,7 @@ func (i *allFloatBucketIterator) Next() bool {
case 0: case 0:
i.state = 1 i.state = 1
if i.h.ZeroCount > 0 { if i.h.ZeroCount > 0 {
i.currBucket = Bucket[float64]{ i.currBucket = i.h.ZeroBucket()
Lower: -i.h.ZeroThreshold,
Upper: i.h.ZeroThreshold,
LowerInclusive: true,
UpperInclusive: true,
Count: i.h.ZeroCount,
// Index is irrelevant for the zero bucket.
}
return true return true
} }
return i.Next() return i.Next()
@ -1076,7 +1195,7 @@ func addBuckets(
for _, spanB := range spansB { for _, spanB := range spansB {
indexB += spanB.Offset indexB += spanB.Offset
for j := 0; j < int(spanB.Length); j++ { for j := 0; j < int(spanB.Length); j++ {
if lowerThanThreshold && getBound(indexB, schema) <= threshold { if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold {
goto nextLoop goto nextLoop
} }
lowerThanThreshold = false lowerThanThreshold = false
@ -1192,6 +1311,12 @@ func floatBucketsMatch(b1, b2 []float64) bool {
// ReduceResolution reduces the float histogram's spans, buckets into target schema. // ReduceResolution reduces the float histogram's spans, buckets into target schema.
// The target schema must be smaller than the current float histogram's schema. // The target schema must be smaller than the current float histogram's schema.
func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram {
if h.UsesCustomBuckets() {
panic("cannot reduce resolution when there are custom buckets")
}
if IsCustomBucketsSchema(targetSchema) {
panic("cannot reduce resolution to custom buckets schema")
}
if targetSchema >= h.Schema { if targetSchema >= h.Schema {
panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
} }

File diff suppressed because it is too large.

View file

@ -20,14 +20,32 @@ import (
"strings" "strings"
) )
var ( const (
ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") ExponentialSchemaMax int32 = 8
ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") ExponentialSchemaMin int32 = -4
ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") CustomBucketsSchema int32 = 127
ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative")
ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided")
) )
var (
ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")
ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative")
ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative")
ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided")
ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few")
ErrHistogramCustomBucketsInvalid = errors.New("histogram custom bounds must be in strictly increasing order")
ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas")
ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds")
)
func IsCustomBucketsSchema(s int32) bool {
return s == CustomBucketsSchema
}
func IsExponentialSchema(s int32) bool {
return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax
}
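
A tiny illustration of the new constants and schema helpers:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	for _, s := range []int32{-4, 0, 8, histogram.CustomBucketsSchema, 9} {
		fmt.Printf("schema %d: exponential=%t custom=%t\n",
			s, histogram.IsExponentialSchema(s), histogram.IsCustomBucketsSchema(s))
	}
	// Schemas -4 through 8 report exponential=true; 127 reports custom=true;
	// anything else (like 9) is valid for neither family.
}
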
// BucketCount is a type constraint for the count in a bucket, which can be // BucketCount is a type constraint for the count in a bucket, which can be
// float64 (for type FloatHistogram) or uint64 (for type Histogram). // float64 (for type FloatHistogram) or uint64 (for type Histogram).
type BucketCount interface { type BucketCount interface {
@ -115,6 +133,8 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct {
currCount IBC // Count in the current bucket. currCount IBC // Count in the current bucket.
currIdx int32 // The actual bucket index. currIdx int32 // The actual bucket index.
customBounds []float64 // Bounds (usually upper) for histograms with custom buckets.
} }
func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] { func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] {
@ -128,14 +148,19 @@ func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] {
Index: b.currIdx, Index: b.currIdx,
} }
if b.positive { if b.positive {
bucket.Upper = getBound(b.currIdx, schema) bucket.Upper = getBound(b.currIdx, schema, b.customBounds)
bucket.Lower = getBound(b.currIdx-1, schema) bucket.Lower = getBound(b.currIdx-1, schema, b.customBounds)
} else { } else {
bucket.Lower = -getBound(b.currIdx, schema) bucket.Lower = -getBound(b.currIdx, schema, b.customBounds)
bucket.Upper = -getBound(b.currIdx-1, schema) bucket.Upper = -getBound(b.currIdx-1, schema, b.customBounds)
}
if IsCustomBucketsSchema(schema) {
bucket.LowerInclusive = b.currIdx == 0
bucket.UpperInclusive = true
} else {
bucket.LowerInclusive = bucket.Lower < 0
bucket.UpperInclusive = bucket.Upper > 0
} }
bucket.LowerInclusive = bucket.Lower < 0
bucket.UpperInclusive = bucket.Upper > 0
return bucket return bucket
} }
@ -393,7 +418,52 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB
return nil return nil
} }
func getBound(idx, schema int32) float64 { func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error {
prev := math.Inf(-1)
for _, curr := range bounds {
if curr <= prev {
return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid)
}
prev = curr
}
var spanBuckets int
var totalSpanLength int
for n, span := range spans {
if span.Offset < 0 {
return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset)
}
spanBuckets += int(span.Length)
totalSpanLength += int(span.Length) + int(span.Offset)
}
if spanBuckets != numBuckets {
return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch)
}
if (len(bounds) + 1) < totalSpanLength {
return fmt.Errorf("only %d custom bounds defined which is insufficient to cover total span length of %d: %w", len(bounds), totalSpanLength, ErrHistogramCustomBucketsMismatch)
}
return nil
}
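
The span-length check above encodes the one non-obvious rule: n custom bounds define n+1 buckets (the last one runs to +Inf), so the spans may address at most len(bounds)+1 bucket indices in total, offsets included. Exercising it through the exported Validate with illustrative values:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.Histogram{
		Schema: histogram.CustomBucketsSchema,
		Count:  3,
		// Offset 2 plus length 2 reaches bucket index 3, i.e. 4 indices in total,
		// which 3 bounds (= 4 buckets, the last ending at +Inf) can still cover.
		PositiveSpans:   []histogram.Span{{Offset: 2, Length: 2}},
		PositiveBuckets: []int64{1, 1}, // deltas, absolute counts 1 and 2
		CustomBounds:    []float64{1, 2, 3},
	}
	fmt.Println(h.Validate()) // <nil>

	// Dropping one bound leaves only 3 buckets for a span reaching index 3.
	h.CustomBounds = []float64{1, 2}
	err := h.Validate()
	fmt.Println(errors.Is(err, histogram.ErrHistogramCustomBucketsMismatch)) // true
}
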
func getBound(idx, schema int32, customBounds []float64) float64 {
if IsCustomBucketsSchema(schema) {
length := int32(len(customBounds))
switch {
case idx > length || idx < -1:
panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length))
case idx == length:
return math.Inf(1)
case idx == -1:
return math.Inf(-1)
default:
return customBounds[idx]
}
}
return getBoundExponential(idx, schema)
}
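
Putting getBound and the inclusivity rules together, iterating a custom-buckets histogram produces the layout the new iterator tests below expect; this sketch mirrors one of those test cases:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
		PositiveBuckets: []int64{1, 1, -1, 0},
		CustomBounds:    []float64{5, 10, 20, 50},
	}
	it := h.PositiveBucketIterator()
	for it.Next() {
		b := it.At()
		fmt.Printf("idx=%d (%v, %v] count=%d lowerInclusive=%t\n",
			b.Index, b.Lower, b.Upper, b.Count, b.LowerInclusive)
	}
	// idx=0: (-Inf, 5], count 1, lowerInclusive=true (first bucket starts at -Inf)
	// idx=1: (5, 10], count 2
	// idx=3: (20, 50], count 1 (index 2 is skipped by the second span's offset)
	// idx=4: (50, +Inf], count 1 (idx == len(CustomBounds) maps to +Inf)
}
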
func getBoundExponential(idx, schema int32) float64 {
// Here a bit of context about the behavior for the last bucket counting // Here a bit of context about the behavior for the last bucket counting
// regular numbers (called simply "last bucket" below) and the bucket // regular numbers (called simply "last bucket" below) and the bucket
// counting observations of ±Inf (called "inf bucket" below, with an idx // counting observations of ±Inf (called "inf bucket" below, with an idx

View file

@ -21,7 +21,7 @@ import (
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
) )
func TestGetBound(t *testing.T) { func TestGetBoundExponential(t *testing.T) {
scenarios := []struct { scenarios := []struct {
idx int32 idx int32
schema int32 schema int32
@ -105,7 +105,7 @@ func TestGetBound(t *testing.T) {
} }
for _, s := range scenarios { for _, s := range scenarios {
got := getBound(s.idx, s.schema) got := getBoundExponential(s.idx, s.schema)
if s.want != got { if s.want != got {
require.Equal(t, s.want, got, "idx %d, schema %d", s.idx, s.schema) require.Equal(t, s.want, got, "idx %d, schema %d", s.idx, s.schema)
} }

View file

@ -50,11 +50,12 @@ const (
type Histogram struct { type Histogram struct {
// Counter reset information. // Counter reset information.
CounterResetHint CounterResetHint CounterResetHint CounterResetHint
// Currently valid schema numbers are -4 <= n <= 8. They are all for // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
// base-2 bucket schemas, where 1 is a bucket boundary in each case, and // They are all for base-2 bucket schemas, where 1 is a bucket boundary in
// then each power of two is divided into 2^n logarithmic buckets. Or // each case, and then each power of two is divided into 2^n logarithmic buckets.
// in other words, each bucket boundary is the previous boundary times // Or in other words, each bucket boundary is the previous boundary times
// 2^(2^-n). // 2^(2^-n). Another valid schema number is 127 for custom buckets, defined by
// the CustomBounds field.
Schema int32 Schema int32
// Width of the zero bucket. // Width of the zero bucket.
ZeroThreshold float64 ZeroThreshold float64
@ -70,6 +71,12 @@ type Histogram struct {
// count. All following ones are deltas relative to the previous // count. All following ones are deltas relative to the previous
// element. // element.
PositiveBuckets, NegativeBuckets []int64 PositiveBuckets, NegativeBuckets []int64
// Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
// This slice is interned, to be treated as immutable and copied by reference.
// These numbers should be strictly increasing. This field is only used when the
// schema is 127, and the ZeroThreshold, ZeroCount, NegativeSpans and NegativeBuckets
// fields are not used.
CustomBounds []float64
} }
// A Span defines a continuous sequence of buckets. // A Span defines a continuous sequence of buckets.
@ -81,33 +88,43 @@ type Span struct {
Length uint32 Length uint32
} }
func (h *Histogram) UsesCustomBuckets() bool {
return IsCustomBucketsSchema(h.Schema)
}
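
Unlike FloatHistogram, the integer Histogram keeps PositiveBuckets delta-encoded; with custom buckets that encoding is unchanged, only the boundary lookup differs. A sketch that recovers the absolute counts, with values adapted from the new String test below:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           10,
		Sum:             2.7,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 4}, {Offset: 0, Length: 3}},
		PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, // deltas
		CustomBounds:    []float64{1, 2, 5, 10, 15, 20, 25, 50},
	}
	// Each entry is a delta relative to the previous absolute count.
	var abs []int64
	var cur int64
	for _, d := range h.PositiveBuckets {
		cur += d
		abs = append(abs, cur)
	}
	fmt.Println(abs)        // [1 3 1 2 1 1 1]
	fmt.Println(h.String()) // {count:10, sum:2.7, [-Inf,1]:1, (1,2]:3, (2,5]:1, (5,10]:2, (10,15]:1, (15,20]:1, (20,25]:1}
}
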
// Copy returns a deep copy of the Histogram. // Copy returns a deep copy of the Histogram.
func (h *Histogram) Copy() *Histogram { func (h *Histogram) Copy() *Histogram {
c := Histogram{ c := Histogram{
CounterResetHint: h.CounterResetHint, CounterResetHint: h.CounterResetHint,
Schema: h.Schema, Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.ZeroCount,
Count: h.Count, Count: h.Count,
Sum: h.Sum, Sum: h.Sum,
} }
if h.UsesCustomBuckets() {
c.CustomBounds = h.CustomBounds
} else {
c.ZeroThreshold = h.ZeroThreshold
c.ZeroCount = h.ZeroCount
if len(h.NegativeSpans) != 0 {
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
copy(c.NegativeSpans, h.NegativeSpans)
}
if len(h.NegativeBuckets) != 0 {
c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
copy(c.NegativeBuckets, h.NegativeBuckets)
}
}
if len(h.PositiveSpans) != 0 { if len(h.PositiveSpans) != 0 {
c.PositiveSpans = make([]Span, len(h.PositiveSpans)) c.PositiveSpans = make([]Span, len(h.PositiveSpans))
copy(c.PositiveSpans, h.PositiveSpans) copy(c.PositiveSpans, h.PositiveSpans)
} }
if len(h.NegativeSpans) != 0 {
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
copy(c.NegativeSpans, h.NegativeSpans)
}
if len(h.PositiveBuckets) != 0 { if len(h.PositiveBuckets) != 0 {
c.PositiveBuckets = make([]int64, len(h.PositiveBuckets)) c.PositiveBuckets = make([]int64, len(h.PositiveBuckets))
copy(c.PositiveBuckets, h.PositiveBuckets) copy(c.PositiveBuckets, h.PositiveBuckets)
} }
if len(h.NegativeBuckets) != 0 {
c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
copy(c.NegativeBuckets, h.NegativeBuckets)
}
return &c return &c
} }
@ -117,22 +134,35 @@ func (h *Histogram) Copy() *Histogram {
func (h *Histogram) CopyTo(to *Histogram) { func (h *Histogram) CopyTo(to *Histogram) {
to.CounterResetHint = h.CounterResetHint to.CounterResetHint = h.CounterResetHint
to.Schema = h.Schema to.Schema = h.Schema
to.ZeroThreshold = h.ZeroThreshold
to.ZeroCount = h.ZeroCount
to.Count = h.Count to.Count = h.Count
to.Sum = h.Sum to.Sum = h.Sum
if h.UsesCustomBuckets() {
to.ZeroThreshold = 0
to.ZeroCount = 0
to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
to.CustomBounds = h.CustomBounds
} else {
to.ZeroThreshold = h.ZeroThreshold
to.ZeroCount = h.ZeroCount
to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
copy(to.NegativeSpans, h.NegativeSpans)
to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
copy(to.NegativeBuckets, h.NegativeBuckets)
to.CustomBounds = clearIfNotNil(to.CustomBounds)
}
to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
copy(to.PositiveSpans, h.PositiveSpans) copy(to.PositiveSpans, h.PositiveSpans)
to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
copy(to.NegativeSpans, h.NegativeSpans)
to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
copy(to.PositiveBuckets, h.PositiveBuckets) copy(to.PositiveBuckets, h.PositiveBuckets)
to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
copy(to.NegativeBuckets, h.NegativeBuckets)
} }
// String returns a string representation of the Histogram. // String returns a string representation of the Histogram.
@ -168,6 +198,9 @@ func (h *Histogram) String() string {
// ZeroBucket returns the zero bucket. // ZeroBucket returns the zero bucket.
func (h *Histogram) ZeroBucket() Bucket[uint64] { func (h *Histogram) ZeroBucket() Bucket[uint64] {
if h.UsesCustomBuckets() {
panic("histograms with custom buckets have no zero bucket")
}
return Bucket[uint64]{ return Bucket[uint64]{
Lower: -h.ZeroThreshold, Lower: -h.ZeroThreshold,
Upper: h.ZeroThreshold, Upper: h.ZeroThreshold,
@ -180,14 +213,14 @@ func (h *Histogram) ZeroBucket() Bucket[uint64] {
// PositiveBucketIterator returns a BucketIterator to iterate over all positive // PositiveBucketIterator returns a BucketIterator to iterate over all positive
// buckets in ascending order (starting next to the zero bucket and going up). // buckets in ascending order (starting next to the zero bucket and going up).
func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] { func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] {
it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomBounds)
return &it return &it
} }
// NegativeBucketIterator returns a BucketIterator to iterate over all negative // NegativeBucketIterator returns a BucketIterator to iterate over all negative
// buckets in descending order (starting next to the zero bucket and going down). // buckets in descending order (starting next to the zero bucket and going down).
func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] { func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] {
it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
return &it return &it
} }
@ -208,30 +241,42 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
// but they must represent the same bucket layout to match. // but they must represent the same bucket layout to match.
// Sum is compared based on its bit pattern because this method // Sum is compared based on its bit pattern because this method
// is about data equality rather than mathematical equality. // is about data equality rather than mathematical equality.
// We ignore fields that are not used based on the exponential / custom buckets schema,
// but check fields where differences may cause unintended behaviour even if they are not
// supposed to be used according to the schema.
func (h *Histogram) Equals(h2 *Histogram) bool { func (h *Histogram) Equals(h2 *Histogram) bool {
if h2 == nil { if h2 == nil {
return false return false
} }
if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || if h.Schema != h2.Schema || h.Count != h2.Count ||
h.ZeroCount != h2.ZeroCount || h.Count != h2.Count ||
math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
return false return false
} }
if h.UsesCustomBuckets() {
if !floatBucketsMatch(h.CustomBounds, h2.CustomBounds) {
return false
}
}
if h.ZeroThreshold != h2.ZeroThreshold || h.ZeroCount != h2.ZeroCount {
return false
}
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
return false
}
if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
return false return false
} }
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) { if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) {
return false return false
} }
if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
return false
}
return true return true
} }
@ -322,17 +367,34 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram {
} }
fh.CounterResetHint = h.CounterResetHint fh.CounterResetHint = h.CounterResetHint
fh.Schema = h.Schema fh.Schema = h.Schema
fh.ZeroThreshold = h.ZeroThreshold
fh.ZeroCount = float64(h.ZeroCount)
fh.Count = float64(h.Count) fh.Count = float64(h.Count)
fh.Sum = h.Sum fh.Sum = h.Sum
if h.UsesCustomBuckets() {
fh.ZeroThreshold = 0
fh.ZeroCount = 0
fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans)
fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets)
fh.CustomBounds = h.CustomBounds
} else {
fh.ZeroThreshold = h.ZeroThreshold
fh.ZeroCount = float64(h.ZeroCount)
fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans))
copy(fh.NegativeSpans, h.NegativeSpans)
fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets))
var currentNegative float64
for i, b := range h.NegativeBuckets {
currentNegative += float64(b)
fh.NegativeBuckets[i] = currentNegative
}
fh.CustomBounds = clearIfNotNil(fh.CustomBounds)
}
fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans)) fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans))
copy(fh.PositiveSpans, h.PositiveSpans) copy(fh.PositiveSpans, h.PositiveSpans)
fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans))
copy(fh.NegativeSpans, h.NegativeSpans)
fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets)) fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets))
var currentPositive float64 var currentPositive float64
for i, b := range h.PositiveBuckets { for i, b := range h.PositiveBuckets {
@ -340,13 +402,6 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram {
fh.PositiveBuckets[i] = currentPositive fh.PositiveBuckets[i] = currentPositive
} }
fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets))
var currentNegative float64
for i, b := range h.NegativeBuckets {
currentNegative += float64(b)
fh.NegativeBuckets[i] = currentNegative
}
return fh return fh
} }
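
ToFloat now follows the same split: for custom buckets it clears the zero bucket and the negative side on the target, converts the delta-encoded positive buckets to absolute floats, and shares CustomBounds by reference. A sketch with illustrative values:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           6,
		Sum:             11,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		PositiveBuckets: []int64{1, 1, 1}, // deltas, absolute counts 1, 2, 3
		CustomBounds:    []float64{1, 2},
	}
	fh := h.ToFloat(nil) // pass an existing *FloatHistogram instead to reuse its slices
	fmt.Println(fh.PositiveBuckets)                         // [1 2 3]
	fmt.Println(fh.ZeroCount, len(fh.NegativeBuckets))      // 0 0
	fmt.Println(fh.Schema == histogram.CustomBucketsSchema) // true
}
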
@ -357,26 +412,55 @@ func resize[T any](items []T, n int) []T {
return items[:n] return items[:n]
} }
func clearIfNotNil[T any](items []T) []T {
if items == nil {
return nil
}
return items[:0]
}
// Validate validates consistency between span and bucket slices. Also, buckets are checked // Validate validates consistency between span and bucket slices. Also, buckets are checked
// against negative values. // against negative values. We check to make sure there are no unexpected fields or field values
// based on the exponential / custom buckets schema.
// For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a // For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a
// strict h.Count = nCount + pCount + h.ZeroCount check is performed. // strict h.Count = nCount + pCount + h.ZeroCount check is performed.
// Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount), // Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount),
// because NaN observations do not increment the values of buckets (but they do increment // because NaN observations do not increment the values of buckets (but they do increment
// the total h.Count). // the total h.Count).
func (h *Histogram) Validate() error { func (h *Histogram) Validate() error {
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err)
}
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err)
}
var nCount, pCount uint64 var nCount, pCount uint64
err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) if h.UsesCustomBuckets() {
if err != nil { if err := checkHistogramCustomBounds(h.CustomBounds, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err) return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
return fmt.Errorf("custom buckets: must have zero count of 0")
}
if h.ZeroThreshold != 0 {
return fmt.Errorf("custom buckets: must have zero threshold of 0")
}
if len(h.NegativeSpans) > 0 {
return fmt.Errorf("custom buckets: must not have negative spans")
}
if len(h.NegativeBuckets) > 0 {
return fmt.Errorf("custom buckets: must not have negative buckets")
}
} else {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err)
}
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err)
}
err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true)
if err != nil {
return fmt.Errorf("negative side: %w", err)
}
if h.CustomBounds != nil {
return fmt.Errorf("histogram with exponential schema must not have custom bounds")
}
} }
err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true) err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
if err != nil { if err != nil {
return fmt.Errorf("positive side: %w", err) return fmt.Errorf("positive side: %w", err)
} }
@ -399,12 +483,13 @@ type regularBucketIterator struct {
baseBucketIterator[uint64, int64] baseBucketIterator[uint64, int64]
} }
func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) regularBucketIterator { func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customBounds []float64) regularBucketIterator {
i := baseBucketIterator[uint64, int64]{ i := baseBucketIterator[uint64, int64]{
schema: schema, schema: schema,
spans: spans, spans: spans,
buckets: buckets, buckets: buckets,
positive: positive, positive: positive,
customBounds: customBounds,
} }
return regularBucketIterator{i} return regularBucketIterator{i}
} }
@ -478,7 +563,7 @@ func (c *cumulativeBucketIterator) Next() bool {
if c.emptyBucketCount > 0 { if c.emptyBucketCount > 0 {
// We are traversing through empty buckets at the moment. // We are traversing through empty buckets at the moment.
c.currUpper = getBound(c.currIdx, c.h.Schema) c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomBounds)
c.currIdx++ c.currIdx++
c.emptyBucketCount-- c.emptyBucketCount--
return true return true
@ -495,7 +580,7 @@ func (c *cumulativeBucketIterator) Next() bool {
c.currCount += c.h.PositiveBuckets[c.posBucketsIdx] c.currCount += c.h.PositiveBuckets[c.posBucketsIdx]
c.currCumulativeCount += uint64(c.currCount) c.currCumulativeCount += uint64(c.currCount)
c.currUpper = getBound(c.currIdx, c.h.Schema) c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomBounds)
c.posBucketsIdx++ c.posBucketsIdx++
c.idxInSpan++ c.idxInSpan++
@ -526,6 +611,12 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] {
// ReduceResolution reduces the histogram's spans, buckets into target schema. // ReduceResolution reduces the histogram's spans, buckets into target schema.
// The target schema must be smaller than the current histogram's schema. // The target schema must be smaller than the current histogram's schema.
func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram { func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram {
if h.UsesCustomBuckets() {
panic("cannot reduce resolution when there are custom buckets")
}
if IsCustomBucketsSchema(targetSchema) {
panic("cannot reduce resolution to custom buckets schema")
}
if targetSchema >= h.Schema { if targetSchema >= h.Schema {
panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
} }

View file

@ -69,6 +69,21 @@ func TestHistogramString(t *testing.T) {
}, },
expectedString: "{count:19, sum:2.7, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, (0.5,1]:1, (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:1, (16,32]:1, (32,64]:1}", expectedString: "{count:19, sum:2.7, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, (0.5,1]:1, (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:1, (16,32]:1, (32,64]:1}",
}, },
{
histogram: Histogram{
Schema: CustomBucketsSchema,
Count: 19,
Sum: 2.7,
PositiveSpans: []Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
CustomBounds: []float64{1, 2, 5, 10, 15, 20, 25, 50},
},
expectedString: "{count:19, sum:2.7, [-Inf,1]:1, (1,2]:3, (2,5]:1, (5,10]:2, (10,15]:1, (15,20]:1, (20,25]:1}",
},
} }
for i, c := range cases { for i, c := range cases {
@ -208,6 +223,26 @@ func TestCumulativeBucketIterator(t *testing.T) {
{Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2}, {Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2},
}, },
}, },
{
histogram: Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomBounds: []float64{5, 10, 20, 50},
},
expectedBuckets: []Bucket[uint64]{
{Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
{Lower: math.Inf(-1), Upper: 10, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 1},
{Lower: math.Inf(-1), Upper: 20, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 2},
{Lower: math.Inf(-1), Upper: 50, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 3},
{Lower: math.Inf(-1), Upper: math.Inf(1), Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 4},
},
},
} }
for i, c := range cases { for i, c := range cases {
@ -368,6 +403,62 @@ func TestRegularBucketIterator(t *testing.T) {
}, },
expectedNegativeBuckets: []Bucket[uint64]{}, expectedNegativeBuckets: []Bucket[uint64]{},
}, },
{
histogram: Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomBounds: []float64{5, 10, 20, 50},
},
expectedPositiveBuckets: []Bucket[uint64]{
{Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
{Lower: 5, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
},
expectedNegativeBuckets: []Bucket[uint64]{},
},
{
histogram: Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomBounds: []float64{0, 10, 20, 50},
},
expectedPositiveBuckets: []Bucket[uint64]{
{Lower: math.Inf(-1), Upper: 0, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
{Lower: 0, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
},
expectedNegativeBuckets: []Bucket[uint64]{},
},
{
histogram: Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 5},
},
PositiveBuckets: []int64{1, 1, 0, -1, 0},
CustomBounds: []float64{-5, 0, 20, 50},
},
expectedPositiveBuckets: []Bucket[uint64]{
{Lower: math.Inf(-1), Upper: -5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
{Lower: -5, Upper: 0, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
{Lower: 0, Upper: 20, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 2},
{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
},
expectedNegativeBuckets: []Bucket[uint64]{},
},
} }
for i, c := range cases { for i, c := range cases {
@ -461,11 +552,79 @@ func TestHistogramToFloat(t *testing.T) {
} }
} }
func TestCustomBucketsHistogramToFloat(t *testing.T) {
h := Histogram{
Schema: CustomBucketsSchema,
Count: 10,
Sum: 2.7,
PositiveSpans: []Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
CustomBounds: []float64{5, 10, 20, 50, 100, 500},
}
cases := []struct {
name string
fh *FloatHistogram
}{
{name: "without prior float histogram"},
{name: "prior float histogram with more buckets", fh: &FloatHistogram{
Schema: 2,
Count: 3,
Sum: 5,
ZeroThreshold: 4,
ZeroCount: 1,
PositiveSpans: []Span{
{Offset: 1, Length: 2},
{Offset: 1, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9},
NegativeSpans: []Span{
{Offset: 20, Length: 6},
{Offset: 12, Length: 7},
{Offset: 33, Length: 10},
},
NegativeBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9},
}},
{name: "prior float histogram with fewer buckets", fh: &FloatHistogram{
Schema: 2,
Count: 3,
Sum: 5,
ZeroThreshold: 4,
ZeroCount: 1,
PositiveSpans: []Span{
{Offset: 1, Length: 2},
{Offset: 1, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 2},
NegativeSpans: []Span{
{Offset: 20, Length: 6},
{Offset: 12, Length: 7},
{Offset: 33, Length: 10},
},
NegativeBuckets: []float64{1, 2},
}},
}
require.NoError(t, h.Validate())
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
fh := h.ToFloat(c.fh)
require.NoError(t, fh.Validate())
require.Equal(t, h.String(), fh.String())
})
}
}
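The conversion exercised above can be used directly. A minimal sketch, assuming the model/histogram package as it stands in this commit:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           5,
		Sum:             19.4,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
		PositiveBuckets: []int64{1, 1, -1, 0},
		CustomBounds:    []float64{1, 2, 3, 4},
	}
	if err := h.Validate(); err != nil {
		fmt.Println("invalid histogram:", err)
		return
	}
	// Passing nil allocates a fresh FloatHistogram; a previously used one could be recycled instead.
	fh := h.ToFloat(nil)
	fmt.Println(fh.String())
}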
// TestHistogramEquals tests both Histogram and FloatHistogram.
func TestHistogramEquals(t *testing.T) {
h1 := Histogram{
Schema: 3,
Count: 61, Count: 62,
Sum: 2.7,
ZeroThreshold: 0.1,
ZeroCount: 42,
@@ -495,6 +654,15 @@ func TestHistogramEquals(t *testing.T) {
require.False(t, h1f.Equals(h2f))
require.False(t, h2f.Equals(h1f))
}
notEqualsUntilFloatConv := func(h1, h2 Histogram) {
require.False(t, h1.Equals(&h2))
require.False(t, h2.Equals(&h1))
h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil)
require.True(t, h1f.Equals(h2f))
require.True(t, h2f.Equals(h1f))
}
require.NoError(t, h1.Validate())
h2 := h1.Copy()
equals(h1, *h2)
@@ -602,6 +770,45 @@ func TestHistogramEquals(t *testing.T) {
// Sum StaleNaN vs regular NaN.
notEquals(*hStale, *hNaN)
// Has non-empty custom bounds for exponential schema.
hCustom := h1.Copy()
hCustom.CustomBounds = []float64{1, 2, 3}
equals(h1, *hCustom)
cbh1 := Histogram{
Schema: CustomBucketsSchema,
Count: 10,
Sum: 2.7,
PositiveSpans: []Span{
{Offset: 0, Length: 4},
{Offset: 10, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
CustomBounds: []float64{0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 200, 250, 500, 1000},
}
require.NoError(t, cbh1.Validate())
cbh2 := cbh1.Copy()
equals(cbh1, *cbh2)
// Has different custom bounds for custom buckets schema.
cbh2 = cbh1.Copy()
cbh2.CustomBounds = []float64{0.1, 0.2, 0.5}
notEquals(cbh1, *cbh2)
// Has non-empty negative spans and buckets for custom buckets schema.
cbh2 = cbh1.Copy()
cbh2.NegativeSpans = []Span{{Offset: 0, Length: 1}}
cbh2.NegativeBuckets = []int64{1}
notEqualsUntilFloatConv(cbh1, *cbh2)
// Has non-zero zero count and threshold for custom buckets schema.
cbh2 = cbh1.Copy()
cbh2.ZeroThreshold = 0.1
cbh2.ZeroCount = 10
notEqualsUntilFloatConv(cbh1, *cbh2)
}
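The notEqualsUntilFloatConv cases capture a subtlety worth spelling out: for the custom-buckets schema, fields such as ZeroThreshold, ZeroCount and the negative spans are unused, but the integer Histogram's Equals still compares them, while conversion to FloatHistogram drops them. A rough sketch under that assumption:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	a := histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           5,
		Sum:             19.4,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
		PositiveBuckets: []int64{1, 1, -1, 0},
		CustomBounds:    []float64{1, 2, 3, 4},
	}
	b := *a.Copy()
	b.ZeroThreshold = 0.1 // Unused for custom buckets, but still seen by Equals.
	b.ZeroCount = 10

	fmt.Println(a.Equals(&b))                          // false
	fmt.Println(a.ToFloat(nil).Equals(b.ToFloat(nil))) // true: the unused fields are dropped
}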
func TestHistogramCopy(t *testing.T) {
@@ -640,6 +847,21 @@ func TestHistogramCopy(t *testing.T) {
},
expected: &Histogram{},
},
{
name: "with custom buckets",
orig: &Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomBounds: []float64{5, 10, 15},
},
expected: &Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomBounds: []float64{5, 10, 15},
},
},
}
for _, tcase := range cases {
@@ -690,6 +912,21 @@ func TestHistogramCopyTo(t *testing.T) {
},
expected: &Histogram{},
},
{
name: "with custom buckets",
orig: &Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomBounds: []float64{5, 10, 15},
},
expected: &Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomBounds: []float64{5, 10, 15},
},
},
}
for _, tcase := range cases {
@@ -971,6 +1208,86 @@ func TestHistogramCompact(t *testing.T) {
NegativeBuckets: []int64{2, 3},
},
},
{
"nothing should happen with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomBounds: []float64{5, 10, 15},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomBounds: []float64{5, 10, 15},
},
},
{
"eliminate zero offsets with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}, {0, 3}, {0, 1}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomBounds: []float64{5, 10, 15, 20},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 5}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomBounds: []float64{5, 10, 15, 20},
},
},
{
"eliminate zero length with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {2, 0}, {3, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomBounds: []float64{5, 10, 15, 20},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {5, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomBounds: []float64{5, 10, 15, 20},
},
},
{
"eliminate multiple zero length spans with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomBounds: []float64{5, 10, 15, 20},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {9, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomBounds: []float64{5, 10, 15, 20},
},
},
{
"cut empty buckets at start or end of spans, even in the middle, with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-4, 6}, {3, 6}},
PositiveBuckets: []int64{0, 0, 1, 3, -4, 0, 1, 42, 3, -46, 0, 0},
CustomBounds: []float64{5, 10, 15, 20},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {5, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomBounds: []float64{5, 10, 15, 20},
},
},
}
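The compaction cases above work the same way as for exponential schemas. A small sketch, assuming the API in this commit, showing that Compact(0) tidies the span layout while leaving CustomBounds untouched:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema,
		PositiveSpans:   []histogram.Span{{Offset: -2, Length: 1}, {Offset: 0, Length: 3}, {Offset: 0, Length: 1}},
		PositiveBuckets: []int64{1, 3, -3, 42, 3},
		CustomBounds:    []float64{5, 10, 15, 20},
	}
	h = h.Compact(0)
	fmt.Println(h.PositiveSpans) // [{-2 5}]: the zero offsets were merged away
	fmt.Println(h.CustomBounds)  // [5 10 15 20]: unchanged
}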
for _, c := range cases {
@@ -1107,6 +1424,145 @@ func TestHistogramValidation(t *testing.T) {
errMsg: `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`,
skipFloat: true,
},
"rejects an exponential histogram with custom buckets schema": {
h: &Histogram{
Count: 12,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
NegativeSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{1, 1, -1, 0},
},
errMsg: `custom buckets: only 0 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`,
},
"rejects a custom buckets histogram with exponential schema": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: 0,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomBounds: []float64{1, 2, 3, 4},
},
errMsg: `histogram with exponential schema must not have custom bounds`,
skipFloat: true, // Converting to float drops the offending fields, so only the float version would pass validation.
},
"rejects a custom buckets histogram with zero/negative buckets": {
h: &Histogram{
Count: 12,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
NegativeSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{1, 1, -1, 0},
CustomBounds: []float64{1, 2, 3, 4},
},
errMsg: `custom buckets: must have zero count of 0`,
skipFloat: true, // Converting to float drops the offending fields, so only the float version would pass validation.
},
"rejects a custom buckets histogram with negative offset in first span": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: -1, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomBounds: []float64{1, 2, 3, 4},
},
errMsg: `custom buckets: span number 1 with offset -1: histogram has a span whose offset is negative`,
},
"rejects a custom buckets histogram with negative offset in subsequent spans": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: -1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomBounds: []float64{1, 2, 3, 4},
},
errMsg: `custom buckets: span number 2 with offset -1: histogram has a span whose offset is negative`,
},
"rejects a custom buckets histogram with non-matching bucket counts": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1},
CustomBounds: []float64{1, 2, 3, 4},
},
errMsg: `custom buckets: spans need 4 buckets, have 3 buckets: histogram spans specify different number of buckets than provided`,
},
"rejects a custom buckets histogram with too few bounds": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomBounds: []float64{1, 2, 3},
},
errMsg: `custom buckets: only 3 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`,
},
"valid custom buckets histogram": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomBounds: []float64{1, 2, 3, 4},
},
},
"valid custom buckets histogram with extra bounds": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomBounds: []float64{1, 2, 3, 4, 5, 6, 7, 8},
},
},
}
for testName, tc := range tests {
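Taken together, the validation cases above say that a custom-buckets histogram must not carry the exponential-only fields (zero bucket, negative buckets) and must define at least as many bounds as its spans can reach, with extra bounds tolerated. A sketch under the same assumptions that trips the too-few-bounds rule:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := histogram.Histogram{
		Count:           5,
		Sum:             19.4,
		Schema:          histogram.CustomBucketsSchema,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
		PositiveBuckets: []int64{1, 1, -1, 0},
		// The spans reach bucket index 4, so at least 4 bounds are needed; 3 is too few.
		CustomBounds: []float64{1, 2, 3},
	}
	fmt.Println(h.Validate()) // histogram custom bounds are too few
}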

View file

@ -1657,18 +1657,21 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
}, e.LHS, e.RHS)
default:
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh)
return vec, handleVectorBinopError(err, e)
}, e.LHS, e.RHS)
}
case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh)
return vec, handleVectorBinopError(err, e)
}, e.LHS, e.RHS)
case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh)
return vec, handleVectorBinopError(err, e)
}, e.LHS, e.RHS)
}
@@ -2303,12 +2306,12 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi
}
// VectorBinop evaluates a binary operation between two Vectors, excluding set operators.
func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) {
if matching.Card == parser.CardManyToMany {
panic("many-to-many only allowed for set operators")
}
if len(lhs) == 0 || len(rhs) == 0 {
return nil // Short-circuit: nothing is going to match. return nil, nil // Short-circuit: nothing is going to match.
}
// The control flow below handles one-to-one or many-to-one matching.
@@ -2361,6 +2364,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
// For all lhs samples find a respective rhs sample and perform
// the binary operation.
var lastErr error
for i, ls := range lhs {
sig := lhsh[i].signature
@@ -2376,7 +2380,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
fl, fr = fr, fl
hl, hr = hr, hl
}
floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr) floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr)
if err != nil {
lastErr = err
}
switch {
case returnBool:
if keep {
@@ -2418,7 +2425,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
H: histogramValue,
})
}
return enh.Out return enh.Out, lastErr
}
func signatureFunc(on bool, b []byte, names ...string) func(labels.Labels) string {
@@ -2481,7 +2488,8 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V
}
// VectorscalarBinop evaluates a binary operation between a Vector and a Scalar.
func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) {
var lastErr error
for _, lhsSample := range lhs {
lf, rf := lhsSample.F, rhs.V
var rh *histogram.FloatHistogram
@@ -2492,7 +2500,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
lf, rf = rf, lf
lh, rh = rh, lh
}
float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh) float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh)
if err != nil {
lastErr = err
}
// Catch cases where the scalar is the LHS in a scalar-vector comparison operation.
// We want to always keep the vector element value as the output value, even if it's on the RHS.
if op.IsComparisonOperator() && swap {
@@ -2516,7 +2527,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
enh.Out = append(enh.Out, lhsSample)
}
}
return enh.Out return enh.Out, lastErr
}
// scalarBinop evaluates a binary operation between two Scalars.
@@ -2553,49 +2564,57 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 {
}
// vectorElemBinop evaluates a binary operation between two Vector elements.
func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool) { func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) {
switch op { switch op {
case parser.ADD: case parser.ADD:
if hlhs != nil && hrhs != nil { if hlhs != nil && hrhs != nil {
return 0, hlhs.Copy().Add(hrhs).Compact(0), true res, err := hlhs.Copy().Add(hrhs)
if err != nil {
return 0, nil, false, err
}
return 0, res.Compact(0), true, nil
} }
return lhs + rhs, nil, true return lhs + rhs, nil, true, nil
case parser.SUB: case parser.SUB:
if hlhs != nil && hrhs != nil { if hlhs != nil && hrhs != nil {
return 0, hlhs.Copy().Sub(hrhs).Compact(0), true res, err := hlhs.Copy().Sub(hrhs)
if err != nil {
return 0, nil, false, err
}
return 0, res.Compact(0), true, nil
} }
return lhs - rhs, nil, true return lhs - rhs, nil, true, nil
case parser.MUL: case parser.MUL:
if hlhs != nil && hrhs == nil { if hlhs != nil && hrhs == nil {
return 0, hlhs.Copy().Mul(rhs), true return 0, hlhs.Copy().Mul(rhs), true, nil
} }
if hlhs == nil && hrhs != nil { if hlhs == nil && hrhs != nil {
return 0, hrhs.Copy().Mul(lhs), true return 0, hrhs.Copy().Mul(lhs), true, nil
} }
return lhs * rhs, nil, true return lhs * rhs, nil, true, nil
case parser.DIV: case parser.DIV:
if hlhs != nil && hrhs == nil { if hlhs != nil && hrhs == nil {
return 0, hlhs.Copy().Div(rhs), true return 0, hlhs.Copy().Div(rhs), true, nil
} }
return lhs / rhs, nil, true return lhs / rhs, nil, true, nil
case parser.POW: case parser.POW:
return math.Pow(lhs, rhs), nil, true return math.Pow(lhs, rhs), nil, true, nil
case parser.MOD: case parser.MOD:
return math.Mod(lhs, rhs), nil, true return math.Mod(lhs, rhs), nil, true, nil
case parser.EQLC: case parser.EQLC:
return lhs, nil, lhs == rhs return lhs, nil, lhs == rhs, nil
case parser.NEQ: case parser.NEQ:
return lhs, nil, lhs != rhs return lhs, nil, lhs != rhs, nil
case parser.GTR: case parser.GTR:
return lhs, nil, lhs > rhs return lhs, nil, lhs > rhs, nil
case parser.LSS: case parser.LSS:
return lhs, nil, lhs < rhs return lhs, nil, lhs < rhs, nil
case parser.GTE: case parser.GTE:
return lhs, nil, lhs >= rhs return lhs, nil, lhs >= rhs, nil
case parser.LTE: case parser.LTE:
return lhs, nil, lhs <= rhs return lhs, nil, lhs <= rhs, nil
case parser.ATAN2: case parser.ATAN2:
return math.Atan2(lhs, rhs), nil, true return math.Atan2(lhs, rhs), nil, true, nil
} }
panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op))
} }
@@ -2747,7 +2766,10 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, par
if s.H != nil {
group.hasHistogram = true
if group.histogramValue != nil {
group.histogramValue.Add(s.H) _, err := group.histogramValue.Add(s.H)
if err != nil {
handleAggregationError(err, e, group, &annos)
}
}
// Otherwise the aggregation contained floats
// previously and will be invalid anyway. No
@@ -2764,8 +2786,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, par
if group.histogramMean != nil {
left := s.H.Copy().Div(float64(group.groupCount))
right := group.histogramMean.Copy().Div(float64(group.groupCount))
toAdd := left.Sub(right) toAdd, err := left.Sub(right)
group.histogramMean.Add(toAdd) if err != nil {
handleAggregationError(err, e, group, &annos)
}
_, err = group.histogramMean.Add(toAdd)
if err != nil {
handleAggregationError(err, e, group, &annos)
}
} }
// Otherwise the aggregation contained floats
// previously and will be invalid anyway. No
@@ -2941,6 +2969,32 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, par
return enh.Out, annos
}
// handleAggregationError adds the appropriate annotation based on the aggregation error.
func handleAggregationError(err error, e *parser.AggregateExpr, group *groupedAggregation, annos *annotations.Annotations) {
metricName := group.labels.Get(labels.MetricName)
pos := e.Expr.PositionRange()
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
annos.Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
}
}
// handleVectorBinopError returns the appropriate annotation based on the vector binary operation error.
func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotations {
if err == nil {
return nil
}
metricName := ""
pos := e.PositionRange()
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
return annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
}
return nil
}
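The helpers above turn histogram arithmetic errors into PromQL warning annotations instead of failing the whole query. For illustration, a sketch assuming that FloatHistogram.Add reports ErrHistogramsIncompatibleSchema when one operand uses custom buckets and the other uses an exponential schema:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	exp := &histogram.FloatHistogram{
		Schema: 0, Count: 1, Sum: 1,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{1},
	}
	custom := &histogram.FloatHistogram{
		Schema: histogram.CustomBucketsSchema, Count: 1, Sum: 1,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{1},
		CustomBounds:    []float64{10},
	}
	_, err := exp.Copy().Add(custom)
	fmt.Println(errors.Is(err, histogram.ErrHistogramsIncompatibleSchema)) // expected: true
}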
// groupingKey builds and returns the grouping key for the given metric and
// grouping labels.
func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) {

View file

@@ -14,6 +14,7 @@
package promql
import (
"errors"
"fmt"
"math"
"sort"
@@ -211,14 +212,28 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
}
h := last.CopyToSchema(minSchema)
h.Sub(prev) _, err := h.Sub(prev)
if err != nil {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
}
}
if isCounter {
// Second iteration to deal with counter resets.
for _, currPoint := range points[1:] {
curr := currPoint.H
if curr.DetectReset(prev) {
h.Add(prev) _, err := h.Add(prev)
if err != nil {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
}
}
}
prev = curr
}
@@ -515,10 +530,11 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series)
return append(enh.Out, Sample{F: aggrFn(el)})
}
func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector { func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) {
el := vals[0].(Matrix)[0] el := vals[0].(Matrix)[0]
res, err := aggrFn(el)
return append(enh.Out, Sample{H: aggrFn(el)}) return append(enh.Out, Sample{H: res}), err
} }
// === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
@@ -530,18 +546,33 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
}
if len(firstSeries.Floats) == 0 {
// The passed values only contain histograms.
return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) {
count := 1 count := 1
mean := s.Histograms[0].H.Copy() mean := s.Histograms[0].H.Copy()
for _, h := range s.Histograms[1:] { for _, h := range s.Histograms[1:] {
count++ count++
left := h.H.Copy().Div(float64(count)) left := h.H.Copy().Div(float64(count))
right := mean.Copy().Div(float64(count)) right := mean.Copy().Div(float64(count))
toAdd := left.Sub(right) toAdd, err := left.Sub(right)
mean.Add(toAdd) if err != nil {
return mean, err
}
_, err = mean.Add(toAdd)
if err != nil {
return mean, err
}
} }
return mean return mean, nil
}), nil })
if err != nil {
metricName := firstSeries.Metric.Get(labels.MetricName)
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
}
}
return vec, nil
} }
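The histogram branch above keeps a running mean via Div, Sub and Add rather than summing everything first; the update is mean = mean + x/n - mean/n for the n-th sample. A tiny float-only check of that identity:

package main

import "fmt"

func main() {
	xs := []float64{2, 4, 9}
	mean := xs[0]
	for i, x := range xs[1:] {
		n := float64(i + 2)
		mean += x/n - mean/n // same shape of update as the histogram path
	}
	fmt.Println(mean) // 5, the mean of 2, 4 and 9
}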
return aggrOverTime(vals, enh, func(s Series) float64 { return aggrOverTime(vals, enh, func(s Series) float64 {
var mean, count, c float64 var mean, count, c float64
@@ -675,13 +706,25 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
}
if len(firstSeries.Floats) == 0 {
// The passed values only contain histograms.
return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) {
sum := s.Histograms[0].H.Copy() sum := s.Histograms[0].H.Copy()
for _, h := range s.Histograms[1:] { for _, h := range s.Histograms[1:] {
sum.Add(h.H) _, err := sum.Add(h.H)
if err != nil {
return sum, err
}
} }
return sum return sum, nil
}), nil })
if err != nil {
metricName := firstSeries.Metric.Get(labels.MetricName)
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
}
}
return vec, nil
} }
return aggrOverTime(vals, enh, func(s Series) float64 { return aggrOverTime(vals, enh, func(s Series) float64 {
var sum, c float64 var sum, c float64

View file

@@ -482,19 +482,19 @@ func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string
}
func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram { return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
return a.Add(b)
})
}
func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram { return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
return a.Sub(b)
})
}
func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64,
combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) *histogram.FloatHistogram, combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) (*histogram.FloatHistogram, error),
) ([]SequenceValue, error) {
ret := make([]SequenceValue, times+1)
// Add an additional value (the base) for time 0, which we ignore in tests.
@@ -505,7 +505,11 @@ func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uin
return nil, fmt.Errorf("error combining histograms: cannot merge from schema %d to %d", inc.Schema, cur.Schema)
}
cur = combine(cur.Copy(), inc) var err error
cur, err = combine(cur.Copy(), inc)
if err != nil {
return ret, err
}
ret[i] = SequenceValue{Histogram: cur}
}
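histogramsSeries expands a base histogram by repeatedly applying an increment, now aborting on the first incompatible combination. A rough sketch of the same expansion done by hand, assuming the FloatHistogram API of this commit (Add returns the combined histogram plus an error):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	base := &histogram.FloatHistogram{
		Schema: 0, Count: 1, Sum: 2,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{1},
	}
	inc := base.Copy()

	cur := base
	for i := 0; i < 3; i++ {
		next, err := cur.Copy().Add(inc)
		if err != nil {
			fmt.Println("cannot combine histograms:", err)
			return
		}
		cur = next
		fmt.Println(i+1, cur.Count, cur.Sum) // count and sum grow by 1 and 2 per step
	}
}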

View file

@@ -1399,7 +1399,7 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
expHist := hists[0].ToFloat(nil)
for _, h := range hists[1:] {
expHist = expHist.Add(h.ToFloat(nil)) expHist, _ = expHist.Add(h.ToFloat(nil))
}
it := s.Iterator(nil)

View file

@@ -660,7 +660,7 @@ func appender(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int3
}
}
if maxSchema < nativeHistogramMaxSchema { if maxSchema < histogram.ExponentialSchemaMax {
app = &maxSchemaAppender{
Appender: app,
maxSchema: maxSchema,
@@ -1956,10 +1956,10 @@ func pickSchema(bucketFactor float64) int32 {
}
floor := math.Floor(-math.Log2(math.Log2(bucketFactor)))
switch {
case floor >= float64(nativeHistogramMaxSchema): case floor >= float64(histogram.ExponentialSchemaMax):
return nativeHistogramMaxSchema return histogram.ExponentialSchemaMax
case floor <= float64(nativeHistogramMinSchema): case floor <= float64(histogram.ExponentialSchemaMin):
return nativeHistogramMinSchema return histogram.ExponentialSchemaMin
default:
return int32(floor)
}
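pickSchema maps a requested bucket growth factor onto a schema via floor(-log2(log2(factor))), clamped to the exponential schema range. A small worked sketch, assuming the exported limits keep the former values 8 and -4 (pickSchemaSketch is a stand-in, not the real function):

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/histogram"
)

func pickSchemaSketch(bucketFactor float64) int32 {
	floor := math.Floor(-math.Log2(math.Log2(bucketFactor)))
	switch {
	case floor >= float64(histogram.ExponentialSchemaMax):
		return histogram.ExponentialSchemaMax
	case floor <= float64(histogram.ExponentialSchemaMin):
		return histogram.ExponentialSchemaMin
	default:
		return int32(floor)
	}
}

func main() {
	for _, bf := range []float64{1.0005, 1.1, 100} {
		fmt.Println(bf, pickSchemaSketch(bf)) // 8, 2 and -3 respectively with those limits
	}
}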

View file

@ -510,7 +510,7 @@ func TestScrapePoolAppender(t *testing.T) {
appl, ok := loop.(*scrapeLoop)
require.True(t, ok, "Expected scrapeLoop but got %T", loop)
wrapped := appender(appl.appender(context.Background()), 0, 0, nativeHistogramMaxSchema) wrapped := appender(appl.appender(context.Background()), 0, 0, histogram.ExponentialSchemaMax)
tl, ok := wrapped.(*timeLimitAppender)
require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped)
@ -526,7 +526,7 @@ func TestScrapePoolAppender(t *testing.T) {
appl, ok = loop.(*scrapeLoop)
require.True(t, ok, "Expected scrapeLoop but got %T", loop)
wrapped = appender(appl.appender(context.Background()), sampleLimit, 0, nativeHistogramMaxSchema) wrapped = appender(appl.appender(context.Background()), sampleLimit, 0, histogram.ExponentialSchemaMax)
sl, ok := wrapped.(*limitAppender)
require.True(t, ok, "Expected limitAppender but got %T", wrapped)
@ -537,7 +537,7 @@ func TestScrapePoolAppender(t *testing.T) {
_, ok = tl.Appender.(nopAppender)
require.True(t, ok, "Expected base appender but got %T", tl.Appender)
wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, nativeHistogramMaxSchema) wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, histogram.ExponentialSchemaMax)
bl, ok := wrapped.(*bucketLimitAppender)
require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped)
@ -669,7 +669,7 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app
true, true,
false, false,
true, true,
0, 0, nativeHistogramMaxSchema, 0, 0, histogram.ExponentialSchemaMax,
nil, nil,
interval, interval,
time.Hour, time.Hour,
@ -810,7 +810,7 @@ func TestScrapeLoopRun(t *testing.T) {
true, true,
false, false,
true, true,
0, 0, nativeHistogramMaxSchema, 0, 0, histogram.ExponentialSchemaMax,
nil, nil,
time.Second, time.Second,
time.Hour, time.Hour,
@ -953,7 +953,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
true, true,
false, false,
true, true,
0, 0, nativeHistogramMaxSchema, 0, 0, histogram.ExponentialSchemaMax,
nil, nil,
0, 0,
0, 0,

View file

@@ -366,7 +366,7 @@ type bucketLimitAppender struct {
func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if h != nil {
for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit {
if h.Schema == -4 { if h.Schema <= histogram.ExponentialSchemaMin || h.Schema > histogram.ExponentialSchemaMax {
return 0, errBucketLimit
}
h = h.ReduceResolution(h.Schema - 1)
@@ -374,7 +374,7 @@ func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labe
}
if fh != nil {
for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit {
if fh.Schema == -4 { if fh.Schema <= histogram.ExponentialSchemaMin || fh.Schema > histogram.ExponentialSchemaMax {
return 0, errBucketLimit
}
fh = fh.ReduceResolution(fh.Schema - 1)
@@ -387,11 +387,6 @@ func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labe
return ref, nil
}
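The schema check above is what separates the two kinds of histograms here: exponential schemas above the minimum can still trade resolution for fewer buckets, while custom-buckets histograms fall outside the exponential range, have no coarser form to fall back to, and are rejected once they exceed the limit. A sketch of that guard, assuming the exported constants from this commit (canReduceFurther is a hypothetical helper):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func canReduceFurther(schema int32) bool {
	return schema > histogram.ExponentialSchemaMin && schema <= histogram.ExponentialSchemaMax
}

func main() {
	fmt.Println(canReduceFurther(3))                              // true: resolution can still be reduced
	fmt.Println(canReduceFurther(histogram.ExponentialSchemaMin)) // false: already at the coarsest schema
	fmt.Println(canReduceFurther(histogram.CustomBucketsSchema))  // false: custom buckets cannot be reduced
}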
const (
nativeHistogramMaxSchema int32 = 8
nativeHistogramMinSchema int32 = -4
)
type maxSchemaAppender struct {
storage.Appender
@@ -400,12 +395,12 @@ type maxSchemaAppender struct {
func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if h != nil {
if h.Schema > app.maxSchema { if histogram.IsExponentialSchema(h.Schema) && h.Schema > app.maxSchema {
h = h.ReduceResolution(app.maxSchema)
}
}
if fh != nil {
if fh.Schema > app.maxSchema { if histogram.IsExponentialSchema(fh.Schema) && fh.Schema > app.maxSchema {
fh = fh.ReduceResolution(app.maxSchema)
}
}

View file

@@ -473,6 +473,17 @@ func TestBucketLimitAppender(t *testing.T) {
PositiveBuckets: []int64{1, 0}, // 1, 1
}
customBuckets := histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 9,
Sum: 33,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{3, 0, 0},
CustomBounds: []float64{1, 2, 3},
}
cases := []struct {
h histogram.Histogram
limit int
@@ -506,6 +517,18 @@ func TestBucketLimitAppender(t *testing.T) {
expectBucketCount: 1,
expectSchema: -2,
},
{
h: customBuckets,
limit: 2,
expectError: true,
},
{
h: customBuckets,
limit: 3,
expectError: false,
expectBucketCount: 3,
expectSchema: histogram.CustomBucketsSchema,
},
}
resApp := &collectResultAppender{}
@@ -561,6 +584,17 @@ func TestMaxSchemaAppender(t *testing.T) {
NegativeBuckets: []int64{3, 0, 0},
}
customBuckets := histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 9,
Sum: 33,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{3, 0, 0},
CustomBounds: []float64{1, 2, 3},
}
cases := []struct {
h histogram.Histogram
maxSchema int32
@@ -576,6 +610,11 @@ func TestMaxSchemaAppender(t *testing.T) {
maxSchema: 0,
expectSchema: 0,
},
{
h: customBuckets,
maxSchema: -1,
expectSchema: histogram.CustomBucketsSchema,
},
}
resApp := &collectResultAppender{}

View file

@@ -103,12 +103,14 @@ var (
PromQLInfo = errors.New("PromQL info")
PromQLWarning = errors.New("PromQL warning")
InvalidQuantileWarning = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning)
BadBucketLabelWarning = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel)
MixedFloatsHistogramsWarning = fmt.Errorf("%w: encountered a mix of histograms and floats for", PromQLWarning)
MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning)
NativeHistogramNotCounterWarning = fmt.Errorf("%w: this native histogram metric is not a counter:", PromQLWarning)
NativeHistogramNotGaugeWarning = fmt.Errorf("%w: this native histogram metric is not a gauge:", PromQLWarning)
MixedExponentialCustomHistogramsWarning = fmt.Errorf("%w: vector contains a mix of histograms with exponential and custom buckets schemas for metric name", PromQLWarning)
IncompatibleCustomBucketsHistogramsWarning = fmt.Errorf("%w: vector contains histograms with incompatible custom buckets for metric name", PromQLWarning)
PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo)
HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo)
@@ -195,6 +197,24 @@ func NewNativeHistogramNotGaugeWarning(metricName string, pos posrange.PositionR
}
}
// NewMixedExponentialCustomHistogramsWarning is used when the queried series includes
// histograms with both exponential and custom buckets schemas.
func NewMixedExponentialCustomHistogramsWarning(metricName string, pos posrange.PositionRange) error {
return annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", MixedExponentialCustomHistogramsWarning, metricName),
}
}
// NewIncompatibleCustomBucketsHistogramsWarning is used when the queried series includes
// custom buckets histograms with incompatible custom bounds.
func NewIncompatibleCustomBucketsHistogramsWarning(metricName string, pos posrange.PositionRange) error {
return annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", IncompatibleCustomBucketsHistogramsWarning, metricName),
}
}
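For illustration, a sketch of how these constructors are meant to be called from the engine, with the package paths assumed:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser/posrange"
	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	pos := posrange.PositionRange{Start: 0, End: 25}
	warn := annotations.NewMixedExponentialCustomHistogramsWarning("my_histogram", pos)
	// The engine accumulates such warnings and returns them alongside the query result.
	annos := annotations.New().Add(warn)
	fmt.Println(warn)
	_ = annos
}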
// NewPossibleNonCounterInfo is used when a named counter metric with only float samples does not
// have the suffixes _total, _sum, _count, or _bucket.
func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) error {