Unit tests for native histograms (#12668)
promql: Extend testing framework to support native histograms

This includes both the internal testing framework as well as the rules unit test feature of promtool. This also adds a bunch of basic tests. Many of the code-level tests can now be converted to tests within the framework, and more tests can be added easily.

---------

Signed-off-by: Harold Dost <h.dost@criteo.com>
Signed-off-by: Gregor Zeitlinger <gregor.zeitlinger@grafana.com>
Signed-off-by: Stephen Lang <stephen.lang@grafana.com>
Co-authored-by: Harold Dost <h.dost@criteo.com>
Co-authored-by: Stephen Lang <stephen.lang@grafana.com>
Co-authored-by: Gregor Zeitlinger <gregor.zeitlinger@grafana.com>
parent 54aaa2bd7e · commit f01718262a
cmd/promtool/testdata/unittest.yml (46 changed lines)
@@ -10,6 +10,21 @@ tests:
      - series: test_full
        values: "0 0"

      - series: test_repeat
        values: "1x2"

      - series: test_increase
        values: "1+1x2"

      - series: test_histogram
        values: "{{schema:1 sum:-0.3 count:32.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}"

      - series: test_histogram_repeat
        values: "{{sum:3 count:2 buckets:[2]}}x2"

      - series: test_histogram_increase
        values: "{{sum:3 count:2 buckets:[2]}}+{{sum:1.3 count:1 buckets:[1]}}x2"

      - series: test_stale
        values: "0 stale"

@@ -31,6 +46,37 @@ tests:
        exp_samples:
          - value: 60

      # Repeat & increase
      - expr: test_repeat
        eval_time: 2m
        exp_samples:
          - value: 1
            labels: "test_repeat"
      - expr: test_increase
        eval_time: 2m
        exp_samples:
          - value: 3
            labels: "test_increase"

      # Histograms
      - expr: test_histogram
        eval_time: 1m
        exp_samples:
          - labels: "test_histogram"
            histogram: "{{schema:1 sum:-0.3 count:32.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}"

      - expr: test_histogram_repeat
        eval_time: 2m
        exp_samples:
          - labels: "test_histogram_repeat"
            histogram: "{{count:2 sum:3 buckets:[2]}}"

      - expr: test_histogram_increase
        eval_time: 2m
        exp_samples:
          - labels: "test_histogram_increase"
            histogram: "{{count:4 sum:5.6 buckets:[4]}}"

      # Ensure a value is stale as soon as it is marked as such.
      - expr: test_stale
        eval_time: 59s
@@ -29,6 +29,7 @@ import (
    "github.com/prometheus/common/model"
    "gopkg.in/yaml.v2"

    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/promql"
    "github.com/prometheus/prometheus/promql/parser"

@@ -348,12 +349,27 @@ Outer:
        gotSamples = append(gotSamples, parsedSample{
            Labels:    s.Metric.Copy(),
            Value:     s.F,
            Histogram: promql.HistogramTestExpression(s.H),
        })
    }

    var expSamples []parsedSample
    for _, s := range testCase.ExpSamples {
        lb, err := parser.ParseMetric(s.Labels)
        var hist *histogram.FloatHistogram
        if err == nil && s.Histogram != "" {
            _, values, parseErr := parser.ParseSeriesDesc("{} " + s.Histogram)
            switch {
            case parseErr != nil:
                err = parseErr
            case len(values) != 1:
                err = fmt.Errorf("expected 1 value, got %d", len(values))
            case values[0].Histogram == nil:
                err = fmt.Errorf("expected histogram, got %v", values[0])
            default:
                hist = values[0].Histogram
            }
        }
        if err != nil {
            err = fmt.Errorf("labels %q: %w", s.Labels, err)
            errs = append(errs, fmt.Errorf(" expr: %q, time: %s, err: %w", testCase.Expr,

@@ -363,6 +379,7 @@ Outer:
        expSamples = append(expSamples, parsedSample{
            Labels:    lb,
            Value:     s.Value,
            Histogram: promql.HistogramTestExpression(hist),
        })
    }

@@ -532,12 +549,14 @@ type promqlTestCase struct {
type sample struct {
    Labels    string  `yaml:"labels"`
    Value     float64 `yaml:"value"`
    Histogram string  `yaml:"histogram"` // A non-empty string means Value is ignored.
}

// parsedSample is a sample with parsed Labels.
type parsedSample struct {
    Labels    labels.Labels
    Value     float64
    Histogram string // TestExpression() of histogram.FloatHistogram
}

func parsedSamplesString(pss []parsedSample) string {

@@ -552,5 +571,8 @@ func parsedSamplesString(pss []parsedSample) string {
}

func (ps *parsedSample) String() string {
    if ps.Histogram != "" {
        return ps.Labels.String() + " " + ps.Histogram
    }
    return ps.Labels.String() + " " + strconv.FormatFloat(ps.Value, 'E', -1, 64)
}
@@ -76,10 +76,11 @@ series: <string>

# This uses expanding notation.
# Expanding notation:
# 'a+bxc' becomes 'a a+b a+(2*b) a+(3*b) … a+(c*b)'
# Read this as series starts at a, then c further samples incrementing by b.
# 'a-bxc' becomes 'a a-b a-(2*b) a-(3*b) … a-(c*b)'
# Read this as series starts at a, then c further samples decrementing by b (or incrementing by negative b).
# 'a+bxn' becomes 'a a+b a+(2*b) a+(3*b) … a+(n*b)'
# Read this as series starts at a, then n further samples incrementing by b.
# 'a-bxn' becomes 'a a-b a-(2*b) a-(3*b) … a-(n*b)'
# Read this as series starts at a, then n further samples decrementing by b (or incrementing by negative b).
# 'axn' becomes 'a a a … a' (n times) - it's a shorthand for 'a+0xn'
# There are special values to indicate missing and stale samples:
# '_' represents a missing sample from scrape
# 'stale' indicates a stale sample

@@ -88,6 +89,36 @@ series: <string>
# 2. ' 1-2x4' becomes '1 -1 -3 -5 -7' - series starts at 1, then 4 further samples decrementing by 2.
# 3. ' 1x4' becomes '1 1 1 1 1' - shorthand for '1+0x4', series starts at 1, then 4 further samples incrementing by 0.
# 4. ' 1 _x3 stale' becomes '1 _ _ _ stale' - the missing sample cannot increment, so 3 missing samples are produced by the '_x3' expression.
#
# Native histogram notation:
# Native histograms can be used instead of floating point numbers using the following notation:
# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}
# Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'.
# All properties are optional and default to 0. The order is not important. The following properties are supported:
# - schema (int):
#     Currently valid schema numbers are -4 <= n <= 8. They are all for
#     base-2 bucket schemas, where 1 is a bucket boundary in each case, and
#     then each power of two is divided into 2^n logarithmic buckets. Or
#     in other words, each bucket boundary is the previous boundary times
#     2^(2^-n).
# - sum (float):
#     The sum of all observations, including the zero bucket.
# - count (non-negative float):
#     The number of observations, including those that are NaN and including the zero bucket.
# - z_bucket (non-negative float):
#     The number of observations in the zero bucket.
# - z_bucket_w (non-negative float):
#     The width of the zero bucket.
#     If z_bucket_w > 0, the zero bucket contains all observations -z_bucket_w <= x <= z_bucket_w.
#     Otherwise, the zero bucket only contains observations that are exactly 0.
# - buckets (list of non-negative floats):
#     Observation counts in positive buckets. Each represents an absolute count.
# - offset (int):
#     The starting index of the first entry in the positive buckets.
# - n_buckets (list of non-negative floats):
#     Observation counts in negative buckets. Each represents an absolute count.
# - n_offset (int):
#     The starting index of the first entry in the negative buckets.
values: <string>
```
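To make the schema rule above concrete (my own restatement, not part of the diff): for schema n the factor between consecutive bucket boundaries is

$$
\text{base} = 2^{2^{-n}}, \qquad \text{bucket } i \text{ covers } \left(\text{base}^{\,i-1},\ \text{base}^{\,i}\,\right],
$$

so schema 0 gives a factor of 2, schema 1 gives $\sqrt{2} \approx 1.41$, and schema 3 gives $2^{1/8} \approx 1.09$. With this indexing, bucket 0 (offset:0) is always the bucket with an upper limit of 1, matching the comments in the new native_histograms.test file further below.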
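As a quick orientation for the notation documented above, here is a small sketch of my own (not part of the diff) that runs both a float series and a native-histogram series through the parser APIs introduced by this commit; the `main` wrapper is illustrative only.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// 'a+bxn' float notation: start at 1, then 2 further samples incrementing by 1.
	_, vals, err := parser.ParseSeriesDesc(`{} 1+1x2`)
	if err != nil {
		panic(err)
	}
	for _, v := range vals {
		fmt.Println(v.Value) // 1, 2, 3
	}

	// The same expanding notation applied to a native histogram: 3 identical samples.
	_, hvals, err := parser.ParseSeriesDesc(`{} {{schema:0 sum:5 count:4 buckets:[1 2 1]}}x2`)
	if err != nil {
		panic(err)
	}
	for _, v := range hvals {
		fmt.Println(v.Histogram.TestExpression()) // {{count:4 sum:5 buckets:[1 2 1]}} each time
	}
}
```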
@@ -15,6 +15,7 @@ package histogram

import (
    "fmt"
    "math"
    "strings"
)

@@ -130,6 +131,55 @@ func (h *FloatHistogram) String() string {
    return sb.String()
}

// TestExpression returns the string representation of this histogram as it is used in the internal PromQL testing
// framework as well as in promtool rules unit tests.
// The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series
func (h *FloatHistogram) TestExpression() string {
    var res []string
    m := h.Copy()

    m.Compact(math.MaxInt) // Compact to reduce the number of positive and negative spans to 1.

    if m.Schema != 0 {
        res = append(res, fmt.Sprintf("schema:%d", m.Schema))
    }
    if m.Count != 0 {
        res = append(res, fmt.Sprintf("count:%g", m.Count))
    }
    if m.Sum != 0 {
        res = append(res, fmt.Sprintf("sum:%g", m.Sum))
    }
    if m.ZeroCount != 0 {
        res = append(res, fmt.Sprintf("z_bucket:%g", m.ZeroCount))
    }
    if m.ZeroThreshold != 0 {
        res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold))
    }

    addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string {
        if len(spans) > 1 {
            panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind))
        }
        for _, span := range spans {
            if span.Offset != 0 {
                res = append(res, fmt.Sprintf("%s:%d", offsetKey, span.Offset))
            }
        }

        var bucketStr []string
        for _, bucket := range buckets {
            bucketStr = append(bucketStr, fmt.Sprintf("%g", bucket))
        }
        if len(bucketStr) > 0 {
            res = append(res, fmt.Sprintf("%s:[%s]", bucketsKey, strings.Join(bucketStr, " ")))
        }
        return res
    }
    res = addBuckets("positive", "buckets", "offset", m.PositiveBuckets, m.PositiveSpans)
    res = addBuckets("negative", "n_buckets", "n_offset", m.NegativeBuckets, m.NegativeSpans)
    return "{{" + strings.Join(res, " ") + "}}"
}

// ZeroBucket returns the zero bucket.
func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
    return Bucket[float64]{
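A short sketch of my own, mirroring the TestHistogramTestExpression case that appears later in this diff, of the round-trip property this method is designed for: its output is valid series-notation input for parser.ParseSeriesDesc. The `main` wrapper is assumed.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          1,
		Sum:             -0.3,
		Count:           3.1,
		ZeroCount:       7.1,
		ZeroThreshold:   0.05,
		PositiveBuckets: []float64{5.1, 10, 7},
		PositiveSpans:   []histogram.Span{{Offset: -3, Length: 3}},
		NegativeBuckets: []float64{4.1, 5},
		NegativeSpans:   []histogram.Span{{Offset: -5, Length: 2}},
	}

	expr := h.TestExpression()
	// {{schema:1 count:3.1 sum:-0.3 z_bucket:7.1 z_bucket_w:0.05 offset:-3 buckets:[5.1 10 7] n_offset:-5 n_buckets:[4.1 5]}}
	fmt.Println(expr)

	// Parsing "{} <expr>" yields a single histogram sample that renders back to the same expression.
	_, vals, err := parser.ParseSeriesDesc("{} " + expr)
	if err != nil {
		panic(err)
	}
	fmt.Println(vals[0].Histogram.TestExpression() == expr) // true
}
```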
|
|
@ -938,6 +938,21 @@ func TestFloatHistogramCompact(t *testing.T) {
|
|||
NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4},
|
||||
},
|
||||
},
|
||||
{
|
||||
"cut empty buckets in the middle",
|
||||
&FloatHistogram{
|
||||
PositiveSpans: []Span{{5, 4}},
|
||||
PositiveBuckets: []float64{1, 3, 0, 2},
|
||||
},
|
||||
0,
|
||||
&FloatHistogram{
|
||||
PositiveSpans: []Span{
|
||||
{Offset: 5, Length: 2},
|
||||
{Offset: 1, Length: 1},
|
||||
},
|
||||
PositiveBuckets: []float64{1, 3, 2},
|
||||
},
|
||||
},
|
||||
{
|
||||
"cut empty buckets at start or end of spans, even in the middle",
|
||||
&FloatHistogram{
|
||||
|
@ -955,7 +970,7 @@ func TestFloatHistogramCompact(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
"cut empty buckets at start or end but merge spans due to maxEmptyBuckets",
|
||||
"cut empty buckets at start and end - also merge spans due to maxEmptyBuckets",
|
||||
&FloatHistogram{
|
||||
PositiveSpans: []Span{{-4, 4}, {5, 3}},
|
||||
PositiveBuckets: []float64{0, 0, 1, 3.3, 4.2, 0.1, 3.3},
|
||||
|
@ -998,18 +1013,42 @@ func TestFloatHistogramCompact(t *testing.T) {
|
|||
PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3},
|
||||
},
|
||||
},
|
||||
{
|
||||
"cut empty buckets from the middle of a span, avoiding none due to maxEmptyBuckets",
|
||||
&FloatHistogram{
|
||||
PositiveSpans: []Span{{-2, 4}},
|
||||
PositiveBuckets: []float64{1, 0, 0, 3.3},
|
||||
},
|
||||
1,
|
||||
&FloatHistogram{
|
||||
PositiveSpans: []Span{{-2, 1}, {2, 1}},
|
||||
PositiveBuckets: []float64{1, 3.3},
|
||||
},
|
||||
},
|
||||
{
|
||||
"cut empty buckets and merge spans due to maxEmptyBuckets",
|
||||
&FloatHistogram{
|
||||
PositiveSpans: []Span{{-2, 4}, {3, 1}},
|
||||
PositiveBuckets: []float64{1, 0, 0, 3.3, 4.2},
|
||||
},
|
||||
1,
|
||||
&FloatHistogram{
|
||||
PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 1}},
|
||||
PositiveBuckets: []float64{1, 3.3, 4.2},
|
||||
},
|
||||
},
|
||||
{
|
||||
"cut empty buckets from the middle of a span, avoiding some due to maxEmptyBuckets",
|
||||
&FloatHistogram{
|
||||
PositiveSpans: []Span{{-4, 6}, {3, 3}},
|
||||
PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3},
|
||||
PositiveSpans: []Span{{-4, 6}, {3, 3}, {10, 2}},
|
||||
PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3, 2, 3},
|
||||
NegativeSpans: []Span{{0, 2}, {3, 5}},
|
||||
NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4},
|
||||
},
|
||||
1,
|
||||
&FloatHistogram{
|
||||
PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 3}},
|
||||
PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3},
|
||||
PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 3}, {10, 2}},
|
||||
PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3, 2, 3},
|
||||
NegativeSpans: []Span{{0, 2}, {3, 5}},
|
||||
NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4},
|
||||
},
|
||||
|
|
|
@@ -4547,6 +4547,16 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
        vector, err := res.Vector()
        require.NoError(t, err)

        if len(vector) == len(exp) {
            for i, e := range exp {
                got := vector[i].H
                if got != e.H {
                    // Error messages are better if we compare structs, not pointers.
                    require.Equal(t, *e.H, *got)
                }
            }
        }

        require.Equal(t, exp, vector)
    }

@@ -4557,8 +4567,8 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
            }
            queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
        })
        idx0++
    }
        idx0++
    }
}
|
@ -21,6 +21,7 @@ import (
|
|||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/value"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
)
|
||||
|
||||
%}
|
||||
|
@ -35,6 +36,10 @@ import (
|
|||
lblList []labels.Label
|
||||
strings []string
|
||||
series []SequenceValue
|
||||
histogram *histogram.FloatHistogram
|
||||
descriptors map[string]interface{}
|
||||
bucket_set []float64
|
||||
int int64
|
||||
uint uint64
|
||||
float float64
|
||||
duration time.Duration
|
||||
|
@ -54,6 +59,8 @@ IDENTIFIER
|
|||
LEFT_BRACE
|
||||
LEFT_BRACKET
|
||||
LEFT_PAREN
|
||||
OPEN_HIST
|
||||
CLOSE_HIST
|
||||
METRIC_IDENTIFIER
|
||||
NUMBER
|
||||
RIGHT_BRACE
|
||||
|
@ -64,6 +71,20 @@ SPACE
|
|||
STRING
|
||||
TIMES
|
||||
|
||||
// Histogram Descriptors.
|
||||
%token histogramDescStart
|
||||
%token <item>
|
||||
SUM_DESC
|
||||
COUNT_DESC
|
||||
SCHEMA_DESC
|
||||
OFFSET_DESC
|
||||
NEGATIVE_OFFSET_DESC
|
||||
BUCKETS_DESC
|
||||
NEGATIVE_BUCKETS_DESC
|
||||
ZERO_BUCKET_DESC
|
||||
ZERO_BUCKET_WIDTH_DESC
|
||||
%token histogramDescEnd
|
||||
|
||||
// Operators.
|
||||
%token operatorsStart
|
||||
%token <item>
|
||||
|
@ -145,6 +166,10 @@ START_METRIC_SELECTOR
|
|||
%type <label> label_set_item
|
||||
%type <strings> grouping_label_list grouping_labels maybe_grouping_labels
|
||||
%type <series> series_item series_values
|
||||
%type <histogram> histogram_series_value
|
||||
%type <descriptors> histogram_desc_map histogram_desc_item
|
||||
%type <bucket_set> bucket_set bucket_set_list
|
||||
%type <int> int
|
||||
%type <uint> uint
|
||||
%type <float> number series_value signed_number signed_or_unsigned_number
|
||||
%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector
|
||||
|
@ -256,7 +281,7 @@ binary_expr : expr ADD bin_modifier expr { $$ = yylex.(*parser).newBinar
|
|||
;
|
||||
|
||||
// Using left recursion for the modifier rules, helps to keep the parser stack small and
|
||||
// reduces allocations
|
||||
// reduces allocations.
|
||||
bin_modifier : group_modifiers;
|
||||
|
||||
bool_modifier : /* empty */
|
||||
|
@ -470,7 +495,7 @@ subquery_expr : expr LEFT_BRACKET duration COLON maybe_duration RIGHT_BRACKET
|
|||
*/
|
||||
|
||||
unary_expr :
|
||||
/* gives the rule the same precedence as MUL. This aligns with mathematical conventions */
|
||||
/* Gives the rule the same precedence as MUL. This aligns with mathematical conventions. */
|
||||
unary_op expr %prec MUL
|
||||
{
|
||||
if nl, ok := $2.(*NumberLiteral); ok {
|
||||
|
@ -605,7 +630,10 @@ label_set_item : IDENTIFIER EQL STRING
|
|||
;
|
||||
|
||||
/*
|
||||
* Series descriptions (only used by unit tests).
|
||||
* Series descriptions:
|
||||
* A separate language that is used to generate series values for promtool and the PromQL testing framework.
|
||||
* It is included in the promQL parser, because it shares common functionality, such as parsing a metric.
|
||||
* The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series
|
||||
*/
|
||||
|
||||
series_description: metric series_values
|
||||
|
@ -641,6 +669,7 @@ series_item : BLANK
|
|||
| series_value TIMES uint
|
||||
{
|
||||
$$ = []SequenceValue{}
|
||||
// Add an additional value for time 0, which we ignore in tests.
|
||||
for i:=uint64(0); i <= $3; i++{
|
||||
$$ = append($$, SequenceValue{Value: $1})
|
||||
}
|
||||
|
@ -648,11 +677,42 @@ series_item : BLANK
|
|||
| series_value signed_number TIMES uint
|
||||
{
|
||||
$$ = []SequenceValue{}
|
||||
// Add an additional value for time 0, which we ignore in tests.
|
||||
for i:=uint64(0); i <= $4; i++{
|
||||
$$ = append($$, SequenceValue{Value: $1})
|
||||
$1 += $2
|
||||
}
|
||||
}
|
||||
// Histogram descriptions (part of unit testing).
|
||||
| histogram_series_value
|
||||
{
|
||||
$$ = []SequenceValue{{Histogram:$1}}
|
||||
}
|
||||
| histogram_series_value TIMES uint
|
||||
{
|
||||
$$ = []SequenceValue{}
|
||||
// Add an additional value for time 0, which we ignore in tests.
|
||||
for i:=uint64(0); i <= $3; i++{
|
||||
$$ = append($$, SequenceValue{Histogram:$1})
|
||||
//$1 += $2
|
||||
}
|
||||
}
|
||||
| histogram_series_value ADD histogram_series_value TIMES uint
|
||||
{
|
||||
val, err := yylex.(*parser).histogramsIncreaseSeries($1,$3,$5)
|
||||
if err != nil {
|
||||
yylex.(*parser).addSemanticError(err)
|
||||
}
|
||||
$$ = val
|
||||
}
|
||||
| histogram_series_value SUB histogram_series_value TIMES uint
|
||||
{
|
||||
val, err := yylex.(*parser).histogramsDecreaseSeries($1,$3,$5)
|
||||
if err != nil {
|
||||
yylex.(*parser).addSemanticError(err)
|
||||
}
|
||||
$$ = val
|
||||
}
|
||||
;
|
||||
|
||||
series_value : IDENTIFIER
|
||||
|
@ -666,7 +726,109 @@ series_value : IDENTIFIER
|
|||
| signed_number
|
||||
;
|
||||
|
||||
histogram_series_value
|
||||
: OPEN_HIST histogram_desc_map SPACE CLOSE_HIST
|
||||
{
|
||||
$$ = yylex.(*parser).buildHistogramFromMap(&$2)
|
||||
}
|
||||
| OPEN_HIST histogram_desc_map CLOSE_HIST
|
||||
{
|
||||
$$ = yylex.(*parser).buildHistogramFromMap(&$2)
|
||||
}
|
||||
| OPEN_HIST SPACE CLOSE_HIST
|
||||
{
|
||||
m := yylex.(*parser).newMap()
|
||||
$$ = yylex.(*parser).buildHistogramFromMap(&m)
|
||||
}
|
||||
| OPEN_HIST CLOSE_HIST
|
||||
{
|
||||
m := yylex.(*parser).newMap()
|
||||
$$ = yylex.(*parser).buildHistogramFromMap(&m)
|
||||
}
|
||||
;
|
||||
|
||||
histogram_desc_map
|
||||
: histogram_desc_map SPACE histogram_desc_item
|
||||
{
|
||||
$$ = *(yylex.(*parser).mergeMaps(&$1,&$3))
|
||||
}
|
||||
| histogram_desc_item
|
||||
{
|
||||
$$ = $1
|
||||
}
|
||||
| histogram_desc_map error {
|
||||
yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. buckets:[5 10 7]")
|
||||
}
|
||||
;
|
||||
|
||||
histogram_desc_item
|
||||
: SCHEMA_DESC COLON int
|
||||
{
|
||||
$$ = yylex.(*parser).newMap()
|
||||
$$["schema"] = $3
|
||||
}
|
||||
| SUM_DESC COLON signed_or_unsigned_number
|
||||
{
|
||||
$$ = yylex.(*parser).newMap()
|
||||
$$["sum"] = $3
|
||||
}
|
||||
| COUNT_DESC COLON number
|
||||
{
|
||||
$$ = yylex.(*parser).newMap()
|
||||
$$["count"] = $3
|
||||
}
|
||||
| ZERO_BUCKET_DESC COLON number
|
||||
{
|
||||
$$ = yylex.(*parser).newMap()
|
||||
$$["z_bucket"] = $3
|
||||
}
|
||||
| ZERO_BUCKET_WIDTH_DESC COLON number
|
||||
{
|
||||
$$ = yylex.(*parser).newMap()
|
||||
$$["z_bucket_w"] = $3
|
||||
}
|
||||
| BUCKETS_DESC COLON bucket_set
|
||||
{
|
||||
$$ = yylex.(*parser).newMap()
|
||||
$$["buckets"] = $3
|
||||
}
|
||||
| OFFSET_DESC COLON int
|
||||
{
|
||||
$$ = yylex.(*parser).newMap()
|
||||
$$["offset"] = $3
|
||||
}
|
||||
| NEGATIVE_BUCKETS_DESC COLON bucket_set
|
||||
{
|
||||
$$ = yylex.(*parser).newMap()
|
||||
$$["n_buckets"] = $3
|
||||
}
|
||||
| NEGATIVE_OFFSET_DESC COLON int
|
||||
{
|
||||
$$ = yylex.(*parser).newMap()
|
||||
$$["n_offset"] = $3
|
||||
}
|
||||
;
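Illustrative only (the `main` wrapper is assumed; the behavior is taken from the parser tests later in this diff): descriptors accepted by histogram_desc_item can appear in any order, and the duplicate-key check in mergeMaps surfaces as a parse error.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Descriptors in arbitrary order still build one histogram.
	_, vals, err := parser.ParseSeriesDesc(`{} {{sum:5 schema:1 count:4 buckets:[1 2 1]}}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(vals[0].Histogram.TestExpression()) // {{schema:1 count:4 sum:5 buckets:[1 2 1]}}

	// Repeating a descriptor is rejected.
	_, _, err = parser.ParseSeriesDesc(`{} {{schema:1 schema:1}}`)
	fmt.Println(err) // 1:1: parse error: duplicate key "schema" in histogram
}
```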
|
||||
|
||||
bucket_set : LEFT_BRACKET bucket_set_list SPACE RIGHT_BRACKET
|
||||
{
|
||||
$$ = $2
|
||||
}
|
||||
| LEFT_BRACKET bucket_set_list RIGHT_BRACKET
|
||||
{
|
||||
$$ = $2
|
||||
}
|
||||
;
|
||||
|
||||
bucket_set_list : bucket_set_list SPACE number
|
||||
{
|
||||
$$ = append($1, $3)
|
||||
}
|
||||
| number
|
||||
{
|
||||
$$ = []float64{$1}
|
||||
}
|
||||
| bucket_set_list error
|
||||
;
|
||||
|
||||
|
||||
/*
|
||||
|
@ -675,7 +837,7 @@ series_value : IDENTIFIER
|
|||
|
||||
aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK ;
|
||||
|
||||
// inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
|
||||
// Inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
|
||||
maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2;
|
||||
|
||||
unary_op : ADD | SUB;
|
||||
|
@ -713,6 +875,10 @@ uint : NUMBER
|
|||
}
|
||||
;
|
||||
|
||||
int : SUB uint { $$ = -int64($2) }
|
||||
| uint { $$ = int64($1) }
|
||||
;
|
||||
|
||||
duration : DURATION
|
||||
{
|
||||
var err error
|
||||
|
|
File diff suppressed because it is too large.
@ -133,9 +133,23 @@ var key = map[string]ItemType{
|
|||
"end": END,
|
||||
}
|
||||
|
||||
var histogramDesc = map[string]ItemType{
|
||||
"sum": SUM_DESC,
|
||||
"count": COUNT_DESC,
|
||||
"schema": SCHEMA_DESC,
|
||||
"offset": OFFSET_DESC,
|
||||
"n_offset": NEGATIVE_OFFSET_DESC,
|
||||
"buckets": BUCKETS_DESC,
|
||||
"n_buckets": NEGATIVE_BUCKETS_DESC,
|
||||
"z_bucket": ZERO_BUCKET_DESC,
|
||||
"z_bucket_w": ZERO_BUCKET_WIDTH_DESC,
|
||||
}
|
||||
|
||||
// ItemTypeStr is the default string representations for common Items. It does not
|
||||
// imply that those are the only character sequences that can be lexed to such an Item.
|
||||
var ItemTypeStr = map[ItemType]string{
|
||||
OPEN_HIST: "{{",
|
||||
CLOSE_HIST: "}}",
|
||||
LEFT_PAREN: "(",
|
||||
RIGHT_PAREN: ")",
|
||||
LEFT_BRACE: "{",
|
||||
|
@ -224,6 +238,16 @@ type stateFn func(*Lexer) stateFn
|
|||
// Negative numbers indicate undefined positions.
|
||||
type Pos int
|
||||
|
||||
type histogramState int
|
||||
|
||||
const (
|
||||
histogramStateNone histogramState = iota
|
||||
histogramStateOpen
|
||||
histogramStateMul
|
||||
histogramStateAdd
|
||||
histogramStateSub
|
||||
)
|
||||
|
||||
// Lexer holds the state of the scanner.
|
||||
type Lexer struct {
|
||||
input string // The string being scanned.
|
||||
|
@ -241,9 +265,10 @@ type Lexer struct {
|
|||
gotColon bool // Whether we got a ':' after [ was opened.
|
||||
stringOpen rune // Quote rune of the string currently being read.
|
||||
|
||||
// seriesDesc is set when a series description for the testing
|
||||
// language is lexed.
|
||||
seriesDesc bool
|
||||
// Series description variables for the internal PromQL testing framework as well as promtool rules unit tests.
// See https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series
|
||||
seriesDesc bool // Whether we are lexing a series description.
|
||||
histogramState histogramState // Determines whether the lexer is currently inside a histogram description.
|
||||
}
|
||||
|
||||
// next returns the next rune in the input.
|
||||
|
@ -338,6 +363,9 @@ const lineComment = "#"
|
|||
|
||||
// lexStatements is the top-level state for lexing.
|
||||
func lexStatements(l *Lexer) stateFn {
|
||||
if l.histogramState != histogramStateNone {
|
||||
return lexHistogram
|
||||
}
|
||||
if l.braceOpen {
|
||||
return lexInsideBraces
|
||||
}
|
||||
|
@ -460,6 +488,117 @@ func lexStatements(l *Lexer) stateFn {
|
|||
return lexStatements
|
||||
}
|
||||
|
||||
func lexHistogram(l *Lexer) stateFn {
|
||||
switch l.histogramState {
|
||||
case histogramStateMul:
|
||||
l.histogramState = histogramStateNone
|
||||
l.next()
|
||||
l.emit(TIMES)
|
||||
return lexNumber
|
||||
case histogramStateAdd:
|
||||
l.histogramState = histogramStateNone
|
||||
l.next()
|
||||
l.emit(ADD)
|
||||
return lexValueSequence
|
||||
case histogramStateSub:
|
||||
l.histogramState = histogramStateNone
|
||||
l.next()
|
||||
l.emit(SUB)
|
||||
return lexValueSequence
|
||||
}
|
||||
|
||||
if l.bracketOpen {
|
||||
return lexBuckets
|
||||
}
|
||||
switch r := l.next(); {
|
||||
case isSpace(r):
|
||||
l.emit(SPACE)
|
||||
return lexSpace
|
||||
case isAlpha(r):
|
||||
l.backup()
|
||||
return lexHistogramDescriptor
|
||||
case r == ':':
|
||||
l.emit(COLON)
|
||||
return lexHistogram
|
||||
case r == '-':
|
||||
l.emit(SUB)
|
||||
return lexNumber
|
||||
case r == 'x':
|
||||
l.emit(TIMES)
|
||||
return lexNumber
|
||||
case isDigit(r):
|
||||
l.backup()
|
||||
return lexNumber
|
||||
case r == '[':
|
||||
l.bracketOpen = true
|
||||
l.emit(LEFT_BRACKET)
|
||||
return lexBuckets
|
||||
case r == '}' && l.peek() == '}':
|
||||
l.next()
|
||||
l.emit(CLOSE_HIST)
|
||||
switch l.peek() {
|
||||
case 'x':
|
||||
l.histogramState = histogramStateMul
|
||||
return lexHistogram
|
||||
case '+':
|
||||
l.histogramState = histogramStateAdd
|
||||
return lexHistogram
|
||||
case '-':
|
||||
l.histogramState = histogramStateSub
|
||||
return lexHistogram
|
||||
default:
|
||||
l.histogramState = histogramStateNone
|
||||
return lexValueSequence
|
||||
}
|
||||
default:
|
||||
return l.errorf("histogram description incomplete unexpected: %q", r)
|
||||
}
|
||||
}
|
||||
|
||||
func lexHistogramDescriptor(l *Lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch r := l.next(); {
|
||||
case isAlpha(r):
|
||||
// absorb.
|
||||
default:
|
||||
l.backup()
|
||||
|
||||
word := l.input[l.start:l.pos]
|
||||
if desc, ok := histogramDesc[strings.ToLower(word)]; ok {
|
||||
if l.peek() == ':' {
|
||||
l.emit(desc)
|
||||
return lexHistogram
|
||||
} else {
|
||||
l.errorf("missing `:` for histogram descriptor")
|
||||
}
|
||||
} else {
|
||||
l.errorf("bad histogram descriptor found: %q", word)
|
||||
}
|
||||
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
return lexStatements
|
||||
}
|
||||
|
||||
func lexBuckets(l *Lexer) stateFn {
|
||||
switch r := l.next(); {
|
||||
case isSpace(r):
|
||||
l.emit(SPACE)
|
||||
return lexSpace
|
||||
case isDigit(r):
|
||||
l.backup()
|
||||
return lexNumber
|
||||
case r == ']':
|
||||
l.bracketOpen = false
|
||||
l.emit(RIGHT_BRACKET)
|
||||
return lexHistogram
|
||||
default:
|
||||
return l.errorf("invalid character in buckets description: %q", r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexInsideBraces scans the inside of a vector selector. Keywords are ignored and
|
||||
// scanned as identifiers.
|
||||
func lexInsideBraces(l *Lexer) stateFn {
|
||||
|
@ -517,9 +656,20 @@ func lexInsideBraces(l *Lexer) stateFn {
|
|||
|
||||
// lexValueSequence scans a value sequence of a series description.
|
||||
func lexValueSequence(l *Lexer) stateFn {
|
||||
if l.histogramState != histogramStateNone {
|
||||
return lexHistogram
|
||||
}
|
||||
switch r := l.next(); {
|
||||
case r == eof:
|
||||
return lexStatements
|
||||
case r == '{' && l.peek() == '{':
|
||||
if l.histogramState != histogramStateNone {
|
||||
return l.errorf("unexpected histogram opening {{")
|
||||
}
|
||||
l.histogramState = histogramStateOpen
|
||||
l.next()
|
||||
l.emit(OPEN_HIST)
|
||||
return lexHistogram
|
||||
case isSpace(r):
|
||||
l.emit(SPACE)
|
||||
lexSpace(l)
|
||||
|
|
|
@ -494,6 +494,73 @@ var tests = []struct {
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "histogram series descriptions",
|
||||
tests: []testCase{
|
||||
{
|
||||
input: `{} {{buckets:[5]}}`,
|
||||
expected: []Item{
|
||||
{LEFT_BRACE, 0, `{`},
|
||||
{RIGHT_BRACE, 1, `}`},
|
||||
{SPACE, 2, ` `},
|
||||
{OPEN_HIST, 3, `{{`},
|
||||
{BUCKETS_DESC, 5, `buckets`},
|
||||
{COLON, 12, `:`},
|
||||
{LEFT_BRACKET, 13, `[`},
|
||||
{NUMBER, 14, `5`},
|
||||
{RIGHT_BRACKET, 15, `]`},
|
||||
{CLOSE_HIST, 16, `}}`},
|
||||
},
|
||||
seriesDesc: true,
|
||||
},
|
||||
{
|
||||
input: `{} {{buckets: [5 10 7]}}`,
|
||||
expected: []Item{
|
||||
{LEFT_BRACE, 0, `{`},
|
||||
{RIGHT_BRACE, 1, `}`},
|
||||
{SPACE, 2, ` `},
|
||||
{OPEN_HIST, 3, `{{`},
|
||||
{BUCKETS_DESC, 5, `buckets`},
|
||||
{COLON, 12, `:`},
|
||||
{SPACE, 13, ` `},
|
||||
{LEFT_BRACKET, 14, `[`},
|
||||
{NUMBER, 15, `5`},
|
||||
{SPACE, 16, ` `},
|
||||
{NUMBER, 17, `10`},
|
||||
{SPACE, 19, ` `},
|
||||
{NUMBER, 20, `7`},
|
||||
{RIGHT_BRACKET, 21, `]`},
|
||||
{CLOSE_HIST, 22, `}}`},
|
||||
},
|
||||
seriesDesc: true,
|
||||
},
|
||||
{
|
||||
input: `{} {{buckets: [5 10 7] schema:1}}`,
|
||||
expected: []Item{
|
||||
{LEFT_BRACE, 0, `{`},
|
||||
{RIGHT_BRACE, 1, `}`},
|
||||
{SPACE, 2, ` `},
|
||||
{OPEN_HIST, 3, `{{`},
|
||||
{BUCKETS_DESC, 5, `buckets`},
|
||||
{COLON, 12, `:`},
|
||||
{SPACE, 13, ` `},
|
||||
{LEFT_BRACKET, 14, `[`},
|
||||
{NUMBER, 15, `5`},
|
||||
{SPACE, 16, ` `},
|
||||
{NUMBER, 17, `10`},
|
||||
{SPACE, 19, ` `},
|
||||
{NUMBER, 20, `7`},
|
||||
{RIGHT_BRACKET, 21, `]`},
|
||||
{SPACE, 22, ` `},
|
||||
{SCHEMA_DESC, 23, `schema`},
|
||||
{COLON, 29, `:`},
|
||||
{NUMBER, 30, `1`},
|
||||
{CLOSE_HIST, 31, `}}`},
|
||||
},
|
||||
seriesDesc: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "series descriptions",
|
||||
tests: []testCase{
|
||||
|
@ -735,7 +802,6 @@ func TestLexer(t *testing.T) {
|
|||
|
||||
for l.state = lexStatements; l.state != nil; {
|
||||
out = append(out, Item{})
|
||||
|
||||
l.NextItem(&out[len(out)-1])
|
||||
}
|
||||
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/timestamp"
|
||||
"github.com/prometheus/prometheus/util/strutil"
|
||||
|
@ -168,6 +169,21 @@ func (errs ParseErrors) Error() string {
|
|||
return "error contains no error message"
|
||||
}
|
||||
|
||||
// EnrichParseError enriches a single or list of parse errors (used for unit tests and promtool).
|
||||
func EnrichParseError(err error, enrich func(parseErr *ParseErr)) {
|
||||
var parseErr *ParseErr
|
||||
if errors.As(err, &parseErr) {
|
||||
enrich(parseErr)
|
||||
}
|
||||
var parseErrors ParseErrors
|
||||
if errors.As(err, &parseErrors) {
|
||||
for i, e := range parseErrors {
|
||||
enrich(&e)
|
||||
parseErrors[i] = e
|
||||
}
|
||||
}
|
||||
}
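A hedged usage sketch of EnrichParseError: the helper function and the line number are invented for illustration, but the call shape mirrors the parseSeries helper added to promql/test.go later in this diff.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

// parseSeriesAt parses one series description and, on failure, stamps the
// surrounding file line number onto the parse error(s) via EnrichParseError.
func parseSeriesAt(defLine string, line int) ([]parser.SequenceValue, error) {
	_, vals, err := parser.ParseSeriesDesc(defLine)
	if err != nil {
		parser.EnrichParseError(err, func(parseErr *parser.ParseErr) {
			parseErr.LineOffset = line
		})
		return nil, err
	}
	return vals, nil
}

func main() {
	if _, err := parseSeriesAt(`{} {{schema:1 schema:1}}`, 42); err != nil {
		fmt.Println(err) // the reported position now carries the LineOffset of 42
	}
}
```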
|
||||
|
||||
// ParseExpr returns the expression parsed from the input.
|
||||
func ParseExpr(input string) (expr Expr, err error) {
|
||||
p := NewParser(input)
|
||||
|
@ -216,12 +232,16 @@ func ParseMetricSelector(input string) (m []*labels.Matcher, err error) {
|
|||
type SequenceValue struct {
|
||||
Value float64
|
||||
Omitted bool
|
||||
Histogram *histogram.FloatHistogram
|
||||
}
|
||||
|
||||
func (v SequenceValue) String() string {
|
||||
if v.Omitted {
|
||||
return "_"
|
||||
}
|
||||
if v.Histogram != nil {
|
||||
return v.Histogram.String()
|
||||
}
|
||||
return fmt.Sprintf("%f", v.Value)
|
||||
}
|
||||
|
||||
|
@ -270,6 +290,10 @@ func (p *parser) addParseErr(positionRange PositionRange, err error) {
|
|||
p.parseErrors = append(p.parseErrors, perr)
|
||||
}
|
||||
|
||||
func (p *parser) addSemanticError(err error) {
|
||||
p.addParseErr(p.yyParser.lval.item.PositionRange(), err)
|
||||
}
|
||||
|
||||
// unexpected creates a parser error complaining about an unexpected lexer item.
|
||||
// The item that is presented as unexpected is always the last item produced
|
||||
// by the lexer.
|
||||
|
@ -443,6 +467,147 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE
|
|||
return ret
|
||||
}
|
||||
|
||||
// newMap is used when building the FloatHistogram from a map.
|
||||
func (p *parser) newMap() (ret map[string]interface{}) {
|
||||
return map[string]interface{}{}
|
||||
}
|
||||
|
||||
// mergeMaps combines the maps that are later used to build the FloatHistogram.
|
||||
// This will merge the right map into the left map.
|
||||
func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string]interface{}) {
|
||||
for key, value := range *right {
|
||||
if _, ok := (*left)[key]; ok {
|
||||
p.addParseErrf(PositionRange{}, "duplicate key \"%s\" in histogram", key)
|
||||
continue
|
||||
}
|
||||
(*left)[key] = value
|
||||
}
|
||||
return left
|
||||
}
|
||||
|
||||
func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
|
||||
return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram {
|
||||
return a.Add(b)
|
||||
})
|
||||
}
|
||||
|
||||
func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
|
||||
return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram {
|
||||
return a.Sub(b)
|
||||
})
|
||||
}
|
||||
|
||||
func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64,
|
||||
combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) *histogram.FloatHistogram,
|
||||
) ([]SequenceValue, error) {
|
||||
ret := make([]SequenceValue, times+1)
|
||||
// Add an additional value (the base) for time 0, which we ignore in tests.
|
||||
ret[0] = SequenceValue{Histogram: base}
|
||||
cur := base
|
||||
for i := uint64(1); i <= times; i++ {
|
||||
if cur.Schema > inc.Schema {
|
||||
return nil, fmt.Errorf("error combining histograms: cannot merge from schema %d to %d", inc.Schema, cur.Schema)
|
||||
}
|
||||
|
||||
cur = combine(cur.Copy(), inc)
|
||||
ret[i] = SequenceValue{Histogram: cur}
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
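For intuition, a small sketch (not part of the diff; `main` wrapper assumed) of what histogramsIncreaseSeries produces for the incr_histogram series used in the new native_histograms.test file, shortened to x2 so the output stays small.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// `base+incxN` yields N+1 samples: the base, then N cumulative additions of inc.
	_, vals, err := parser.ParseSeriesDesc(
		`{} {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:1 buckets:[1] offset:1}}x2`)
	if err != nil {
		panic(err)
	}
	for _, v := range vals {
		fmt.Println(v.Histogram.TestExpression())
	}
	// {{count:4 sum:4 buckets:[1 2 1]}}
	// {{count:5 sum:6 buckets:[1 3 1]}}
	// {{count:6 sum:8 buckets:[1 4 1]}}
}
```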
|
||||
|
||||
// buildHistogramFromMap is used in the grammar to take the individual parts of the histogram and complete it.
|
||||
func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.FloatHistogram {
|
||||
output := &histogram.FloatHistogram{}
|
||||
|
||||
val, ok := (*desc)["schema"]
|
||||
if ok {
|
||||
schema, ok := val.(int64)
|
||||
if ok {
|
||||
output.Schema = int32(schema)
|
||||
} else {
|
||||
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing schema number: %v", val)
|
||||
}
|
||||
}
|
||||
|
||||
val, ok = (*desc)["sum"]
|
||||
if ok {
|
||||
sum, ok := val.(float64)
|
||||
if ok {
|
||||
output.Sum = sum
|
||||
} else {
|
||||
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing sum number: %v", val)
|
||||
}
|
||||
}
|
||||
val, ok = (*desc)["count"]
|
||||
if ok {
|
||||
count, ok := val.(float64)
|
||||
if ok {
|
||||
output.Count = count
|
||||
} else {
|
||||
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing count number: %v", val)
|
||||
}
|
||||
}
|
||||
|
||||
val, ok = (*desc)["z_bucket"]
|
||||
if ok {
|
||||
bucket, ok := val.(float64)
|
||||
if ok {
|
||||
output.ZeroCount = bucket
|
||||
} else {
|
||||
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket number: %v", val)
|
||||
}
|
||||
}
|
||||
val, ok = (*desc)["z_bucket_w"]
|
||||
if ok {
|
||||
bucketWidth, ok := val.(float64)
|
||||
if ok {
|
||||
output.ZeroThreshold = bucketWidth
|
||||
} else {
|
||||
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket_w number: %v", val)
|
||||
}
|
||||
}
|
||||
|
||||
buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset")
|
||||
output.PositiveBuckets = buckets
|
||||
output.PositiveSpans = spans
|
||||
|
||||
buckets, spans = p.buildHistogramBucketsAndSpans(desc, "n_buckets", "n_offset")
|
||||
output.NegativeBuckets = buckets
|
||||
output.NegativeSpans = spans
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]interface{}, bucketsKey, offsetKey string,
|
||||
) (buckets []float64, spans []histogram.Span) {
|
||||
bucketCount := 0
|
||||
val, ok := (*desc)[bucketsKey]
|
||||
if ok {
|
||||
val, ok := val.([]float64)
|
||||
if ok {
|
||||
buckets = val
|
||||
bucketCount = len(buckets)
|
||||
} else {
|
||||
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing %s float array: %v", bucketsKey, val)
|
||||
}
|
||||
}
|
||||
offset := int32(0)
|
||||
val, ok = (*desc)[offsetKey]
|
||||
if ok {
|
||||
val, ok := val.(int64)
|
||||
if ok {
|
||||
offset = int32(val)
|
||||
} else {
|
||||
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing %s number: %v", offsetKey, val)
|
||||
}
|
||||
}
|
||||
if bucketCount > 0 {
|
||||
spans = []histogram.Span{{Offset: offset, Length: uint32(bucketCount)}}
|
||||
}
|
||||
return
|
||||
}
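A brief sketch (the `main` wrapper is assumed; the values mirror the "all properties used" parser test below) showing how a buckets list plus an offset becomes a single span on each side of the histogram.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	_, vals, err := parser.ParseSeriesDesc(`{} {{buckets:[5 10 7] offset:-3 n_buckets:[4 5] n_offset:-5}}`)
	if err != nil {
		panic(err)
	}
	h := vals[0].Histogram
	fmt.Println(h.PositiveSpans)   // [{-3 3}]
	fmt.Println(h.PositiveBuckets) // [5 10 7]
	fmt.Println(h.NegativeSpans)   // [{-5 2}]
	fmt.Println(h.NegativeBuckets) // [4 5]
}
```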
|
||||
|
||||
// number parses a number.
|
||||
func (p *parser) number(val string) float64 {
|
||||
n, err := strconv.ParseInt(val, 0, 64)
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
)
|
||||
|
||||
|
@ -3629,6 +3630,17 @@ var testSeries = []struct {
|
|||
input: `my_metric{a="b"} 1 2 3-0x4`,
|
||||
expectedMetric: labels.FromStrings(labels.MetricName, "my_metric", "a", "b"),
|
||||
expectedValues: newSeq(1, 2, 3, 3, 3, 3, 3),
|
||||
}, {
|
||||
input: `{} 1+1`,
|
||||
fail: true,
|
||||
}, {
|
||||
input: `{} 1x0`,
|
||||
expectedMetric: labels.EmptyLabels(),
|
||||
expectedValues: newSeq(1),
|
||||
}, {
|
||||
input: `{} 1+1x0`,
|
||||
expectedMetric: labels.EmptyLabels(),
|
||||
expectedValues: newSeq(1),
|
||||
}, {
|
||||
input: `my_metric{a="b"} 1 3 _ 5 _x4`,
|
||||
expectedMetric: labels.FromStrings(labels.MetricName, "my_metric", "a", "b"),
|
||||
|
@ -3696,6 +3708,305 @@ func newSeq(vals ...float64) (res []SequenceValue) {
|
|||
return res
|
||||
}
|
||||
|
||||
func TestParseHistogramSeries(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
name string
|
||||
input string
|
||||
expected []histogram.FloatHistogram
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "empty histogram",
|
||||
input: "{} {{}}",
|
||||
expected: []histogram.FloatHistogram{{}},
|
||||
},
|
||||
{
|
||||
name: "empty histogram with space",
|
||||
input: "{} {{ }}",
|
||||
expected: []histogram.FloatHistogram{{}},
|
||||
},
|
||||
{
|
||||
name: "all properties used",
|
||||
input: `{} {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}`,
|
||||
expected: []histogram.FloatHistogram{{
|
||||
Schema: 1,
|
||||
Sum: -0.3,
|
||||
Count: 3.1,
|
||||
ZeroCount: 7.1,
|
||||
ZeroThreshold: 0.05,
|
||||
PositiveBuckets: []float64{5.1, 10, 7},
|
||||
PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}},
|
||||
NegativeBuckets: []float64{4.1, 5},
|
||||
NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "all properties used - with spaces",
|
||||
input: `{} {{schema:1 sum:0.3 count:3 z_bucket:7 z_bucket_w:5 buckets:[5 10 7 ] offset:-3 n_buckets:[4 5] n_offset:5 }}`,
|
||||
expected: []histogram.FloatHistogram{{
|
||||
Schema: 1,
|
||||
Sum: 0.3,
|
||||
Count: 3,
|
||||
ZeroCount: 7,
|
||||
ZeroThreshold: 5,
|
||||
PositiveBuckets: []float64{5, 10, 7},
|
||||
PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}},
|
||||
NegativeBuckets: []float64{4, 5},
|
||||
NegativeSpans: []histogram.Span{{Offset: 5, Length: 2}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "static series",
|
||||
input: `{} {{buckets:[5 10 7] schema:1}}x2`,
|
||||
expected: []histogram.FloatHistogram{
|
||||
{
|
||||
Schema: 1,
|
||||
PositiveBuckets: []float64{5, 10, 7},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
}},
|
||||
},
|
||||
{
|
||||
Schema: 1,
|
||||
PositiveBuckets: []float64{5, 10, 7},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
}},
|
||||
},
|
||||
{
|
||||
Schema: 1,
|
||||
PositiveBuckets: []float64{5, 10, 7},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "static series - x0",
|
||||
input: `{} {{buckets:[5 10 7] schema:1}}x0`,
|
||||
expected: []histogram.FloatHistogram{
|
||||
{
|
||||
Schema: 1,
|
||||
PositiveBuckets: []float64{5, 10, 7},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "2 histograms stated explicitly",
|
||||
input: `{} {{buckets:[5 10 7] schema:1}} {{buckets:[1 2 3] schema:1}}`,
|
||||
expected: []histogram.FloatHistogram{
|
||||
{
|
||||
Schema: 1,
|
||||
PositiveBuckets: []float64{5, 10, 7},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
}},
|
||||
},
|
||||
{
|
||||
Schema: 1,
|
||||
PositiveBuckets: []float64{1, 2, 3},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "series with increment - with different schemas",
|
||||
input: `{} {{buckets:[5] schema:0}}+{{buckets:[1 2] schema:1}}x2`,
|
||||
expected: []histogram.FloatHistogram{
|
||||
{
|
||||
PositiveBuckets: []float64{5},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 1,
|
||||
}},
|
||||
},
|
||||
{
|
||||
PositiveBuckets: []float64{6, 2},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 2,
|
||||
}},
|
||||
},
|
||||
{
|
||||
PositiveBuckets: []float64{7, 4},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 2,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "series with decrement",
|
||||
input: `{} {{buckets:[5 10 7] schema:1}}-{{buckets:[1 2 3] schema:1}}x2`,
|
||||
expected: []histogram.FloatHistogram{
|
||||
{
|
||||
Schema: 1,
|
||||
PositiveBuckets: []float64{5, 10, 7},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
}},
|
||||
},
|
||||
{
|
||||
Schema: 1,
|
||||
PositiveBuckets: []float64{4, 8, 4},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
}},
|
||||
},
|
||||
{
|
||||
Schema: 1,
|
||||
PositiveBuckets: []float64{3, 6, 1},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "series with increment - 0x",
|
||||
input: `{} {{buckets:[5 10 7] schema:1}}+{{buckets:[1 2 3] schema:1}}x0`,
|
||||
expected: []histogram.FloatHistogram{
|
||||
{
|
||||
Schema: 1,
|
||||
PositiveBuckets: []float64{5, 10, 7},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "series with different schemas - second one is smaller",
|
||||
input: `{} {{buckets:[5 10 7] schema:1}}+{{buckets:[1 2 3] schema:0}}x2`,
|
||||
expectedError: `1:63: parse error: error combining histograms: cannot merge from schema 0 to 1`,
|
||||
},
|
||||
{
|
||||
name: "different order",
|
||||
input: `{} {{buckets:[5 10 7] schema:1}}`,
|
||||
expected: []histogram.FloatHistogram{{
|
||||
Schema: 1,
|
||||
PositiveBuckets: []float64{5, 10, 7},
|
||||
PositiveSpans: []histogram.Span{{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "double property",
|
||||
input: `{} {{schema:1 schema:1}}`,
|
||||
expectedError: `1:1: parse error: duplicate key "schema" in histogram`,
|
||||
},
|
||||
{
|
||||
name: "unknown property",
|
||||
input: `{} {{foo:1}}`,
|
||||
expectedError: `1:6: parse error: bad histogram descriptor found: "foo"`,
|
||||
},
|
||||
{
|
||||
name: "space before :",
|
||||
input: `{} {{schema :1}}`,
|
||||
expectedError: "1:6: parse error: missing `:` for histogram descriptor",
|
||||
},
|
||||
{
|
||||
name: "space after :",
|
||||
input: `{} {{schema: 1}}`,
|
||||
expectedError: `1:13: parse error: unexpected " " in series values`,
|
||||
},
|
||||
{
|
||||
name: "space after [",
|
||||
input: `{} {{buckets:[ 1]}}`,
|
||||
expectedError: `1:15: parse error: unexpected " " in series values`,
|
||||
},
|
||||
{
|
||||
name: "space after {{",
|
||||
input: `{} {{ schema:1}}`,
|
||||
expectedError: `1:7: parse error: unexpected "<Item 57372>" "schema" in series values`,
|
||||
},
|
||||
} {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
_, vals, err := ParseSeriesDesc(test.input)
|
||||
if test.expectedError != "" {
|
||||
require.EqualError(t, err, test.expectedError)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
var got []histogram.FloatHistogram
|
||||
for _, v := range vals {
|
||||
got = append(got, *v.Histogram)
|
||||
}
|
||||
require.Equal(t, test.expected, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHistogramTestExpression(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
name string
|
||||
input histogram.FloatHistogram
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "single positive and negative span",
|
||||
input: histogram.FloatHistogram{
|
||||
Schema: 1,
|
||||
Sum: -0.3,
|
||||
Count: 3.1,
|
||||
ZeroCount: 7.1,
|
||||
ZeroThreshold: 0.05,
|
||||
PositiveBuckets: []float64{5.1, 10, 7},
|
||||
PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}},
|
||||
NegativeBuckets: []float64{4.1, 5},
|
||||
NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}},
|
||||
},
|
||||
expected: `{{schema:1 count:3.1 sum:-0.3 z_bucket:7.1 z_bucket_w:0.05 offset:-3 buckets:[5.1 10 7] n_offset:-5 n_buckets:[4.1 5]}}`,
|
||||
},
|
||||
{
|
||||
name: "multiple positive and negative spans",
|
||||
input: histogram.FloatHistogram{
|
||||
PositiveBuckets: []float64{5.1, 10, 7},
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: -3, Length: 1},
|
||||
{Offset: 4, Length: 2},
|
||||
},
|
||||
NegativeBuckets: []float64{4.1, 5, 7, 8, 9},
|
||||
NegativeSpans: []histogram.Span{
|
||||
{Offset: -1, Length: 2},
|
||||
{Offset: 2, Length: 3},
|
||||
},
|
||||
},
|
||||
expected: `{{offset:-3 buckets:[5.1 0 0 0 0 10 7] n_offset:-1 n_buckets:[4.1 5 0 0 7 8 9]}}`,
|
||||
},
|
||||
} {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
expression := test.input.TestExpression()
|
||||
require.Equal(t, test.expected, expression)
|
||||
_, vals, err := ParseSeriesDesc("{} " + expression)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, vals, 1)
|
||||
canonical := vals[0].Histogram
|
||||
require.NotNil(t, canonical)
|
||||
require.Equal(t, test.expected, canonical.TestExpression())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseSeries(t *testing.T) {
|
||||
for _, test := range testSeries {
|
||||
metric, vals, err := ParseSeriesDesc(test.input)
|
||||
|
|
promql/test.go (100 changed lines)
@ -30,6 +30,7 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/timestamp"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
|
@ -162,12 +163,8 @@ func parseLoad(lines []string, i int) (int, *loadCmd, error) {
|
|||
i--
|
||||
break
|
||||
}
|
||||
metric, vals, err := parser.ParseSeriesDesc(defLine)
|
||||
metric, vals, err := parseSeries(defLine, i)
|
||||
if err != nil {
|
||||
var perr *parser.ParseErr
|
||||
if errors.As(err, &perr) {
|
||||
perr.LineOffset = i
|
||||
}
|
||||
return i, nil, err
|
||||
}
|
||||
cmd.set(metric, vals...)
|
||||
|
@ -175,6 +172,17 @@ func parseLoad(lines []string, i int) (int, *loadCmd, error) {
|
|||
return i, cmd, nil
|
||||
}
|
||||
|
||||
func parseSeries(defLine string, line int) (labels.Labels, []parser.SequenceValue, error) {
|
||||
metric, vals, err := parser.ParseSeriesDesc(defLine)
|
||||
if err != nil {
|
||||
parser.EnrichParseError(err, func(parseErr *parser.ParseErr) {
|
||||
parseErr.LineOffset = line
|
||||
})
|
||||
return labels.Labels{}, nil, err
|
||||
}
|
||||
return metric, vals, nil
|
||||
}
|
||||
|
||||
func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
|
||||
if !patEvalInstant.MatchString(lines[i]) {
|
||||
return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>")
|
||||
|
@ -187,14 +195,13 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
|
|||
)
|
||||
_, err := parser.ParseExpr(expr)
|
||||
if err != nil {
|
||||
var perr *parser.ParseErr
|
||||
if errors.As(err, &perr) {
|
||||
perr.LineOffset = i
|
||||
parser.EnrichParseError(err, func(parseErr *parser.ParseErr) {
|
||||
parseErr.LineOffset = i
|
||||
posOffset := parser.Pos(strings.Index(lines[i], expr))
|
||||
perr.PositionRange.Start += posOffset
|
||||
perr.PositionRange.End += posOffset
|
||||
perr.Query = lines[i]
|
||||
}
|
||||
parseErr.PositionRange.Start += posOffset
|
||||
parseErr.PositionRange.End += posOffset
|
||||
parseErr.Query = lines[i]
|
||||
})
|
||||
return i, nil, err
|
||||
}
|
||||
|
||||
|
@ -223,12 +230,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
|
|||
cmd.expect(0, parser.SequenceValue{Value: f})
|
||||
break
|
||||
}
|
||||
metric, vals, err := parser.ParseSeriesDesc(defLine)
|
||||
metric, vals, err := parseSeries(defLine, i)
|
||||
if err != nil {
|
||||
var perr *parser.ParseErr
|
||||
if errors.As(err, &perr) {
|
||||
perr.LineOffset = i
|
||||
}
|
||||
return i, nil, err
|
||||
}
|
||||
|
||||
|
@ -299,7 +302,7 @@ func (*evalCmd) testCmd() {}
|
|||
type loadCmd struct {
|
||||
gap time.Duration
|
||||
metrics map[uint64]labels.Labels
|
||||
defs map[uint64][]FPoint
|
||||
defs map[uint64][]Sample
|
||||
exemplars map[uint64][]exemplar.Exemplar
|
||||
}
|
||||
|
||||
|
@ -307,7 +310,7 @@ func newLoadCmd(gap time.Duration) *loadCmd {
|
|||
return &loadCmd{
|
||||
gap: gap,
|
||||
metrics: map[uint64]labels.Labels{},
|
||||
defs: map[uint64][]FPoint{},
|
||||
defs: map[uint64][]Sample{},
|
||||
exemplars: map[uint64][]exemplar.Exemplar{},
|
||||
}
|
||||
}
|
||||
|
@ -320,13 +323,14 @@ func (cmd loadCmd) String() string {
|
|||
func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) {
|
||||
h := m.Hash()
|
||||
|
||||
samples := make([]FPoint, 0, len(vals))
|
||||
samples := make([]Sample, 0, len(vals))
|
||||
ts := testStartTime
|
||||
for _, v := range vals {
|
||||
if !v.Omitted {
|
||||
samples = append(samples, FPoint{
|
||||
samples = append(samples, Sample{
|
||||
T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond),
|
||||
F: v.Value,
|
||||
H: v.Histogram,
|
||||
})
|
||||
}
|
||||
ts = ts.Add(cmd.gap)
|
||||
|
@ -341,7 +345,7 @@ func (cmd *loadCmd) append(a storage.Appender) error {
|
|||
m := cmd.metrics[h]
|
||||
|
||||
for _, s := range smpls {
|
||||
if _, err := a.Append(0, m, s.T, s.F); err != nil {
|
||||
if err := appendSample(a, s, m); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -349,6 +353,19 @@ func (cmd *loadCmd) append(a storage.Appender) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func appendSample(a storage.Appender, s Sample, m labels.Labels) error {
|
||||
if s.H != nil {
|
||||
if _, err := a.AppendHistogram(0, m, s.T, nil, s.H); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if _, err := a.Append(0, m, s.T, s.F); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// evalCmd is a command that evaluates an expression for the given time (range)
|
||||
// and expects a specific result.
|
||||
type evalCmd struct {
|
||||
|
@ -417,8 +434,13 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
|
|||
if ev.ordered && exp.pos != pos+1 {
|
||||
return fmt.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1)
|
||||
}
|
||||
if !almostEqual(exp.vals[0].Value, v.F) {
|
||||
return fmt.Errorf("expected %v for %s but got %v", exp.vals[0].Value, v.Metric, v.F)
|
||||
exp0 := exp.vals[0]
|
||||
expH := exp0.Histogram
|
||||
if (expH == nil) != (v.H == nil) || (expH != nil && !expH.Equals(v.H)) {
|
||||
return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H))
|
||||
}
|
||||
if !almostEqual(exp0.Value, v.F) {
|
||||
return fmt.Errorf("expected %v for %s but got %v", exp0.Value, v.Metric, v.F)
|
||||
}
|
||||
|
||||
seen[fp] = true
|
||||
|
@ -434,8 +456,15 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
|
|||
}
|
||||
|
||||
case Scalar:
|
||||
if !almostEqual(ev.expected[0].vals[0].Value, val.V) {
|
||||
return fmt.Errorf("expected Scalar %v but got %v", val.V, ev.expected[0].vals[0].Value)
|
||||
if len(ev.expected) != 1 {
|
||||
return fmt.Errorf("expected vector result, but got scalar %s", val.String())
|
||||
}
|
||||
exp0 := ev.expected[0].vals[0]
|
||||
if exp0.Histogram != nil {
|
||||
return fmt.Errorf("expected Histogram %v but got scalar %s", exp0.Histogram.TestExpression(), val.String())
|
||||
}
|
||||
if !almostEqual(exp0.Value, val.V) {
|
||||
return fmt.Errorf("expected Scalar %v but got %v", val.V, exp0.Value)
|
||||
}
|
||||
|
||||
default:
|
||||
|
@ -444,6 +473,14 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// HistogramTestExpression returns TestExpression() for the given histogram or "" if the histogram is nil.
|
||||
func HistogramTestExpression(h *histogram.FloatHistogram) string {
|
||||
if h != nil {
|
||||
return h.TestExpression()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// clearCmd is a command that wipes the test's storage state.
|
||||
type clearCmd struct{}
|
||||
|
||||
|
@ -560,7 +597,7 @@ func (t *test) exec(tc testCommand, engine engineQuerier) error {
|
|||
}
|
||||
err = cmd.compareResult(res.Value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in %s %s: %w", cmd, iq.expr, err)
|
||||
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
|
||||
}
|
||||
|
||||
// Check query returns same result in range mode,
|
||||
|
@ -581,12 +618,19 @@ func (t *test) exec(tc testCommand, engine engineQuerier) error {
|
|||
mat := rangeRes.Value.(Matrix)
|
||||
vec := make(Vector, 0, len(mat))
|
||||
for _, series := range mat {
|
||||
// We expect either Floats or Histograms.
|
||||
for _, point := range series.Floats {
|
||||
if point.T == timeMilliseconds(iq.evalTime) {
|
||||
vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F})
|
||||
break
|
||||
}
|
||||
}
|
||||
for _, point := range series.Histograms {
|
||||
if point.T == timeMilliseconds(iq.evalTime) {
|
||||
vec = append(vec, Sample{Metric: series.Metric, T: point.T, H: point.H})
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, ok := res.Value.(Scalar); ok {
|
||||
err = cmd.compareResult(Scalar{V: vec[0].F})
|
||||
|
@ -747,7 +791,7 @@ func (ll *LazyLoader) appendTill(ts int64) error {
ll.loadCmd.defs[h] = smpls[i:]
break
}
if _, err := app.Append(0, m, s.T, s.F); err != nil {
if err := appendSample(app, s, m); err != nil {
return err
}
if i == len(smpls)-1 {

226
promql/testdata/native_histograms.test
vendored
Normal file

@ -0,0 +1,226 @@
# Minimal valid case: an empty histogram.
load 5m
empty_histogram {{}}

eval instant at 5m empty_histogram
{__name__="empty_histogram"} {{}}

eval instant at 5m histogram_count(empty_histogram)
{} 0

eval instant at 5m histogram_sum(empty_histogram)
{} 0

eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram)
{} NaN

eval instant at 5m histogram_fraction(0, 8, empty_histogram)
{} NaN

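One way to read the NaN results above (an assumption about the semantics, not something stated in this file): histogram_fraction estimates the share of observations inside the given range, and with a total count of zero that share is

$$\frac{0}{0} = \mathrm{NaN}.$$
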
# buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4).
load 5m
single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}

# histogram_count extracts the count property from the histogram.
eval instant at 5m histogram_count(single_histogram)
{} 4

# histogram_sum extracts the sum property from the histogram.
eval instant at 5m histogram_sum(single_histogram)
{} 5

# We expect half of the values to fall in the range 1 < x <= 2.
eval instant at 5m histogram_fraction(1, 2, single_histogram)
{} 0.5

# We expect all values to fall in the range 0 < x <= 8.
eval instant at 5m histogram_fraction(0, 8, single_histogram)
{} 1

# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2.
eval instant at 5m histogram_quantile(0.5, single_histogram)
{} 1.5

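As a cross-check of the 1.5 median above (a back-of-the-envelope linear-interpolation estimate, not code from this change): at schema 0 the three buckets cover (0.5,1], (1,2] and (2,4] with cumulative counts 1, 3 and 4, and the rank 0.5 · 4 = 2 lands in (1,2], so

$$q_{0.5} \approx 1 + \frac{2 - 1}{2}\,(2 - 1) = 1.5,$$

where 1 is the cumulative count below the bucket, 2 is the bucket's own count, and (2 − 1) is its width.
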
# Repeat the same histogram 10 times.
load 5m
multi_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}x10

eval instant at 5m histogram_count(multi_histogram)
{} 4

eval instant at 5m histogram_sum(multi_histogram)
{} 5

eval instant at 5m histogram_fraction(1, 2, multi_histogram)
{} 0.5

eval instant at 5m histogram_quantile(0.5, multi_histogram)
{} 1.5

# Each entry should look the same as the first.
eval instant at 50m histogram_count(multi_histogram)
{} 4

eval instant at 50m histogram_sum(multi_histogram)
{} 5

eval instant at 50m histogram_fraction(1, 2, multi_histogram)
{} 0.5

eval instant at 50m histogram_quantile(0.5, multi_histogram)
{} 1.5

# Accumulate the histogram addition for 10 iterations. offset is a bucket position where offset:0 is always the bucket
# with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket
# positions for upper limits <1 (tending toward zero), where offset:-1 is the bucket to the left of offset:0.
load 5m
incr_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:1 buckets:[1] offset:1}}x10

eval instant at 5m histogram_count(incr_histogram)
{} 5

eval instant at 5m histogram_sum(incr_histogram)
{} 6

# We expect 3/5ths of the values to fall in the range 1 < x <= 2.
eval instant at 5m histogram_fraction(1, 2, incr_histogram)
{} 0.6

eval instant at 5m histogram_quantile(0.5, incr_histogram)
{} 1.5

eval instant at 50m incr_histogram
{__name__="incr_histogram"} {{count:14 sum:24 buckets:[1 12 1]}}

eval instant at 50m histogram_count(incr_histogram)
{} 14

eval instant at 50m histogram_sum(incr_histogram)
{} 24

# We expect 12/14ths of the values to fall in the range 1 < x <= 2.
eval instant at 50m histogram_fraction(1, 2, incr_histogram)
{} 0.8571428571428571

eval instant at 50m histogram_quantile(0.5, incr_histogram)
{} 1.5

# Per-second average rate of increase should be 1/(5*60) for count and buckets, and 2/(5*60) for sum.
eval instant at 50m rate(incr_histogram[5m])
{} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}

# Calculate the 50th percentile of observations over the last 10m.
eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m]))
{} 1.5

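A worked check of the 50m expectations above (plain arithmetic, not taken from the engine): each 5m step adds 1 to count, 2 to sum, and one observation to the offset:1 bucket, so after ten steps

$$\mathrm{count} = 4 + 10 \cdot 1 = 14, \qquad \mathrm{sum} = 4 + 10 \cdot 2 = 24, \qquad \text{buckets} = [1,\ 2 + 10,\ 1] = [1,\ 12,\ 1],$$

and the per-second rate over one 5m window is 1/(5·60) ≈ 0.003333 for count and the incremented bucket, and 2/(5·60) ≈ 0.006667 for sum.
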
# Schema represents the histogram resolution; different schemas have compatible bucket boundaries, e.g.:
# 0: 1 2 4 8 16 32 64 (higher resolution)
# -1: 1 4 16 64 (lower resolution)
#
# Histograms can be merged as long as the histogram to the right is same resolution or higher.
load 5m
low_res_histogram {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}+{{schema:0 sum:4 count:4 buckets:[2 2] offset:1}}x1

eval instant at 5m low_res_histogram
{__name__="low_res_histogram"} {{schema:-1 count:5 sum:8 offset:1 buckets:[5]}}

eval instant at 5m histogram_count(low_res_histogram)
{} 5

eval instant at 5m histogram_sum(low_res_histogram)
{} 8

# We expect all values to fall into the lower-resolution bucket with the range 1 < x <= 4.
eval instant at 5m histogram_fraction(1, 4, low_res_histogram)
{} 1

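A worked reading of the merged sample above (an interpretation of the schema mapping, not engine code): at schema 0 the added histogram's offset:1 buckets cover (1,2] and (2,4], and at schema −1 both fall inside the single bucket (1,4] that already holds the base observation, so

$$\mathrm{count} = 1 + 2 + 2 = 5, \qquad \mathrm{sum} = 4 + 4 = 8.$$
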
# z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range
# 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket.
load 5m
single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}}

eval instant at 5m histogram_count(single_zero_histogram)
{} 1

eval instant at 5m histogram_sum(single_zero_histogram)
{} 0.25

# When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be equally
# distributed around zero; i.e. that there are an equal number of positive and negative observations. Therefore the
# entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5.
eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram)
{} 1

# Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5.
eval instant at 5m histogram_quantile(0.5, single_zero_histogram)
{} 0

# Let's turn single_histogram upside-down.
load 5m
negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}}

eval instant at 5m histogram_count(negative_histogram)
{} 4

eval instant at 5m histogram_sum(negative_histogram)
{} -5

# We expect half of the values to fall in the range -2 < x <= -1.
eval instant at 5m histogram_fraction(-2, -1, negative_histogram)
{} 0.5

eval instant at 5m histogram_quantile(0.5, negative_histogram)
{} -1.5

# Two histogram samples.
load 5m
two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}

# We expect to see the newest sample.
eval instant at 10m histogram_count(two_samples_histogram)
{} 4

eval instant at 10m histogram_sum(two_samples_histogram)
{} -4

eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram)
{} 0.5

eval instant at 10m histogram_quantile(0.5, two_samples_histogram)
{} -1.5

# Add two histograms with negated data.
load 5m
balanced_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}x1

eval instant at 5m histogram_count(balanced_histogram)
{} 8

eval instant at 5m histogram_sum(balanced_histogram)
{} 0

eval instant at 5m histogram_fraction(0, 4, balanced_histogram)
{} 0.5

# If the quantile happens to be located in a span of empty buckets, the value actually returned is the lower bound of
# the first populated bucket after the span of empty buckets.
eval instant at 5m histogram_quantile(0.5, balanced_histogram)
{} 0.5

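The 0.5 fraction above follows from the symmetric construction (a quick check, not engine code): 4 of the 8 observations are positive, and every positive observation lies in (0, 4], so

$$\mathrm{histogram\_fraction}(0, 4) = \frac{4}{8} = 0.5.$$
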
@ -71,8 +71,8 @@ func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// creates a new TimeSeries in the map if not found and returns the time series signature.
// tsMap will be unmodified if either labels or sample is nil, but can still be modified if the exemplar is nil.
func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label,
datatype string) string {
datatype string,
) string {
if sample == nil || labels == nil || tsMap == nil {
return ""
}

@ -164,7 +164,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
sort.Stable(ByLabelName(labels))

for _, label := range labels {
var finalKey = prometheustranslator.NormalizeLabel(label.Name)
finalKey := prometheustranslator.NormalizeLabel(label.Name)
if existingLabel, alreadyExists := l[finalKey]; alreadyExists {
existingLabel.Value = existingLabel.Value + ";" + label.Value
l[finalKey] = existingLabel

@ -441,7 +441,8 @@ func maxTimestamp(a, b pcommon.Timestamp) pcommon.Timestamp {

// addSingleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples.
func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings,
tsMap map[string]*prompb.TimeSeries) {
tsMap map[string]*prompb.TimeSeries,
) {
timestamp := convertTimeStamp(pt.Timestamp())
// sum and count of the summary should append suffix to baseName
baseName := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace)

@ -4742,56 +4742,56 @@ func TestHistogramValidation(t *testing.T) {
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{},
},
errMsg: `negative side: spans need 1 buckets, have 0 buckets`,
errMsg: `negative side: spans need 1 buckets, have 0 buckets: histogram spans specify different number of buckets than provided`,
},
"rejects histogram that has too few positive buckets": {
h: &histogram.Histogram{
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{},
},
errMsg: `positive side: spans need 1 buckets, have 0 buckets`,
errMsg: `positive side: spans need 1 buckets, have 0 buckets: histogram spans specify different number of buckets than provided`,
},
"rejects histogram that has too many negative buckets": {
h: &histogram.Histogram{
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{1, 2},
},
errMsg: `negative side: spans need 1 buckets, have 2 buckets`,
errMsg: `negative side: spans need 1 buckets, have 2 buckets: histogram spans specify different number of buckets than provided`,
},
"rejects histogram that has too many positive buckets": {
h: &histogram.Histogram{
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{1, 2},
},
errMsg: `positive side: spans need 1 buckets, have 2 buckets`,
errMsg: `positive side: spans need 1 buckets, have 2 buckets: histogram spans specify different number of buckets than provided`,
},
"rejects a histogram that has a negative span with a negative offset": {
h: &histogram.Histogram{
NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}},
NegativeBuckets: []int64{1, 2},
},
errMsg: `negative side: span number 2 with offset -1`,
errMsg: `negative side: span number 2 with offset -1: histogram has a span whose offset is negative`,
},
"rejects a histogram which has a positive span with a negative offset": {
h: &histogram.Histogram{
PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}},
PositiveBuckets: []int64{1, 2},
},
errMsg: `positive side: span number 2 with offset -1`,
errMsg: `positive side: span number 2 with offset -1: histogram has a span whose offset is negative`,
},
"rejects a histogram that has a negative bucket with a negative count": {
h: &histogram.Histogram{
NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}},
NegativeBuckets: []int64{-1},
},
errMsg: `negative side: bucket number 1 has observation count of -1`,
errMsg: `negative side: bucket number 1 has observation count of -1: histogram has a bucket whose observation count is negative`,
},
"rejects a histogram that has a positive bucket with a negative count": {
h: &histogram.Histogram{
PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}},
PositiveBuckets: []int64{-1},
},
errMsg: `positive side: bucket number 1 has observation count of -1`,
errMsg: `positive side: bucket number 1 has observation count of -1: histogram has a bucket whose observation count is negative`,
},
"rejects a histogram that has a lower count than count in buckets": {
h: &histogram.Histogram{

@ -4801,7 +4801,7 @@ func TestHistogramValidation(t *testing.T) {
NegativeBuckets: []int64{1},
PositiveBuckets: []int64{1},
},
errMsg: `2 observations found in buckets, but the Count field is 0`,
errMsg: `2 observations found in buckets, but the Count field is 0: histogram's observation count should be at least the number of observations found in the buckets`,
skipFloat: true,
},
"rejects a histogram that doesn't count the zero bucket in its count": {

@ -4813,7 +4813,7 @@ func TestHistogramValidation(t *testing.T) {
NegativeBuckets: []int64{1},
PositiveBuckets: []int64{1},
},
errMsg: `3 observations found in buckets, but the Count field is 2`,
errMsg: `3 observations found in buckets, but the Count field is 2: histogram's observation count should be at least the number of observations found in the buckets`,
skipFloat: true,
},
}

@ -4821,7 +4821,7 @@ func TestHistogramValidation(t *testing.T) {
for testName, tc := range tests {
t.Run(testName, func(t *testing.T) {
if err := ValidateHistogram(tc.h); tc.errMsg != "" {
require.ErrorContains(t, err, tc.errMsg)
require.EqualError(t, err, tc.errMsg)
} else {
require.NoError(t, err)
}

@ -4829,7 +4829,7 @@ func TestHistogramValidation(t *testing.T) {
return
}
if err := ValidateFloatHistogram(tc.h.ToFloat()); tc.errMsg != "" {
require.ErrorContains(t, err, tc.errMsg)
require.EqualError(t, err, tc.errMsg)
} else {
require.NoError(t, err)
}

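The switch from require.ErrorContains to require.EqualError above pins the full wrapped message rather than a substring. A minimal, hedged sketch of the pattern it asserts against; the sentinel name and the wrapping call here are illustrative assumptions, not the validation code being changed:

package sketch

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// Illustrative sentinel; the real validation code defines its own error values.
var errSpanNegativeOffset = errors.New("histogram has a span whose offset is negative")

func validateSpanOffset(offset int) error {
	if offset < 0 {
		// Wrapping with %w keeps the sentinel reachable via errors.Is while
		// producing the combined "detail: reason" message asserted above.
		return fmt.Errorf("negative side: span number 2 with offset %d: %w", offset, errSpanNegativeOffset)
	}
	return nil
}

func TestSpanOffsetMessage(t *testing.T) {
	err := validateSpanOffset(-1)
	// EqualError requires the exact message, including the wrapped suffix...
	require.EqualError(t, err, "negative side: span number 2 with offset -1: histogram has a span whose offset is negative")
	// ...whereas ErrorContains would also pass on just the prefix.
	require.ErrorContains(t, err, "span number 2 with offset -1")
	require.True(t, errors.Is(err, errSpanNegativeOffset))
}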