Mirror of https://github.com/prometheus/prometheus.git

Merge pull request #11447 from prometheus/sparsehistogram

Add Support for Native Histograms

This PR merges all the coding work that has been done in the sparsehistogram branch over the last year into the main branch.
Design doc on native histograms: https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
Sneak peek: https://www.youtube.com/watch?v=T2GvcYNth9U

Commit fa6e05903f
@@ -57,6 +57,7 @@ import (
 	"github.com/prometheus/prometheus/discovery/legacymanager"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/model/relabel"
@@ -194,6 +195,10 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "no-default-scrape-port":
 			c.scrape.NoDefaultPort = true
 			level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
+		case "native-histograms":
+			c.tsdb.EnableNativeHistograms = true
+			c.scrape.EnableProtobufNegotiation = true
+			level.Info(logger).Log("msg", "Experimental native histogram support enabled.")
 		case "":
 			continue
 		case "promql-at-modifier", "promql-negative-offset":
@@ -203,6 +208,12 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 			}
 		}
 	}
 
+	if c.tsdb.EnableNativeHistograms && c.tsdb.EnableMemorySnapshotOnShutdown {
+		c.tsdb.EnableMemorySnapshotOnShutdown = false
+		level.Warn(logger).Log("msg", "memory-snapshot-on-shutdown has been disabled automatically because memory-snapshot-on-shutdown and native-histograms cannot be enabled at the same time.")
+	}
+
 	return nil
 }
@@ -395,7 +406,7 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
 
-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)
 
 	promlogflag.AddFlags(a, &cfg.promlogConfig)
@@ -1380,6 +1391,10 @@ func (n notReadyAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels,
 	return 0, tsdb.ErrNotReady
 }
 
+func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
+	return 0, tsdb.ErrNotReady
+}
+
 func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
 	return 0, tsdb.ErrNotReady
 }
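To make the new interface concrete: a minimal sketch of appending one native histogram sample through an appender that implements the method above. The histogram.Histogram field names (Count, Sum) are assumptions based on the model/histogram package this PR introduces; they are not shown in this diff.

// appendNativeHistogram sketches feeding one native histogram sample to an
// appender that implements the AppendHistogram method added in this PR.
func appendNativeHistogram(app storage.Appender, ts int64) error {
	h := &histogram.Histogram{ // field names assumed from the model/histogram package
		Count: 42,
		Sum:   9.7,
	}
	lset := labels.FromStrings("__name__", "http_request_duration_seconds")
	_, err := app.AppendHistogram(0, lset, ts, h)
	return err
}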
@@ -1510,6 +1525,7 @@ type tsdbOptions struct {
 	EnableExemplarStorage          bool
 	MaxExemplars                   int64
 	EnableMemorySnapshotOnShutdown bool
+	EnableNativeHistograms         bool
 }
 
 func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
@@ -1528,6 +1544,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
 		EnableExemplarStorage:          opts.EnableExemplarStorage,
 		MaxExemplars:                   opts.MaxExemplars,
 		EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown,
+		EnableNativeHistograms:         opts.EnableNativeHistograms,
 		OutOfOrderTimeWindow:           opts.OutOfOrderTimeWindow,
 	}
 }
@@ -25,6 +25,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
 type backfillSample struct {
@@ -50,7 +51,7 @@ func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMa
 		series := ss.At()
 		it := series.Iterator()
 		require.NoError(t, it.Err())
-		for it.Next() {
+		for it.Next() == chunkenc.ValFloat {
 			ts, v := it.At()
 			samples = append(samples, backfillSample{Timestamp: ts, Value: v, Labels: series.Labels()})
 		}
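The changed loop condition reflects the new iterator contract: Next() now returns a chunkenc.ValueType rather than a bool, so callers state which sample type they expect. A hedged sketch of consuming a series that may mix float and histogram samples (ValNone, ValHistogram, and AtHistogram are assumed from the chunkenc package as extended by this PR):

// Consume a series that may mix float and native histogram samples.
for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
	switch vt {
	case chunkenc.ValFloat:
		ts, v := it.At() // plain float sample, as before
		fmt.Println(ts, v)
	case chunkenc.ValHistogram:
		ts, h := it.AtHistogram() // native histogram sample
		fmt.Println(ts, h.String())
	}
}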
@@ -28,6 +28,7 @@ import (
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
 type mockQueryRangeAPI struct {
@@ -139,7 +140,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 				require.Equal(t, 3, len(series.Labels()))
 			}
 			it := series.Iterator()
-			for it.Next() {
+			for it.Next() == chunkenc.ValFloat {
 				samplesCount++
 				ts, v := it.At()
 				if v == testValue {
@@ -31,6 +31,7 @@ import (
 	"time"
 
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/index"
 
 	"github.com/alecthomas/units"
@@ -644,7 +645,7 @@ func dumpSamples(path string, mint, maxt int64) (err error) {
 		series := ss.At()
 		lbs := series.Labels()
 		it := series.Iterator()
-		for it.Next() {
+		for it.Next() == chunkenc.ValFloat {
 			ts, val := it.At()
 			fmt.Printf("%s %g %d\n", lbs, val, ts)
 		}
@@ -447,7 +447,7 @@ func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, q
 		return v, nil
 	case promql.Scalar:
 		return promql.Vector{promql.Sample{
-			Point:  promql.Point(v),
+			Point:  promql.Point{T: v.T, V: v.V},
 			Metric: labels.Labels{},
 		}}, nil
 	default:
@@ -776,12 +776,13 @@ func CheckTargetAddress(address model.LabelValue) error {
 
 // RemoteWriteConfig is the configuration for writing to remote storage.
 type RemoteWriteConfig struct {
 	URL                  *config.URL       `yaml:"url"`
 	RemoteTimeout        model.Duration    `yaml:"remote_timeout,omitempty"`
 	Headers              map[string]string `yaml:"headers,omitempty"`
 	WriteRelabelConfigs  []*relabel.Config `yaml:"write_relabel_configs,omitempty"`
 	Name                 string            `yaml:"name,omitempty"`
 	SendExemplars        bool              `yaml:"send_exemplars,omitempty"`
+	SendNativeHistograms bool              `yaml:"send_native_histograms,omitempty"`
 
 	// We cannot do proper Go type embedding below as the parser will then parse
 	// values arbitrarily into the overflow maps of further-down types.
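For illustration, a hedged sketch of setting the new field from Go, using only the fields visible in the struct above. parseURL is a hypothetical helper that builds a *config.URL from a string; it is not part of this PR:

// Opt one remote-write queue into forwarding native histograms.
rw := config.RemoteWriteConfig{
	URL:                  parseURL("https://remote-storage.example/api/v1/write"), // parseURL: hypothetical helper
	SendExemplars:        true,
	SendNativeHistograms: true, // field added by this PR
}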
@@ -3031,6 +3031,9 @@ write_relabel_configs:
 # Enables sending of exemplars over remote write. Note that exemplar storage itself must be enabled for exemplars to be scraped in the first place.
 [ send_exemplars: <boolean> | default = false ]
 
+# Enables sending of native histograms, also known as sparse histograms, over remote write.
+[ send_native_histograms: <boolean> | default = false ]
+
 # Sets the `Authorization` header on every remote write request with the
 # configured username and password.
 # password and password_file are mutually exclusive.
@@ -17,6 +17,10 @@ Rule files use YAML.
 The rule files can be reloaded at runtime by sending `SIGHUP` to the Prometheus
 process. The changes are only applied if all rule files are well-formatted.
 
+_Note about native histograms (experimental feature): Rules evaluating to
+native histograms do not yet work as expected. Instead of a native histogram,
+the sample stored is just a floating point value of zero._
+
 ## Syntax-checking rules
 
 To quickly check whether a rule file is syntactically correct without starting
@@ -103,3 +103,26 @@ When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be
 the address used to scrape a target (the value of the `__address__` label), contrary to the default behavior.
 In addition, if a default HTTP or HTTPS port has already been added either in a static configuration or
 by a service discovery mechanism and the respective scheme is specified (`http` or `https`), that port will be removed.
 
+## Native Histograms
+
+`--enable-feature=native-histograms`
+
+When enabled, Prometheus will ingest native histograms (formerly also known as
+sparse histograms or high-res histograms). Native histograms are still highly
+experimental. Expect breaking changes to happen (including those rendering the
+TSDB unreadable).
+
+Native histograms are currently only supported in the traditional Prometheus
+protobuf exposition format. This feature flag therefore also enables a new (and
+also experimental) protobuf parser, through which _all_ metrics are ingested
+(i.e. not only native histograms). Prometheus will try to negotiate the
+protobuf format first. The instrumented target needs to support the protobuf
+format, too, _and_ it needs to expose native histograms. The protobuf format
+allows exposing conventional and native histograms side by side. With this
+feature flag disabled, Prometheus will continue to parse the conventional
+histogram (albeit via the text format). With this flag enabled, Prometheus will
+still ingest those conventional histograms that do not come with a
+corresponding native histogram. However, if a native histogram is present,
+Prometheus will ignore the corresponding conventional histogram, with the
+notable exception of exemplars, which are always ingested.
@@ -8,6 +8,9 @@ sort_rank: 6
 Federation allows a Prometheus server to scrape selected time series from
 another Prometheus server.
 
+_Note about native histograms (experimental feature): Federation does not
+support native histograms yet._
+
 ## Use cases
 
 There are different use cases for federation. Commonly, it is used to either
@@ -447,6 +447,12 @@ sample values. JSON does not support special float values such as `NaN`, `Inf`,
 and `-Inf`, so sample values are transferred as quoted JSON strings rather than
 raw numbers.
 
+The keys `"histogram"` and `"histograms"` only show up if the experimental
+native histograms are present in the response. Their placeholder `<histogram>`
+is explained in detail in its own section below. Any one object will only have
+the `"value"`/`"values"` key or the `"histogram"`/`"histograms"` key, but not
+both.
+
 ### Range vectors
 
 Range vectors are returned as result type `matrix`. The corresponding
@@ -456,7 +462,8 @@ Range vectors are returned as result type `matrix`. The corresponding
 [
   {
     "metric": { "<label_name>": "<label_value>", ... },
-    "values": [ [ <unix_time>, "<sample_value>" ], ... ]
+    "values": [ [ <unix_time>, "<sample_value>" ], ... ],
+    "histograms": [ [ <unix_time>, <histogram> ], ... ]
   },
   ...
 ]
@@ -471,7 +478,8 @@ Instant vectors are returned as result type `vector`. The corresponding
 [
   {
     "metric": { "<label_name>": "<label_value>", ... },
-    "value": [ <unix_time>, "<sample_value>" ]
+    "value": [ <unix_time>, "<sample_value>" ],
+    "histogram": [ <unix_time>, <histogram> ]
   },
   ...
 ]
@@ -495,6 +503,33 @@ String results are returned as result type `string`. The corresponding
 [ <unix_time>, "<string_value>" ]
 ```
 
+### Native histograms
+
+The `<histogram>` placeholder used above is formatted as follows.
+
+_Note that native histograms are an experimental feature, and the format below
+might still change._
+
+```
+{
+  "count": "<count_of_observations>",
+  "sum": "<sum_of_observations>",
+  "buckets": [ [ <boundary_rule>, "<left_boundary>", "<right_boundary>", "<count_in_bucket>" ], ... ]
+}
+```
+
+The `<boundary_rule>` placeholder is an integer between 0 and 3 with the
+following meaning:
+
+* 0: “open left” (left boundary is exclusive, right boundary is inclusive)
+* 1: “open right” (left boundary is inclusive, right boundary is exclusive)
+* 2: “open both” (both boundaries are exclusive)
+* 3: “closed both” (both boundaries are inclusive)
+
+Note that with the currently implemented bucket schemas, positive buckets are
+“open left”, negative buckets are “open right”, and the zero bucket (with a
+negative left boundary and a positive right boundary) is “closed both”.
+
 ## Targets
 
 The following endpoint returns an overview of the current state of the
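To make the wire format concrete, a small self-contained Go sketch that decodes one <histogram> object exactly as specified above; the apiHistogram type is illustrative, not part of any Prometheus client API:

package main

import (
	"encoding/json"
	"fmt"
)

// apiHistogram mirrors the <histogram> JSON object described above.
// Numbers arrive as strings, like other sample values in this API.
type apiHistogram struct {
	Count   string  `json:"count"`
	Sum     string  `json:"sum"`
	Buckets [][]any `json:"buckets"` // [ boundary_rule, "left", "right", "count" ]
}

func main() {
	raw := `{"count":"5","sum":"11.5","buckets":[[1,"2","4","3"],[1,"4","8","2"]]}`
	var h apiHistogram
	if err := json.Unmarshal([]byte(raw), &h); err != nil {
		panic(err)
	}
	fmt.Println("count:", h.Count, "sum:", h.Sum)
	for _, b := range h.Buckets {
		rule := int(b[0].(float64)) // 0..3, per the boundary_rule table above
		fmt.Printf("rule=%d left=%s right=%s count=%s\n", rule, b[1], b[2], b[3])
	}
}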
@@ -32,6 +32,16 @@ expression), only some of these types are legal as the result from a
 user-specified expression. For example, an expression that returns an instant
 vector is the only type that can be directly graphed.
 
+_Notes about the experimental native histograms:_
+
+* Ingesting native histograms has to be enabled via a [feature
+  flag](../feature_flags/#native-histograms).
+* Once native histograms have been ingested into the TSDB (and even after
+  disabling the feature flag again), both instant vectors and range vectors may
+  now contain samples that aren't simple floating point numbers (float samples)
+  but complete histograms (histogram samples). A vector may contain a mix of
+  float samples and histogram samples.
+
 ## Literals
 
 ### String literals
@@ -11,6 +11,22 @@ instant-vector)`. This means that there is one argument `v` which is an instant
 vector, which if not provided it will default to the value of the expression
 `vector(time())`.
 
+_Notes about the experimental native histograms:_
+
+* Ingesting native histograms has to be enabled via a [feature
+  flag](../feature_flags/#native-histograms). As long as no native histograms
+  have been ingested into the TSDB, all functions will behave as usual.
+* Functions that do not explicitly mention native histograms in their
+  documentation (see below) effectively treat a native histogram as a float
+  sample of value 0. (This is confusing and will change before native
+  histograms become a stable feature.)
+* Functions that do already act on native histograms might still change their
+  behavior in the future.
+* If a function requires the same bucket layout between multiple native
+  histograms it acts on, it will automatically convert them
+  appropriately. (With the currently supported bucket schemas, that's always
+  possible.)
+
 ## `abs()`
 
 `abs(v instant-vector)` returns the input vector with all sample values converted to
@@ -19,8 +35,8 @@ their absolute value.
 ## `absent()`
 
 `absent(v instant-vector)` returns an empty vector if the vector passed to it
-has any elements and a 1-element vector with the value 1 if the vector passed to
-it has no elements.
+has any elements (floats or native histograms) and a 1-element vector with the
+value 1 if the vector passed to it has no elements.
 
 This is useful for alerting on when no time series exist for a given metric name
 and label combination.
@@ -42,8 +58,8 @@ of the 1-element output vector from the input vector.
 ## `absent_over_time()`
 
 `absent_over_time(v range-vector)` returns an empty vector if the range vector
-passed to it has any elements and a 1-element vector with the value 1 if the
-range vector passed to it has no elements.
+passed to it has any elements (floats or native histograms) and a 1-element
+vector with the value 1 if the range vector passed to it has no elements.
 
 This is useful for alerting on when no time series exist for a given metric name
 and label combination for a certain amount of time.
@@ -130,7 +146,14 @@ between now and 2 hours ago:
 delta(cpu_temp_celsius{host="zeus"}[2h])
 ```
 
-`delta` should only be used with gauges.
+`delta` acts on native histograms by calculating a new histogram where each
+component (sum and count of observations, buckets) is the difference between
+the respective component in the first and last native histogram in
+`v`. However, each element in `v` that contains a mix of float and native
+histogram samples within the range will be missing from the result vector.
+
+`delta` should only be used with gauges and native histograms where the
+components behave like gauges (so-called gauge histograms).
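A toy illustration of the per-component calculation described above, using a deliberately simplified histogram model (the real native histogram encodes buckets sparsely with spans; counter-reset handling is elided):

// toyHistogram is a deliberately simplified stand-in for a native histogram.
type toyHistogram struct {
	Count   float64
	Sum     float64
	Buckets []float64 // per-bucket counts; identical layout assumed for both inputs
}

// histogramDelta computes the first-to-last difference component by component,
// mirroring the behavior described above for delta/increase/rate.
func histogramDelta(first, last toyHistogram) toyHistogram {
	out := toyHistogram{
		Count:   last.Count - first.Count,
		Sum:     last.Sum - first.Sum,
		Buckets: make([]float64, len(last.Buckets)),
	}
	for i := range last.Buckets {
		out.Buckets[i] = last.Buckets[i] - first.Buckets[i]
	}
	return out
}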
 
 ## `deriv()`
 
@@ -154,53 +177,148 @@ Special cases are:
 `floor(v instant-vector)` rounds the sample values of all elements in `v` down
 to the nearest integer.
 
+## `histogram_count()` and `histogram_sum()`
+
+_Both functions only act on native histograms, which are an experimental
+feature. The behavior of these functions may change in future versions of
+Prometheus, including their removal from PromQL._
+
+`histogram_count(v instant-vector)` returns the count of observations stored in
+a native histogram. Samples that are not native histograms are ignored and do
+not show up in the returned vector.
+
+Similarly, `histogram_sum(v instant-vector)` returns the sum of observations
+stored in a native histogram.
+
+Use `histogram_count` in the following way to calculate a rate of observations
+(in this case corresponding to “requests per second”) from a native histogram:
+
+    histogram_count(rate(http_request_duration_seconds[10m]))
+
+The additional use of `histogram_sum` enables the calculation of the average of
+observed values (in this case corresponding to “average request duration”):
+
+      histogram_sum(rate(http_request_duration_seconds[10m]))
+    /
+      histogram_count(rate(http_request_duration_seconds[10m]))
+
+## `histogram_fraction()`
+
+_This function only acts on native histograms, which are an experimental
+feature. The behavior of this function may change in future versions of
+Prometheus, including its removal from PromQL._
+
+For a native histogram, `histogram_fraction(lower scalar, upper scalar, v
+instant-vector)` returns the estimated fraction of observations between the
+provided lower and upper values. Samples that are not native histograms are
+ignored and do not show up in the returned vector.
+
+For example, the following expression calculates the fraction of HTTP requests
+over the last hour that took 200ms or less:
+
+    histogram_fraction(0, 0.2, rate(http_request_duration_seconds[1h]))
+
+The error of the estimation depends on the resolution of the underlying native
+histogram and how closely the provided boundaries are aligned with the bucket
+boundaries in the histogram.
+
+`+Inf` and `-Inf` are valid boundary values. For example, if the histogram in
+the expression above included negative observations (which shouldn't be the
+case for request durations), the appropriate lower boundary to include all
+observations less than or equal 0.2 would be `-Inf` rather than `0`.
+
+Whether the provided boundaries are inclusive or exclusive is only relevant if
+the provided boundaries are precisely aligned with bucket boundaries in the
+underlying native histogram. In this case, the behavior depends on the schema
+definition of the histogram. The currently supported schemas all feature
+inclusive upper boundaries and exclusive lower boundaries for positive values
+(and vice versa for negative values). Without a precise alignment of
+boundaries, the function uses linear interpolation to estimate the
+fraction. With the resulting uncertainty, it becomes irrelevant if the
+boundaries are inclusive or exclusive.
+
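The linear interpolation mentioned above can be sketched for a single bucket; this is a toy under the stated uniformity assumption, not the engine's actual implementation:

// observationsBelow estimates how many of a bucket's observations fall below
// x, assuming observations are spread uniformly across the bucket (left, right],
// the same linear-interpolation assumption histogram_fraction makes when a
// boundary does not align with a bucket boundary.
func observationsBelow(left, right, count, x float64) float64 {
	switch {
	case x <= left:
		return 0
	case x >= right:
		return count
	default:
		return count * (x - left) / (right - left)
	}
}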
 ## `histogram_quantile()`
 
-`histogram_quantile(φ scalar, b instant-vector)` calculates the φ-quantile (0 ≤ φ
-≤ 1) from the buckets `b` of a
-[histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). (See
-[histograms and summaries](https://prometheus.io/docs/practices/histograms) for
-a detailed explanation of φ-quantiles and the usage of the histogram metric type
-in general.) The samples in `b` are the counts of observations in each bucket.
-Each sample must have a label `le` where the label value denotes the inclusive
-upper bound of the bucket. (Samples without such a label are silently ignored.)
-The [histogram metric type](https://prometheus.io/docs/concepts/metric_types/#histogram)
-automatically provides time series with the `_bucket` suffix and the appropriate
-labels.
+`histogram_quantile(φ scalar, b instant-vector)` calculates the φ-quantile (0 ≤
+φ ≤ 1) from a [conventional
+histogram](https://prometheus.io/docs/concepts/metric_types/#histogram) or from
+a native histogram. (See [histograms and
+summaries](https://prometheus.io/docs/practices/histograms) for a detailed
+explanation of φ-quantiles and the usage of the (conventional) histogram metric
+type in general.)
+
+_Note that native histograms are an experimental feature. The behavior of this
+function when dealing with native histograms may change in future versions of
+Prometheus._
+
+The conventional float samples in `b` are considered the counts of observations
+in each bucket of one or more conventional histograms. Each float sample must
+have a label `le` where the label value denotes the inclusive upper bound of
+the bucket. (Float samples without such a label are silently ignored.) The
+other labels and the metric name are used to identify the buckets belonging to
+each conventional histogram. The [histogram metric
+type](https://prometheus.io/docs/concepts/metric_types/#histogram)
+automatically provides time series with the `_bucket` suffix and the
+appropriate labels.
+
+The native histogram samples in `b` are treated each individually as a separate
+histogram to calculate the quantile from.
+
+As long as no naming collisions arise, `b` may contain a mix of conventional
+and native histograms.
 
 Use the `rate()` function to specify the time window for the quantile
 calculation.
 
-Example: A histogram metric is called `http_request_duration_seconds`. To
-calculate the 90th percentile of request durations over the last 10m, use the
-following expression:
+Example: A histogram metric is called `http_request_duration_seconds` (and
+therefore the metric name for the buckets of a conventional histogram is
+`http_request_duration_seconds_bucket`). To calculate the 90th percentile of request
+durations over the last 10m, use the following expression in case
+`http_request_duration_seconds` is a conventional histogram:
 
     histogram_quantile(0.9, rate(http_request_duration_seconds_bucket[10m]))
 
+For a native histogram, use the following expression instead:
+
+    histogram_quantile(0.9, rate(http_request_duration_seconds[10m]))
+
 The quantile is calculated for each label combination in
 `http_request_duration_seconds`. To aggregate, use the `sum()` aggregator
-around the `rate()` function. Since the `le` label is required by
-`histogram_quantile()`, it has to be included in the `by` clause. The following
-expression aggregates the 90th percentile by `job`:
+around the `rate()` function. Since the `le` label is required by
+`histogram_quantile()` to deal with conventional histograms, it has to be
+included in the `by` clause. The following expression aggregates the 90th
+percentile by `job` for conventional histograms:
 
     histogram_quantile(0.9, sum by (job, le) (rate(http_request_duration_seconds_bucket[10m])))
 
+When aggregating native histograms, the expression simplifies to:
+
+    histogram_quantile(0.9, sum by (job) (rate(http_request_duration_seconds[10m])))
+
-To aggregate everything, specify only the `le` label:
+To aggregate all conventional histograms, specify only the `le` label:
 
     histogram_quantile(0.9, sum by (le) (rate(http_request_duration_seconds_bucket[10m])))
 
+With native histograms, aggregating everything works as usual without any `by` clause:
+
+    histogram_quantile(0.9, sum(rate(http_request_duration_seconds[10m])))
+
 The `histogram_quantile()` function interpolates quantile values by
-assuming a linear distribution within a bucket. The highest bucket
-must have an upper bound of `+Inf`. (Otherwise, `NaN` is returned.) If
-a quantile is located in the highest bucket, the upper bound of the
-second highest bucket is returned. A lower limit of the lowest bucket
-is assumed to be 0 if the upper bound of that bucket is greater than
-0. In that case, the usual linear interpolation is applied within that
-bucket. Otherwise, the upper bound of the lowest bucket is returned
-for quantiles located in the lowest bucket.
+assuming a linear distribution within a bucket.
 
-If `b` has 0 observations, `NaN` is returned. If `b` contains fewer than two buckets,
-`NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned.
+If `b` has 0 observations, `NaN` is returned. For φ < 0, `-Inf` is
+returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned.
+
+The following is only relevant for conventional histograms: If `b` contains
+fewer than two buckets, `NaN` is returned. The highest bucket must have an
+upper bound of `+Inf`. (Otherwise, `NaN` is returned.) If a quantile is located
+in the highest bucket, the upper bound of the second highest bucket is
+returned. A lower limit of the lowest bucket is assumed to be 0 if the upper
+bound of that bucket is greater than
+0. In that case, the usual linear interpolation is applied within that
+bucket. Otherwise, the upper bound of the lowest bucket is returned for
+quantiles located in the lowest bucket.
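The interpolation rule for conventional buckets, as a hedged standalone sketch (cumulative counts as in _bucket series; the edge cases listed above are elided):

// bucketQuantile locates the φ-quantile inside one conventional bucket
// (lower, upper], given the cumulative count below the bucket, the cumulative
// count at its upper bound, and the total count, assuming a linear
// distribution within the bucket.
func bucketQuantile(phi, lower, upper, cumBelow, cumAtUpper, total float64) float64 {
	rank := phi * total               // rank of the target observation
	inBucket := cumAtUpper - cumBelow // observations inside this bucket
	return lower + (upper-lower)*(rank-cumBelow)/inBucket
}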
 
 ## `holt_winters()`
 
@@ -242,11 +360,17 @@ over the last 5 minutes, per time series in the range vector:
 increase(http_requests_total{job="api-server"}[5m])
 ```
 
-`increase` should only be used with counters. It is syntactic sugar
-for `rate(v)` multiplied by the number of seconds under the specified
-time range window, and should be used primarily for human readability.
-Use `rate` in recording rules so that increases are tracked consistently
-on a per-second basis.
+`increase` acts on native histograms by calculating a new histogram where each
+component (sum and count of observations, buckets) is the increase between
+the respective component in the first and last native histogram in
+`v`. However, each element in `v` that contains a mix of float and native
+histogram samples within the range will be missing from the result vector.
+
+`increase` should only be used with counters and native histograms where the
+components behave like counters. It is syntactic sugar for `rate(v)` multiplied
+by the number of seconds under the specified time range window, and should be
+used primarily for human readability. Use `rate` in recording rules so that
+increases are tracked consistently on a per-second basis.
 
 ## `irate()`
 
@@ -358,8 +482,15 @@ over the last 5 minutes, per time series in the range vector:
 rate(http_requests_total{job="api-server"}[5m])
 ```
 
-`rate` should only be used with counters. It is best suited for alerting,
-and for graphing of slow-moving counters.
+`rate` acts on native histograms by calculating a new histogram where each
+component (sum and count of observations, buckets) is the rate of increase
+between the respective component in the first and last native histogram in
+`v`. However, each element in `v` that contains a mix of float and native
+histogram samples within the range will be missing from the result vector.
+
+`rate` should only be used with counters and native histograms where the
+components behave like counters. It is best suited for alerting, and for
+graphing of slow-moving counters.
 
 Note that when combining `rate()` with an aggregation operator (e.g. `sum()`)
 or a function aggregating over time (any function ending in `_over_time`),
@@ -306,3 +306,31 @@ highest to lowest.
 Operators on the same precedence level are left-associative. For example,
 `2 * 3 % 2` is equivalent to `(2 * 3) % 2`. However `^` is right associative,
 so `2 ^ 3 ^ 2` is equivalent to `2 ^ (3 ^ 2)`.
 
+## Operators for native histograms
+
+Native histograms are an experimental feature. Ingesting native histograms has
+to be enabled via a [feature flag](../feature_flags/#native-histograms). Once
+native histograms have been ingested, they can be queried (even after the
+feature flag has been disabled again). However, the operator support for native
+histograms is still very limited.
+
+Logical/set binary operators work as expected even if histogram samples are
+involved. They only check for the existence of a vector element and don't
+change their behavior depending on the sample type of an element (float or
+histogram).
+
+The binary `+` operator between two native histograms and the `sum` aggregation
+operator to aggregate native histograms are fully supported. Even if the
+histograms involved have different bucket layouts, the buckets are
+automatically converted appropriately so that the operation can be
+performed. (With the currently supported bucket schemas, that's always
+possible.) If either operator has to sum up a mix of histogram samples and
+float samples, the corresponding vector element is removed from the output
+vector entirely.
+
+All other operators do not behave in a meaningful way. They either treat the
+histogram sample as if it were a float sample of value 0, or (in case of
+arithmetic operations between a scalar and a vector) they leave the histogram
+sample unchanged. This behavior will change to a meaningful one before native
+histograms are a stable feature.
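A toy sketch of the component-wise addition that `+` and `sum` perform on native histograms, reusing the simplified toyHistogram model from the delta sketch earlier; automatic bucket-layout conversion is elided and identical layouts are assumed:

// histogramAdd sums two toy histograms component by component, mirroring the
// semantics of `+` and `sum` on native histograms described above.
func histogramAdd(a, b toyHistogram) toyHistogram {
	out := toyHistogram{
		Count:   a.Count + b.Count,
		Sum:     a.Sum + b.Sum,
		Buckets: make([]float64, len(a.Buckets)),
	}
	for i := range a.Buckets {
		out.Buckets[i] = a.Buckets[i] + b.Buckets[i]
	}
	return out
}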
@@ -49,6 +49,11 @@ func main() {
 				}
 				fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
 			}
+
+			for _, hp := range ts.Histograms {
+				h := remote.HistogramProtoToHistogram(hp)
+				fmt.Printf("\tHistogram: %s\n", h.String())
+			}
 		}
 	})
@@ -29,8 +29,7 @@ require (
 	github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
-	github.com/kr/pretty v0.2.1 // indirect
-	github.com/kr/text v0.2.0 // indirect
+	github.com/kr/pretty v0.3.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 	github.com/pkg/errors v0.9.1 // indirect
@@ -55,7 +54,7 @@ require (
 )
 
 require (
-	github.com/prometheus/prometheus v0.38.0
+	github.com/prometheus/prometheus v0.37.1-0.20221011120840-430bdc9dd099
 	golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 // indirect
 )
@ -19,7 +19,7 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
|
||||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
|
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
|
||||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||||
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
|
github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro=
|
||||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||||
github.com/aws/aws-sdk-go v1.44.72 h1:i7J5XT7pjBjtl1OrdIhiQHzsG89wkZCcM1HhyK++3DI=
|
github.com/aws/aws-sdk-go v1.44.72 h1:i7J5XT7pjBjtl1OrdIhiQHzsG89wkZCcM1HhyK++3DI=
|
||||||
github.com/aws/aws-sdk-go v1.44.72/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
github.com/aws/aws-sdk-go v1.44.72/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||||
|
@ -103,19 +103,19 @@ github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||||
github.com/gophercloud/gophercloud v0.25.0 h1:C3Oae7y0fUVQGSsBrb3zliAjdX+riCSEh4lNMejFNI4=
|
github.com/gophercloud/gophercloud v0.25.0 h1:C3Oae7y0fUVQGSsBrb3zliAjdX+riCSEh4lNMejFNI4=
|
||||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||||
github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k=
|
github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k=
|
||||||
github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
|
github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
|
||||||
github.com/hashicorp/consul/api v1.14.0 h1:Y64GIJ8hYTu+tuGekwO4G4ardXoiCivX9wv1iP/kihk=
|
github.com/hashicorp/consul/api v1.13.1 h1:r5cPdVFUy+pFF7nt+0ArLD9hm+E39OewJkvNdjKXcL4=
|
||||||
github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
|
github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
|
||||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||||
github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=
|
github.com/hashicorp/go-hclog v0.12.2 h1:F1fdYblUEsxKiailtkhCCG2g4bipEgaHiDc8vffNpD4=
|
||||||
github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE=
|
github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8=
|
||||||
github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
|
github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
|
||||||
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
||||||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||||
github.com/hashicorp/nomad/api v0.0.0-20220809212729-939d643fec2c h1:lV5A4cLQr1Bh1xGSSQ2R0fDRK4GZnfXxYia4Q7aaTXc=
|
github.com/hashicorp/nomad/api v0.0.0-20220629141207-c2428e1673ec h1:jAF71e0KoaY2LJlRsRxxGz6MNQOG5gTBIc+rklxfNO0=
|
||||||
github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY=
|
github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc=
|
||||||
github.com/hetznercloud/hcloud-go v1.35.2 h1:eEDtmDiI2plZ2UQmj4YpiYse5XbtpXOUBpAdIOLxzgE=
|
github.com/hetznercloud/hcloud-go v1.35.2 h1:eEDtmDiI2plZ2UQmj4YpiYse5XbtpXOUBpAdIOLxzgE=
|
||||||
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
|
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
|
||||||
github.com/influxdata/influxdb v1.10.0 h1:8xDpt8KO3lzrzf/ss+l8r42AGUZvoITu5824berK7SE=
|
github.com/influxdata/influxdb v1.10.0 h1:8xDpt8KO3lzrzf/ss+l8r42AGUZvoITu5824berK7SE=
|
||||||
|
@ -142,8 +142,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
|
@ -157,7 +157,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||||
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
|
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
|
||||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
@ -205,8 +205,10 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
|
||||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||||
github.com/prometheus/prometheus v0.38.0 h1:YSiJ5gDZmXnOntPRyHn1wb/6I1Frasj9dw57XowIqeA=
|
github.com/prometheus/prometheus v0.37.1-0.20221011120840-430bdc9dd099 h1:ISpgxhFfSrMztQTw0Za6xDDC3Fwe4kciR8Pwv3Sz9yE=
|
||||||
github.com/prometheus/prometheus v0.38.0/go.mod h1:2zHO5FtRhM+iu995gwKIb99EXxjeZEuXpKUTIRq4YI0=
|
github.com/prometheus/prometheus v0.37.1-0.20221011120840-430bdc9dd099/go.mod h1:dfkjkdCd3FhLE0BiBIKwwwkZiDQnTnDThE1Zex1UwbA=
|
||||||
|
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||||
|
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
|
||||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZXH21unn+ltzQSXML0=
|
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZXH21unn+ltzQSXML0=
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
|
@ -320,7 +322,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
|
||||||
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
+golang.org/x/tools v0.1.13-0.20220908144252-ce397412b6a4 h1:glzimF7qHZuKVEiMbE7UqBu44MyTjt5u6j3Jz+rfMRM=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@ -329,7 +331,7 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276 h1:7PEE9xCtufpGJzrqweakEEnTh7YFELmnKm/ee+5jmfQ=
+google.golang.org/genproto v0.0.0-20220802133213-ce4fa296bf78 h1:QntLWYqZeuBtJkth3m/6DLznnI0AHJr+AgJXvVh/izw=
 google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=

@ -347,8 +349,9 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
-gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI=
+gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
go.mod (1 change)

@ -159,6 +159,7 @@ require (
 	github.com/opencontainers/image-spec v1.0.2 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
+	github.com/rogpeppe/go-internal v1.8.1 // indirect
 	github.com/sirupsen/logrus v1.8.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	go.mongodb.org/mongo-driver v1.10.2 // indirect

go.sum (4 changes)

@ -686,6 +686,7 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv
 github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
 github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=

@ -748,7 +749,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
+github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
+github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
 github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=

model/histogram/float_histogram.go (new file, 871 lines)

@ -0,0 +1,871 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package histogram

import (
	"fmt"
	"strings"
)

// FloatHistogram is similar to Histogram but uses float64 for all
// counts. Additionally, bucket counts are absolute and not deltas.
//
// A FloatHistogram is needed by PromQL to handle operations that might result
// in fractional counts. Since the counts in a histogram are unlikely to be too
// large to be represented precisely by a float64, a FloatHistogram can also be
// used to represent a histogram with integer counts and thus serves as a more
// generalized representation.
type FloatHistogram struct {
	// Currently valid schema numbers are -4 <= n <= 8. They are all for
	// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
	// then each power of two is divided into 2^n logarithmic buckets. Or
	// in other words, each bucket boundary is the previous boundary times
	// 2^(2^-n).
	Schema int32
	// Width of the zero bucket.
	ZeroThreshold float64
	// Observations falling into the zero bucket. Must be zero or positive.
	ZeroCount float64
	// Total number of observations. Must be zero or positive.
	Count float64
	// Sum of observations. This is also used as the stale marker.
	Sum float64
	// Spans for positive and negative buckets (see Span below).
	PositiveSpans, NegativeSpans []Span
	// Observation counts in buckets. Each represents an absolute count and
	// must be zero or positive.
	PositiveBuckets, NegativeBuckets []float64
}
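
As a quick aside for readers of this diff: the growth factor implied by the schema doc comment above can be checked numerically. The following is a minimal, hypothetical sketch (not part of this file; it assumes only the standard library and the 2^(2^-n) formula quoted in the comment):

package main

import (
	"fmt"
	"math"
)

func main() {
	// For schema n, each bucket boundary is the previous one times 2^(2^-n).
	for _, n := range []int{-1, 0, 2} {
		factor := math.Pow(2, math.Pow(2, -float64(n)))
		fmt.Printf("schema %d: boundaries 1, %g, %g, %g, ...\n",
			n, factor, factor*factor, factor*factor*factor)
	}
	// Prints factors 4 (schema -1), 2 (schema 0), and 2^0.25 ≈ 1.19 (schema 2).
}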

// Copy returns a deep copy of the Histogram.
func (h *FloatHistogram) Copy() *FloatHistogram {
	c := *h

	if h.PositiveSpans != nil {
		c.PositiveSpans = make([]Span, len(h.PositiveSpans))
		copy(c.PositiveSpans, h.PositiveSpans)
	}
	if h.NegativeSpans != nil {
		c.NegativeSpans = make([]Span, len(h.NegativeSpans))
		copy(c.NegativeSpans, h.NegativeSpans)
	}
	if h.PositiveBuckets != nil {
		c.PositiveBuckets = make([]float64, len(h.PositiveBuckets))
		copy(c.PositiveBuckets, h.PositiveBuckets)
	}
	if h.NegativeBuckets != nil {
		c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
		copy(c.NegativeBuckets, h.NegativeBuckets)
	}

	return &c
}

// CopyToSchema works like Copy, but the returned deep copy has the provided
// target schema, which must be ≤ the original schema (i.e. it must have a lower
// resolution).
func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram {
	if targetSchema == h.Schema {
		// Fast path.
		return h.Copy()
	}
	if targetSchema > h.Schema {
		panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema))
	}
	c := FloatHistogram{
		Schema:        targetSchema,
		ZeroThreshold: h.ZeroThreshold,
		ZeroCount:     h.ZeroCount,
		Count:         h.Count,
		Sum:           h.Sum,
	}

	// TODO(beorn7): This is a straightforward implementation using merging
	// iterators for the original buckets and then adding one merged bucket
	// after another to the newly created FloatHistogram. It's well possible
	// that a more involved implementation performs much better, which we
	// could do if this code path turns out to be performance-critical.
	var iInSpan, index int32
	for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(true, 0, targetSchema); it.Next(); {
		b := it.At()
		c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket(
			b, c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan, index,
		)
		index = b.Index
	}
	for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(false, 0, targetSchema); it.Next(); {
		b := it.At()
		c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket(
			b, c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan, index,
		)
		index = b.Index
	}

	return &c
}

// String returns a string representation of the Histogram.
func (h *FloatHistogram) String() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "{count:%g, sum:%g", h.Count, h.Sum)

	var nBuckets []Bucket[float64]
	for it := h.NegativeBucketIterator(); it.Next(); {
		bucket := it.At()
		if bucket.Count != 0 {
			nBuckets = append(nBuckets, it.At())
		}
	}
	for i := len(nBuckets) - 1; i >= 0; i-- {
		fmt.Fprintf(&sb, ", %s", nBuckets[i].String())
	}

	if h.ZeroCount != 0 {
		fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String())
	}

	for it := h.PositiveBucketIterator(); it.Next(); {
		bucket := it.At()
		if bucket.Count != 0 {
			fmt.Fprintf(&sb, ", %s", bucket.String())
		}
	}

	sb.WriteRune('}')
	return sb.String()
}

// ZeroBucket returns the zero bucket.
func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
	return Bucket[float64]{
		Lower:          -h.ZeroThreshold,
		Upper:          h.ZeroThreshold,
		LowerInclusive: true,
		UpperInclusive: true,
		Count:          h.ZeroCount,
	}
}

// Scale scales the FloatHistogram by the provided factor, i.e. it scales all
// bucket counts including the zero bucket and the count and the sum of
// observations. The bucket layout stays the same. This method changes the
// receiving histogram directly (rather than acting on a copy). It returns a
// pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Scale(factor float64) *FloatHistogram {
	h.ZeroCount *= factor
	h.Count *= factor
	h.Sum *= factor
	for i := range h.PositiveBuckets {
		h.PositiveBuckets[i] *= factor
	}
	for i := range h.NegativeBuckets {
		h.NegativeBuckets[i] *= factor
	}
	return h
}

// Add adds the provided other histogram to the receiving histogram. Count, Sum,
// and buckets from the other histogram are added to the corresponding
// components of the receiving histogram. Buckets in the other histogram that do
// not exist in the receiving histogram are inserted into the latter. The
// resulting histogram might have buckets with a population of zero or directly
// adjacent spans (offset=0). To normalize those, call the Compact method.
//
// The method reconciles differences in the zero threshold and in the schema,
// but the schema of the other histogram must be ≥ the schema of the receiving
// histogram (i.e. must have an equal or higher resolution). This means that the
// schema of the receiving histogram won't change. Its zero threshold, however,
// will change if needed. The other histogram will not be modified in any case.
//
// This method returns a pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
	otherZeroCount := h.reconcileZeroBuckets(other)
	h.ZeroCount += otherZeroCount
	h.Count += other.Count
	h.Sum += other.Sum

	// TODO(beorn7): If needed, this can be optimized by inspecting the
	// spans in other and creating missing buckets in h in batches.
	var iInSpan, index int32
	for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); {
		b := it.At()
		h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket(
			b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index,
		)
		index = b.Index
	}
	for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); {
		b := it.At()
		h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket(
			b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index,
		)
		index = b.Index
	}
	return h
}

// Sub works like Add but subtracts the other histogram.
func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
	otherZeroCount := h.reconcileZeroBuckets(other)
	h.ZeroCount -= otherZeroCount
	h.Count -= other.Count
	h.Sum -= other.Sum

	// TODO(beorn7): If needed, this can be optimized by inspecting the
	// spans in other and creating missing buckets in h in batches.
	var iInSpan, index int32
	for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); {
		b := it.At()
		b.Count *= -1
		h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket(
			b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index,
		)
		index = b.Index
	}
	for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); {
		b := it.At()
		b.Count *= -1
		h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket(
			b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index,
		)
		index = b.Index
	}
	return h
}
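
A hedged usage sketch of how the arithmetic above composes (hypothetical helper, not code from this PR; it assumes the import path github.com/prometheus/prometheus/model/histogram from the main.go change in this commit, and two cumulative histograms with the same schema):

package main

import "github.com/prometheus/prometheus/model/histogram"

// perSecond approximates a per-second rate over t seconds from two
// cumulative FloatHistograms h0 (earlier) and h1 (later), assuming no
// counter reset happened in between. Sub mutates its receiver, hence Copy.
func perSecond(h0, h1 *histogram.FloatHistogram, t float64) *histogram.FloatHistogram {
	return h1.Copy().Sub(h0).Scale(1 / t)
}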

// addBucket takes the "coordinates" of the last bucket that was handled and
// adds the provided bucket after it. If a corresponding bucket exists, the
// count is added. If not, the bucket is inserted. The updated slices and the
// coordinates of the inserted or added-to bucket are returned.
func addBucket(
	b Bucket[float64],
	spans []Span, buckets []float64,
	iSpan, iBucket int,
	iInSpan, index int32,
) (
	newSpans []Span, newBuckets []float64,
	newISpan, newIBucket int, newIInSpan int32,
) {
	if iSpan == -1 {
		// First add, check if it is before all spans.
		if len(spans) == 0 || spans[0].Offset > b.Index {
			// Add bucket before all others.
			buckets = append(buckets, 0)
			copy(buckets[1:], buckets)
			buckets[0] = b.Count
			if len(spans) > 0 && spans[0].Offset == b.Index+1 {
				spans[0].Length++
				spans[0].Offset--
				return spans, buckets, 0, 0, 0
			}
			spans = append(spans, Span{})
			copy(spans[1:], spans)
			spans[0] = Span{Offset: b.Index, Length: 1}
			if len(spans) > 1 {
				// Convert the absolute offset in the formerly
				// first span to a relative offset.
				spans[1].Offset -= b.Index + 1
			}
			return spans, buckets, 0, 0, 0
		}
		if spans[0].Offset == b.Index {
			// Just add to first bucket.
			buckets[0] += b.Count
			return spans, buckets, 0, 0, 0
		}
		// We are behind the first bucket, so set everything to the
		// first bucket and continue normally.
		iSpan, iBucket, iInSpan = 0, 0, 0
		index = spans[0].Offset
	}
	deltaIndex := b.Index - index
	for {
		remainingInSpan := int32(spans[iSpan].Length) - iInSpan
		if deltaIndex < remainingInSpan {
			// Bucket is in current span.
			iBucket += int(deltaIndex)
			iInSpan += deltaIndex
			buckets[iBucket] += b.Count
			return spans, buckets, iSpan, iBucket, iInSpan
		}
		deltaIndex -= remainingInSpan
		iBucket += int(remainingInSpan)
		iSpan++
		if iSpan == len(spans) || deltaIndex < spans[iSpan].Offset {
			// Bucket is in gap behind previous span (or there are no further spans).
			buckets = append(buckets, 0)
			copy(buckets[iBucket+1:], buckets[iBucket:])
			buckets[iBucket] = b.Count
			if deltaIndex == 0 {
				// Directly after previous span, extend previous span.
				if iSpan < len(spans) {
					spans[iSpan].Offset--
				}
				iSpan--
				iInSpan = int32(spans[iSpan].Length)
				spans[iSpan].Length++
				return spans, buckets, iSpan, iBucket, iInSpan
			}
			if iSpan < len(spans) && deltaIndex == spans[iSpan].Offset-1 {
				// Directly before next span, extend next span.
				iInSpan = 0
				spans[iSpan].Offset--
				spans[iSpan].Length++
				return spans, buckets, iSpan, iBucket, iInSpan
			}
			// No next span, or next span is not directly adjacent to new bucket.
			// Add new span.
			iInSpan = 0
			if iSpan < len(spans) {
				spans[iSpan].Offset -= deltaIndex + 1
			}
			spans = append(spans, Span{})
			copy(spans[iSpan+1:], spans[iSpan:])
			spans[iSpan] = Span{Length: 1, Offset: deltaIndex}
			return spans, buckets, iSpan, iBucket, iInSpan
		}
		// Try start of next span.
		deltaIndex -= spans[iSpan].Offset
		iInSpan = 0
	}
}

// Compact eliminates empty buckets at the beginning and end of each span, then
// merges spans that are consecutive or at most maxEmptyBuckets apart, and
// finally splits spans that contain more consecutive empty buckets than
// maxEmptyBuckets. (The actual implementation might do something more efficient
// but with the same result.) The compaction happens "in place" in the
// receiving histogram, but a pointer to it is returned for convenience.
//
// The ideal value for maxEmptyBuckets depends on circumstances. The motivation
// to set maxEmptyBuckets > 0 is the assumption that it is less overhead to
// represent very few empty buckets explicitly within one span than cutting the
// one span into two to treat the empty buckets as a gap between the two spans,
// both in terms of storage requirement as well as in terms of encoding and
// decoding effort. However, the tradeoffs are subtle. For one, they are
// different in the exposition format vs. in a TSDB chunk vs. for the in-memory
// representation as Go types. In the TSDB, as an additional aspect, the span
// layout is only stored once per chunk, while many histograms with that same
// chunk layout are then only stored with their buckets (so that even a single
// empty bucket will be stored many times).
//
// For the Go types, an additional Span takes 8 bytes. Similarly, an additional
// bucket takes 8 bytes. Therefore, with a single separating empty bucket, both
// options have the same storage requirement, but the single-span solution is
// easier to iterate through. Still, the safest bet is to use maxEmptyBuckets==0
// and only use a larger number if you know what you are doing.
func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
	h.PositiveBuckets, h.PositiveSpans = compactBuckets(
		h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, false,
	)
	h.NegativeBuckets, h.NegativeSpans = compactBuckets(
		h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, false,
	)
	return h
}
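
To make the span bookkeeping concrete, here is a small, hypothetical example of Compact(0) on a sparse layout (values chosen by this editor for illustration, not taken from the PR's tests; the expected result follows from reading compactBuckets below):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          0,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}, {Offset: 0, Length: 1}},
		PositiveBuckets: []float64{1, 0, 3},
	}
	h.Compact(0)
	// The two directly adjacent spans (offset 0) are merged, and the empty
	// middle bucket is cut out, splitting the merged span again; this should
	// leave spans [{0 1} {1 1}] and buckets [1 3].
	fmt.Println(h.PositiveSpans, h.PositiveBuckets)
}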

// DetectReset returns true if the receiving histogram is missing any buckets
// that have a non-zero population in the provided previous histogram. It also
// returns true if any count (in any bucket, in the zero count, or in the count
// of observations, but NOT the sum of observations) is smaller in the receiving
// histogram compared to the previous histogram. Otherwise, it returns false.
//
// Special behavior in case the Schema or the ZeroThreshold are not the same in
// both histograms:
//
// - A decrease of the ZeroThreshold or an increase of the Schema (i.e. an
// increase of resolution) can only happen together with a reset. Thus, the
// method returns true in either case.
//
// - Upon an increase of the ZeroThreshold, the buckets in the previous
// histogram that fall within the new ZeroThreshold are added to the ZeroCount
// of the previous histogram (without mutating the provided previous
// histogram). The scenario that a populated bucket of the previous histogram
// is partially within, partially outside of the new ZeroThreshold, can only
// happen together with a counter reset and therefore shortcuts to returning
// true.
//
// - Upon a decrease of the Schema, the buckets of the previous histogram are
// merged so that they match the new, lower-resolution schema (again without
// mutating the provided previous histogram).
//
// Note that this kind of reset detection is quite expensive. Ideally, resets
// are detected at ingest time and stored in the TSDB, so that the reset
// information can be read directly from there rather than be detected each time
// again.
func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
	if h.Count < previous.Count {
		return true
	}
	if h.Schema > previous.Schema {
		return true
	}
	if h.ZeroThreshold < previous.ZeroThreshold {
		// ZeroThreshold decreased.
		return true
	}
	previousZeroCount, newThreshold := previous.zeroCountForLargerThreshold(h.ZeroThreshold)
	if newThreshold != h.ZeroThreshold {
		// ZeroThreshold is within a populated bucket in previous
		// histogram.
		return true
	}
	if h.ZeroCount < previousZeroCount {
		return true
	}
	currIt := h.floatBucketIterator(true, h.ZeroThreshold, h.Schema)
	prevIt := previous.floatBucketIterator(true, h.ZeroThreshold, h.Schema)
	if detectReset(currIt, prevIt) {
		return true
	}
	currIt = h.floatBucketIterator(false, h.ZeroThreshold, h.Schema)
	prevIt = previous.floatBucketIterator(false, h.ZeroThreshold, h.Schema)
	return detectReset(currIt, prevIt)
}
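
A minimal sketch of how a caller might combine DetectReset with the arithmetic above when computing deltas (hypothetical helper, assuming both histograms share a schema; not code from this PR):

package main

import "github.com/prometheus/prometheus/model/histogram"

// delta returns curr-prev, or a copy of curr itself if a counter reset was
// detected, mirroring how counter rates treat resets.
func delta(prev, curr *histogram.FloatHistogram) *histogram.FloatHistogram {
	if curr.DetectReset(prev) {
		return curr.Copy()
	}
	return curr.Copy().Sub(prev)
}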

func detectReset(currIt, prevIt BucketIterator[float64]) bool {
	if !prevIt.Next() {
		return false // If no buckets in previous histogram, nothing can be reset.
	}
	prevBucket := prevIt.At()
	if !currIt.Next() {
		// No bucket in current, but at least one in previous
		// histogram. Check if any of those are non-zero, in which case
		// this is a reset.
		for {
			if prevBucket.Count != 0 {
				return true
			}
			if !prevIt.Next() {
				return false
			}
		}
	}
	currBucket := currIt.At()
	for {
		// Forward currIt until we find the bucket corresponding to prevBucket.
		for currBucket.Index < prevBucket.Index {
			if !currIt.Next() {
				// Reached end of currIt early, therefore
				// previous histogram has a bucket that the
				// current one does not have. Unless all
				// remaining buckets in the previous histogram
				// are unpopulated, this is a reset.
				for {
					if prevBucket.Count != 0 {
						return true
					}
					if !prevIt.Next() {
						return false
					}
				}
			}
			currBucket = currIt.At()
		}
		if currBucket.Index > prevBucket.Index {
			// Previous histogram has a bucket the current one does
			// not have. If it's populated, it's a reset.
			if prevBucket.Count != 0 {
				return true
			}
		} else {
			// We have reached corresponding buckets in both iterators.
			// We can finally compare the counts.
			if currBucket.Count < prevBucket.Count {
				return true
			}
		}
		if !prevIt.Next() {
			// Reached end of prevIt without finding offending buckets.
			return false
		}
		prevBucket = prevIt.At()
	}
}

// PositiveBucketIterator returns a BucketIterator to iterate over all positive
// buckets in ascending order (starting next to the zero bucket and going up).
func (h *FloatHistogram) PositiveBucketIterator() BucketIterator[float64] {
	return h.floatBucketIterator(true, 0, h.Schema)
}

// NegativeBucketIterator returns a BucketIterator to iterate over all negative
// buckets in descending order (starting next to the zero bucket and going
// down).
func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] {
	return h.floatBucketIterator(false, 0, h.Schema)
}

// PositiveReverseBucketIterator returns a BucketIterator to iterate over all
// positive buckets in descending order (starting at the highest bucket and
// going down towards the zero bucket).
func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] {
	return newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
}

// NegativeReverseBucketIterator returns a BucketIterator to iterate over all
// negative buckets in ascending order (starting at the lowest bucket and going
// up towards the zero bucket).
func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] {
	return newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
}

// AllBucketIterator returns a BucketIterator to iterate over all negative,
// zero, and positive buckets in ascending order (starting at the lowest bucket
// and going up). If the highest negative bucket or the lowest positive bucket
// overlap with the zero bucket, their upper or lower boundary, respectively, is
// set to the zero threshold.
func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
	return &allFloatBucketIterator{
		h:       h,
		negIter: h.NegativeReverseBucketIterator(),
		posIter: h.PositiveBucketIterator(),
		state:   -1,
	}
}
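
A hypothetical illustration of the iterator API (again assuming the model/histogram import path; the histogram values are invented for this example):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          0,
		ZeroThreshold:   0.001,
		ZeroCount:       2,
		Count:           6,
		Sum:             10,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{3, 1},
	}
	for it := h.AllBucketIterator(); it.Next(); {
		fmt.Println(it.At().String())
	}
	// Should print roughly:
	//   [-0.001,0.001]:2
	//   (0.5,1]:3
	//   (1,2]:1
}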

// zeroCountForLargerThreshold returns what the histogram's zero count would be
// if the ZeroThreshold had the provided larger (or equal) value. If the
// provided value is less than the histogram's ZeroThreshold, the method panics.
// If the largerThreshold ends up within a populated bucket of the histogram, it
// is adjusted upwards to the lower limit of that bucket (all in terms of
// absolute values) and that bucket's count is included in the returned
// count. The adjusted threshold is returned, too.
func (h *FloatHistogram) zeroCountForLargerThreshold(largerThreshold float64) (count, threshold float64) {
	// Fast path.
	if largerThreshold == h.ZeroThreshold {
		return h.ZeroCount, largerThreshold
	}
	if largerThreshold < h.ZeroThreshold {
		panic(fmt.Errorf("new threshold %f is less than old threshold %f", largerThreshold, h.ZeroThreshold))
	}
outer:
	for {
		count = h.ZeroCount
		i := h.PositiveBucketIterator()
		for i.Next() {
			b := i.At()
			if b.Lower >= largerThreshold {
				break
			}
			count += b.Count // Bucket to be merged into zero bucket.
			if b.Upper > largerThreshold {
				// New threshold ended up within a bucket. If it's
				// populated, we need to adjust largerThreshold before
				// we are done here.
				if b.Count != 0 {
					largerThreshold = b.Upper
				}
				break
			}
		}
		i = h.NegativeBucketIterator()
		for i.Next() {
			b := i.At()
			if b.Upper <= -largerThreshold {
				break
			}
			count += b.Count // Bucket to be merged into zero bucket.
			if b.Lower < -largerThreshold {
				// New threshold ended up within a bucket. If
				// it's populated, we need to adjust
				// largerThreshold and have to redo the whole
				// thing because the treatment of the positive
				// buckets is invalid now.
				if b.Count != 0 {
					largerThreshold = -b.Lower
					continue outer
				}
				break
			}
		}
		return count, largerThreshold
	}
}

// trimBucketsInZeroBucket removes all buckets that are within the zero
// bucket. It assumes that the zero threshold is at a bucket boundary and that
// the counts in the buckets to remove are already part of the zero count.
func (h *FloatHistogram) trimBucketsInZeroBucket() {
	i := h.PositiveBucketIterator()
	bucketsIdx := 0
	for i.Next() {
		b := i.At()
		if b.Lower >= h.ZeroThreshold {
			break
		}
		h.PositiveBuckets[bucketsIdx] = 0
		bucketsIdx++
	}
	i = h.NegativeBucketIterator()
	bucketsIdx = 0
	for i.Next() {
		b := i.At()
		if b.Upper <= -h.ZeroThreshold {
			break
		}
		h.NegativeBuckets[bucketsIdx] = 0
		bucketsIdx++
	}
	// We are abusing Compact to trim the buckets set to zero
	// above. Premature compacting could cause additional cost, but this
	// code path is probably rarely used anyway.
	h.Compact(0)
}

// reconcileZeroBuckets finds a zero bucket large enough to include the zero
// buckets of both histograms (the receiving histogram and the other histogram)
// with a zero threshold that is not within a populated bucket in either
// histogram. This method modifies the receiving histogram accordingly, but
// leaves the other histogram as is. Instead, it returns the zero count the
// other histogram would have if it were modified.
func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
	otherZeroCount := other.ZeroCount
	otherZeroThreshold := other.ZeroThreshold

	for otherZeroThreshold != h.ZeroThreshold {
		if h.ZeroThreshold > otherZeroThreshold {
			otherZeroCount, otherZeroThreshold = other.zeroCountForLargerThreshold(h.ZeroThreshold)
		}
		if otherZeroThreshold > h.ZeroThreshold {
			h.ZeroCount, h.ZeroThreshold = h.zeroCountForLargerThreshold(otherZeroThreshold)
			h.trimBucketsInZeroBucket()
		}
	}
	return otherZeroCount
}

// floatBucketIterator is a low-level constructor for bucket iterators.
//
// If positive is true, the returned iterator iterates through the positive
// buckets, otherwise through the negative buckets.
//
// If absoluteStartValue is < the lowest absolute value of any upper bucket
// boundary, the iterator starts with the first bucket. Otherwise, it will skip
// all buckets with an absolute value of their upper boundary ≤
// absoluteStartValue.
//
// targetSchema must be ≤ the schema of FloatHistogram (and of course within the
// legal values for schemas in general). The buckets are merged to match the
// targetSchema prior to iterating (without mutating FloatHistogram).
func (h *FloatHistogram) floatBucketIterator(
	positive bool, absoluteStartValue float64, targetSchema int32,
) *floatBucketIterator {
	if targetSchema > h.Schema {
		panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
	}
	i := &floatBucketIterator{
		baseBucketIterator: baseBucketIterator[float64, float64]{
			schema:   h.Schema,
			positive: positive,
		},
		targetSchema:       targetSchema,
		absoluteStartValue: absoluteStartValue,
	}
	if positive {
		i.spans = h.PositiveSpans
		i.buckets = h.PositiveBuckets
	} else {
		i.spans = h.NegativeSpans
		i.buckets = h.NegativeBuckets
	}
	return i
}

// newReverseFloatBucketIterator is a low-level constructor for reverse bucket iterators.
func newReverseFloatBucketIterator(
	spans []Span, buckets []float64, schema int32, positive bool,
) *reverseFloatBucketIterator {
	r := &reverseFloatBucketIterator{
		baseBucketIterator: baseBucketIterator[float64, float64]{
			schema:   schema,
			spans:    spans,
			buckets:  buckets,
			positive: positive,
		},
	}

	r.spansIdx = len(r.spans) - 1
	r.bucketsIdx = len(r.buckets) - 1
	if r.spansIdx >= 0 {
		r.idxInSpan = int32(r.spans[r.spansIdx].Length) - 1
	}
	r.currIdx = 0
	for _, s := range r.spans {
		r.currIdx += s.Offset + int32(s.Length)
	}

	return r
}

type floatBucketIterator struct {
	baseBucketIterator[float64, float64]

	targetSchema       int32   // targetSchema is the schema to merge to and must be ≤ schema.
	origIdx            int32   // The bucket index within the original schema.
	absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value.
}

func (i *floatBucketIterator) Next() bool {
	if i.spansIdx >= len(i.spans) {
		return false
	}

	// Copy all of these into local variables so that we can forward to the
	// next bucket and then roll back if needed.
	origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan
	span := i.spans[spansIdx]
	firstPass := true
	i.currCount = 0

mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema.
	for {
		if i.bucketsIdx == 0 {
			// Seed origIdx for the first bucket.
			origIdx = span.Offset
		} else {
			origIdx++
		}
		for idxInSpan >= span.Length {
			// We have exhausted the current span and have to find a new
			// one. We even handle pathologic spans of length 0 here.
			idxInSpan = 0
			spansIdx++
			if spansIdx >= len(i.spans) {
				if firstPass {
					return false
				}
				break mergeLoop
			}
			span = i.spans[spansIdx]
			origIdx += span.Offset
		}
		currIdx := i.targetIdx(origIdx)
		if firstPass {
			i.currIdx = currIdx
			firstPass = false
		} else if currIdx != i.currIdx {
			// Reached next bucket in targetSchema.
			// Do not actually forward to the next bucket, but break out.
			break mergeLoop
		}
		i.currCount += i.buckets[i.bucketsIdx]
		idxInSpan++
		i.bucketsIdx++
		i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan
		if i.schema == i.targetSchema {
			// Don't need to test the next bucket for mergeability
			// if we have no schema change anyway.
			break mergeLoop
		}
	}
	// Skip buckets before absoluteStartValue.
	// TODO(beorn7): Maybe do something more efficient than this recursive call.
	if getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
		return i.Next()
	}
	return true
}

// targetIdx returns the bucket index within i.targetSchema for the given bucket
// index within i.schema.
func (i *floatBucketIterator) targetIdx(idx int32) int32 {
	if i.schema == i.targetSchema {
		// Fast path for the common case. The below would yield the same
		// result, just with more effort.
		return idx
	}
	return ((idx - 1) >> (i.schema - i.targetSchema)) + 1
}
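
For readers puzzling over the bit arithmetic in targetIdx: merging from a higher schema s to a lower target t halves the index range (s-t) times, and the -1/+1 shift keeps upper bucket bounds aligned. A hypothetical check, reimplementing the same formula outside the package:

package main

import "fmt"

// targetIdx mirrors the mapping above for illustration.
func targetIdx(idx, schema, targetSchema int32) int32 {
	return ((idx - 1) >> (schema - targetSchema)) + 1
}

func main() {
	// Schema 1 buckets with upper bounds √2, 2, 2√2, 4 (indices 1..4) map to
	// schema 0 buckets with upper bounds 2, 2, 4, 4 (indices 1, 1, 2, 2).
	for idx := int32(1); idx <= 4; idx++ {
		fmt.Println(idx, "->", targetIdx(idx, 1, 0))
	}
}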

type reverseFloatBucketIterator struct {
	baseBucketIterator[float64, float64]
	idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection.
}

func (i *reverseFloatBucketIterator) Next() bool {
	i.currIdx--
	if i.bucketsIdx < 0 {
		return false
	}

	for i.idxInSpan < 0 {
		// We have exhausted the current span and have to find a new
		// one. We'll even handle pathologic spans of length 0.
		i.spansIdx--
		i.idxInSpan = int32(i.spans[i.spansIdx].Length) - 1
		i.currIdx -= i.spans[i.spansIdx+1].Offset
	}

	i.currCount = i.buckets[i.bucketsIdx]
	i.bucketsIdx--
	i.idxInSpan--
	return true
}

type allFloatBucketIterator struct {
	h                *FloatHistogram
	negIter, posIter BucketIterator[float64]
	// -1 means we are iterating negative buckets.
	// 0 means it is time for the zero bucket.
	// 1 means we are iterating positive buckets.
	// Anything else means iteration is over.
	state      int8
	currBucket Bucket[float64]
}

func (i *allFloatBucketIterator) Next() bool {
	switch i.state {
	case -1:
		if i.negIter.Next() {
			i.currBucket = i.negIter.At()
			if i.currBucket.Upper > -i.h.ZeroThreshold {
				i.currBucket.Upper = -i.h.ZeroThreshold
			}
			return true
		}
		i.state = 0
		return i.Next()
	case 0:
		i.state = 1
		if i.h.ZeroCount > 0 {
			i.currBucket = Bucket[float64]{
				Lower:          -i.h.ZeroThreshold,
				Upper:          i.h.ZeroThreshold,
				LowerInclusive: true,
				UpperInclusive: true,
				Count:          i.h.ZeroCount,
				// Index is irrelevant for the zero bucket.
			}
			return true
		}
		return i.Next()
	case 1:
		if i.posIter.Next() {
			i.currBucket = i.posIter.At()
			if i.currBucket.Lower < i.h.ZeroThreshold {
				i.currBucket.Lower = i.h.ZeroThreshold
			}
			return true
		}
		i.state = 42
		return false
	}

	return false
}

func (i *allFloatBucketIterator) At() Bucket[float64] {
	return i.currBucket
}
model/histogram/float_histogram_test.go (new file, 1836 lines)

File diff suppressed because it is too large.
model/histogram/generic.go (new file, 536 lines)

@ -0,0 +1,536 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package histogram

import (
	"fmt"
	"math"
	"strings"
)

// BucketCount is a type constraint for the count in a bucket, which can be
// float64 (for type FloatHistogram) or uint64 (for type Histogram).
type BucketCount interface {
	float64 | uint64
}

// internalBucketCount is used internally by Histogram and FloatHistogram. The
// difference to the BucketCount above is that Histogram internally uses deltas
// between buckets rather than absolute counts (while FloatHistogram uses
// absolute counts directly). Go type parameters don't allow type
// specialization. Therefore, where special treatment of deltas between buckets
// vs. absolute counts is important, this information has to be provided as a
// separate boolean parameter "deltaBuckets".
type internalBucketCount interface {
	float64 | int64
}

// Bucket represents a bucket with lower and upper limit and the absolute count
// of samples in the bucket. It also specifies if each limit is inclusive or
// not. (Mathematically, inclusive limits create a closed interval, and
// non-inclusive limits an open interval.)
//
// To represent cumulative buckets, Lower is set to -Inf, and the Count is then
// cumulative (including the counts of all buckets for smaller values).
type Bucket[BC BucketCount] struct {
	Lower, Upper                   float64
	LowerInclusive, UpperInclusive bool
	Count                          BC

	// Index within schema. To easily compare buckets that share the same
	// schema and sign (positive or negative). Irrelevant for the zero bucket.
	Index int32
}

// String returns a string representation of a Bucket, using the usual
// mathematical notation of '['/']' for inclusive bounds and '('/')' for
// non-inclusive bounds.
func (b Bucket[BC]) String() string {
	var sb strings.Builder
	if b.LowerInclusive {
		sb.WriteRune('[')
	} else {
		sb.WriteRune('(')
	}
	fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
	if b.UpperInclusive {
		sb.WriteRune(']')
	} else {
		sb.WriteRune(')')
	}
	fmt.Fprintf(&sb, ":%v", b.Count)
	return sb.String()
}
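
A hypothetical example of the notation String produces, given the inclusivity rules above (the bucket values are invented):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	b := histogram.Bucket[float64]{
		Lower: 0.5, Upper: 1,
		LowerInclusive: false, UpperInclusive: true,
		Count: 3,
	}
	fmt.Println(b.String()) // (0.5,1]:3
}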
|
||||||
|
|
||||||
|
// BucketIterator iterates over the buckets of a Histogram, returning decoded
|
||||||
|
// buckets.
|
||||||
|
type BucketIterator[BC BucketCount] interface {
|
||||||
|
// Next advances the iterator by one.
|
||||||
|
Next() bool
|
||||||
|
// At returns the current bucket.
|
||||||
|
At() Bucket[BC]
|
||||||
|
}
|
||||||
|
|
||||||
|
// baseBucketIterator provides a struct that is shared by most BucketIterator
|
||||||
|
// implementations, together with an implementation of the At method. This
|
||||||
|
// iterator can be embedded in full implementations of BucketIterator to save on
|
||||||
|
// code replication.
|
||||||
|
type baseBucketIterator[BC BucketCount, IBC internalBucketCount] struct {
|
||||||
|
schema int32
|
||||||
|
spans []Span
|
||||||
|
buckets []IBC
|
||||||
|
|
||||||
|
positive bool // Whether this is for positive buckets.
|
||||||
|
|
||||||
|
spansIdx int // Current span within spans slice.
|
||||||
|
idxInSpan uint32 // Index in the current span. 0 <= idxInSpan < span.Length.
|
||||||
|
bucketsIdx int // Current bucket within buckets slice.
|
||||||
|
|
||||||
|
currCount IBC // Count in the current bucket.
|
||||||
|
currIdx int32 // The actual bucket index.
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] {
|
||||||
|
bucket := Bucket[BC]{
|
||||||
|
Count: BC(b.currCount),
|
||||||
|
Index: b.currIdx,
|
||||||
|
}
|
||||||
|
if b.positive {
|
||||||
|
bucket.Upper = getBound(b.currIdx, b.schema)
|
||||||
|
bucket.Lower = getBound(b.currIdx-1, b.schema)
|
||||||
|
} else {
|
||||||
|
bucket.Lower = -getBound(b.currIdx, b.schema)
|
||||||
|
bucket.Upper = -getBound(b.currIdx-1, b.schema)
|
||||||
|
}
|
||||||
|
bucket.LowerInclusive = bucket.Lower < 0
|
||||||
|
bucket.UpperInclusive = bucket.Upper > 0
|
||||||
|
return bucket
|
||||||
|
}
|
||||||
|
|
||||||
|
// compactBuckets is a generic function used by both Histogram.Compact and
|
||||||
|
// FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are
|
||||||
|
// deltas. Set it to false if the buckets contain absolute counts.
|
||||||
|
func compactBuckets[IBC internalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) {
|
||||||
|
// Fast path: If there are no empty buckets AND no offset in any span is
|
||||||
|
// <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return
|
||||||
|
// immediately. We check that first because it's cheap and presumably
|
||||||
|
// common.
|
||||||
|
nothingToDo := true
|
||||||
|
var currentBucketAbsolute IBC
|
||||||
|
for _, bucket := range buckets {
|
||||||
|
if deltaBuckets {
|
||||||
|
currentBucketAbsolute += bucket
|
||||||
|
} else {
|
||||||
|
currentBucketAbsolute = bucket
|
||||||
|
}
|
||||||
|
if currentBucketAbsolute == 0 {
|
||||||
|
nothingToDo = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if nothingToDo {
|
||||||
|
for _, span := range spans {
|
||||||
|
if int(span.Offset) <= maxEmptyBuckets || span.Length == 0 {
|
||||||
|
nothingToDo = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if nothingToDo {
|
||||||
|
return buckets, spans
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var iBucket, iSpan int
|
||||||
|
var posInSpan uint32
|
||||||
|
currentBucketAbsolute = 0
|
||||||
|
|
||||||
|
// Helper function.
|
||||||
|
emptyBucketsHere := func() int {
|
||||||
|
i := 0
|
||||||
|
abs := currentBucketAbsolute
|
||||||
|
for uint32(i)+posInSpan < spans[iSpan].Length && abs == 0 {
|
||||||
|
i++
|
||||||
|
if i+iBucket >= len(buckets) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
abs = buckets[i+iBucket]
|
||||||
|
}
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge spans with zero-offset to avoid special cases later.
|
||||||
|
if len(spans) > 1 {
|
||||||
|
for i, span := range spans[1:] {
|
||||||
|
if span.Offset == 0 {
|
||||||
|
spans[iSpan].Length += span.Length
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
iSpan++
|
||||||
|
if i+1 != iSpan {
|
||||||
|
spans[iSpan] = span
|
||||||
|
}
|
||||||
|
}
|
||||||
|
spans = spans[:iSpan+1]
|
||||||
|
iSpan = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge spans with zero-length to avoid special cases later.
|
||||||
|
for i, span := range spans {
|
||||||
|
if span.Length == 0 {
|
||||||
|
if i+1 < len(spans) {
|
||||||
|
spans[i+1].Offset += span.Offset
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if i != iSpan {
|
||||||
|
spans[iSpan] = span
|
||||||
|
}
|
||||||
|
iSpan++
|
||||||
|
}
|
||||||
|
spans = spans[:iSpan]
|
||||||
|
iSpan = 0
|
||||||
|
|
||||||
|
// Cut out empty buckets from start and end of spans, no matter
|
||||||
|
// what. Also cut out empty buckets from the middle of a span but only
|
||||||
|
// if there are more than maxEmptyBuckets consecutive empty buckets.
|
||||||
|
for iBucket < len(buckets) {
|
||||||
|
if deltaBuckets {
|
||||||
|
currentBucketAbsolute += buckets[iBucket]
|
||||||
|
} else {
|
||||||
|
currentBucketAbsolute = buckets[iBucket]
|
||||||
|
}
|
||||||
|
if nEmpty := emptyBucketsHere(); nEmpty > 0 {
|
||||||
|
if posInSpan > 0 &&
|
||||||
|
nEmpty < int(spans[iSpan].Length-posInSpan) &&
|
||||||
|
nEmpty <= maxEmptyBuckets {
|
||||||
|
// The empty buckets are in the middle of a
|
||||||
|
// span, and there are few enough to not bother.
|
||||||
|
// Just fast-forward.
|
||||||
|
iBucket += nEmpty
|
||||||
|
if deltaBuckets {
|
||||||
|
currentBucketAbsolute = 0
|
||||||
|
}
|
||||||
|
posInSpan += uint32(nEmpty)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// In all other cases, we cut out the empty buckets.
|
||||||
|
if deltaBuckets && iBucket+nEmpty < len(buckets) {
|
||||||
|
currentBucketAbsolute = -buckets[iBucket]
|
||||||
|
buckets[iBucket+nEmpty] += buckets[iBucket]
|
||||||
|
}
|
||||||
|
buckets = append(buckets[:iBucket], buckets[iBucket+nEmpty:]...)
|
||||||
|
if posInSpan == 0 {
|
||||||
|
// Start of span.
|
||||||
|
if nEmpty == int(spans[iSpan].Length) {
|
||||||
|
// The whole span is empty.
|
||||||
|
offset := spans[iSpan].Offset
|
||||||
|
spans = append(spans[:iSpan], spans[iSpan+1:]...)
|
||||||
|
if len(spans) > iSpan {
|
||||||
|
spans[iSpan].Offset += offset + int32(nEmpty)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
spans[iSpan].Length -= uint32(nEmpty)
|
||||||
|
spans[iSpan].Offset += int32(nEmpty)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// It's in the middle or in the end of the span.
|
||||||
|
// Split the current span.
|
||||||
|
newSpan := Span{
|
||||||
|
Offset: int32(nEmpty),
|
||||||
|
Length: spans[iSpan].Length - posInSpan - uint32(nEmpty),
|
||||||
|
}
|
||||||
|
spans[iSpan].Length = posInSpan
|
||||||
|
// In any case, we have to split to the next span.
|
||||||
|
iSpan++
|
||||||
|
posInSpan = 0
|
||||||
|
if newSpan.Length == 0 {
|
||||||
|
// The span is empty, so we were already at the end of a span.
|
||||||
|
// We don't have to insert the new span, just adjust the next
|
||||||
|
// span's offset, if there is one.
|
||||||
|
if iSpan < len(spans) {
|
||||||
|
spans[iSpan].Offset += int32(nEmpty)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Insert the new span.
|
||||||
|
spans = append(spans, Span{})
|
||||||
|
if iSpan+1 < len(spans) {
|
||||||
|
copy(spans[iSpan+1:], spans[iSpan:])
|
||||||
|
}
|
||||||
|
spans[iSpan] = newSpan
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
iBucket++
|
||||||
|
posInSpan++
|
||||||
|
if posInSpan >= spans[iSpan].Length {
|
||||||
|
posInSpan = 0
|
||||||
|
iSpan++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if maxEmptyBuckets == 0 || len(buckets) == 0 {
|
||||||
|
return buckets, spans
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally, check if any offsets between spans are small enough to merge
|
||||||
|
// the spans.
|
||||||
|
iBucket = int(spans[0].Length)
|
||||||
|
if deltaBuckets {
|
||||||
|
currentBucketAbsolute = 0
|
||||||
|
for _, bucket := range buckets[:iBucket] {
|
||||||
|
currentBucketAbsolute += bucket
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iSpan = 1
|
||||||
|
for iSpan < len(spans) {
|
||||||
|
if int(spans[iSpan].Offset) > maxEmptyBuckets {
|
||||||
|
l := int(spans[iSpan].Length)
|
||||||
|
if deltaBuckets {
|
||||||
|
for _, bucket := range buckets[iBucket : iBucket+l] {
|
||||||
|
currentBucketAbsolute += bucket
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iBucket += l
|
||||||
|
iSpan++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Merge span with previous one and insert empty buckets.
|
||||||
|
offset := int(spans[iSpan].Offset)
|
||||||
|
spans[iSpan-1].Length += uint32(offset) + spans[iSpan].Length
|
||||||
|
spans = append(spans[:iSpan], spans[iSpan+1:]...)
|
||||||
|
newBuckets := make([]IBC, len(buckets)+offset)
|
||||||
|
copy(newBuckets, buckets[:iBucket])
|
||||||
|
copy(newBuckets[iBucket+offset:], buckets[iBucket:])
|
||||||
|
if deltaBuckets {
|
||||||
|
newBuckets[iBucket] = -currentBucketAbsolute
|
||||||
|
newBuckets[iBucket+offset] += currentBucketAbsolute
|
||||||
|
}
|
||||||
|
iBucket += offset
|
||||||
|
buckets = newBuckets
|
||||||
|
currentBucketAbsolute = buckets[iBucket]
|
||||||
|
// Note that with many merges, it would be more efficient to
|
||||||
|
// first record all the chunks of empty buckets to insert and
|
||||||
|
// then do it in one go through all the buckets.
|
||||||
|
}
|
||||||
|
|
||||||
|
return buckets, spans
|
||||||
|
}
|
||||||
|
|
||||||
|
func getBound(idx, schema int32) float64 {
	// Here a bit of context about the behavior for the last bucket counting
	// regular numbers (called simply "last bucket" below) and the bucket
	// counting observations of ±Inf (called "inf bucket" below, with an idx
	// one higher than that of the "last bucket"):
	//
	// If we apply the usual formula to the last bucket, its upper bound
	// would be calculated as +Inf. The reason is that the max possible
	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
	// the calculated bucket boundaries. So the calculated boundary has to
	// be larger than math.MaxFloat64, and the only float64 larger than
	// math.MaxFloat64 is +Inf. However, we want to count actual
	// observations of ±Inf in the inf bucket. Therefore, we have to treat
	// the upper bound of the last bucket specially and set it to
	// math.MaxFloat64. (The upper bound of the inf bucket, with its idx
	// being one higher than that of the last bucket, naturally comes out as
	// +Inf by the usual formula. So that's fine.)
	//
	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
	// 1024. If there were a float64 number following math.MaxFloat64, it
	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
	// of 0.5 and an exp of 1025. However, since frac must be smaller than
	// 1, and exp must be smaller than 1025, either representation overflows
	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
	// largest possible float64. Q.E.D.) However, the formula for
	// calculating the upper bound from the idx and schema of the last
	// bucket results in precisely that. It is either frac=1.0 & exp=1024
	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >= 0). (This is,
	// by the way, a power of two where the exponent itself is a power of
	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
	// schemas.) So these are the special cases we have to catch below.
	if schema < 0 {
		exp := int(idx) << -schema
		if exp == 1024 {
			// This is the last bucket before the overflow bucket
			// (for ±Inf observations). Return math.MaxFloat64 as
			// explained above.
			return math.MaxFloat64
		}
		return math.Ldexp(1, exp)
	}

	fracIdx := idx & ((1 << schema) - 1)
	frac := exponentialBounds[schema][fracIdx]
	exp := (int(idx) >> schema) + 1
	if frac == 0.5 && exp == 1025 {
		// This is the last bucket before the overflow bucket (for ±Inf
		// observations). Return math.MaxFloat64 as explained above.
		return math.MaxFloat64
	}
	return math.Ldexp(frac, exp)
}
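// Away from the math.MaxFloat64 special cases above, getBound computes the
// closed form 2^(idx * 2^-schema). A small cross-check sketch (boundClosedForm
// is a hypothetical helper, not part of the patch; math.Pow may differ from
// the table-driven result in the last bit):
func boundClosedForm(idx, schema int32) float64 {
	return math.Pow(2, float64(idx)*math.Pow(2, -float64(schema)))
}

// e.g. boundClosedForm(1, 2) ≈ 1.189207115002721 == getBound(1, 2), while for
// idx 4096, schema 2 the closed form overflows to +Inf and getBound instead
// returns math.MaxFloat64 for the last regular bucket.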
// exponentialBounds is a precalculated table of bucket bounds in the interval
// [0.5,1) in schema 0 to 8.
var exponentialBounds = [][]float64{
	// Schema 0:
	{0.5},
	// Schema 1:
	{0.5, 0.7071067811865475},
	// Schema 2:
	{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
	// Schema 3:
	{
		0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
		0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
	},
	// Schema 4:
	{
		0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
		0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
		0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
		0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
	},
	// Schema 5:
	{
		0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
		0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
		0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
		0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
		0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
		0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
		0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
		0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
	},
	// Schema 6:
	{
		0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
		0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
		0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
		0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
		0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
		0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
		0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
		0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
		0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
		0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
		0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
		0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
		0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
		0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
		0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
		0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
	},
	// Schema 7:
	{
		0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
		0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
		0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
		0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
		0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
		0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
		0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
		0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
		0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
		0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
		0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
		0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
		0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
		0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
		0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
		0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
		0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
		0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
		0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
		0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
		0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
		0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
		0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
		0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
		0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
		0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
		0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
		0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
		0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
		0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
		0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
		0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
	},
	// Schema 8:
	{
		0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
		0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
		0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
		0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
		0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
		0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
		0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
		0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
		0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
		0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
		0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
		0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
		0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
		0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
		0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
		0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
		0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
		0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
		0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
		0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
		0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
		0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
		0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
		0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
		0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
		0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
		0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
		0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
		0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
		0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
		0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
		0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
		0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
		0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
		0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
		0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
		0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
		0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
		0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
		0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
		0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
		0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
		0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
		0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
		0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
		0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
		0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
		0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
		0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
		0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
		0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
		0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
		0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
		0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
		0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
		0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
		0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
		0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
		0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
		0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
		0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
		0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
		0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
		0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
	},
}
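// The table above follows directly from the schema definition: entry i of
// schema s is 2^(i/2^s) / 2, i.e. the bucket boundaries within [0.5,1). A
// hedged sketch of a generator (hypothetical, not part of the patch; the
// committed constants are precalculated so they stay bit-for-bit stable even
// if math.Exp2 rounds differently):
func makeExponentialBounds() [][]float64 {
	bounds := make([][]float64, 9)
	for schema := 0; schema <= 8; schema++ {
		n := 1 << schema
		row := make([]float64, n)
		for i := 0; i < n; i++ {
			row[i] = math.Exp2(float64(i)/float64(n)) / 2
		}
		bounds[schema] = row
	}
	return bounds
}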
112  model/histogram/generic_test.go  Normal file
@@ -0,0 +1,112 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package histogram

import (
	"math"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestGetBound(t *testing.T) {
	scenarios := []struct {
		idx    int32
		schema int32
		want   float64
	}{
		{idx: -1, schema: -1, want: 0.25},
		{idx: 0, schema: -1, want: 1},
		{idx: 1, schema: -1, want: 4},
		{idx: 512, schema: -1, want: math.MaxFloat64},
		{idx: 513, schema: -1, want: math.Inf(+1)},
		{idx: -1, schema: 0, want: 0.5},
		{idx: 0, schema: 0, want: 1},
		{idx: 1, schema: 0, want: 2},
		{idx: 1024, schema: 0, want: math.MaxFloat64},
		{idx: 1025, schema: 0, want: math.Inf(+1)},
		{idx: -1, schema: 2, want: 0.8408964152537144},
		{idx: 0, schema: 2, want: 1},
		{idx: 1, schema: 2, want: 1.189207115002721},
		{idx: 4096, schema: 2, want: math.MaxFloat64},
		{idx: 4097, schema: 2, want: math.Inf(+1)},
	}

	for _, s := range scenarios {
		got := getBound(s.idx, s.schema)
		require.Equal(t, s.want, got, "idx %d, schema %d", s.idx, s.schema)
	}
}
448  model/histogram/histogram.go  Normal file
@@ -0,0 +1,448 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package histogram

import (
	"fmt"
	"math"
	"strings"
)

// Histogram encodes a sparse, high-resolution histogram. See the design
// document for full details:
// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit#
//
// The most tricky bit is how bucket indices represent real bucket boundaries.
// An example for schema 0 (by which each bucket is twice as wide as the
// previous bucket):
//
//	Bucket boundaries →            [-2,-1)  [-1,-0.5)  [-0.5,-0.25)  ...  [-0.001,0.001]  ...  (0.25,0.5]  (0.5,1]  (1,2]  ....
//	                                  ↑         ↑           ↑                    ↑                  ↑          ↑       ↑
//	Zero bucket (width e.g. 0.001) →  |         |           |                    ZB                 |          |       |
//	Positive bucket indices →         |         |           |                                      -1          0       1  2  3
//	Negative bucket indices →   3  2  1         0          -1  ...
//
// Which bucket indices are actually used is determined by the spans.
type Histogram struct {
	// Currently valid schema numbers are -4 <= n <= 8. They are all for
	// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
	// then each power of two is divided into 2^n logarithmic buckets. Or
	// in other words, each bucket boundary is the previous boundary times
	// 2^(2^-n).
	Schema int32
	// Width of the zero bucket.
	ZeroThreshold float64
	// Observations falling into the zero bucket.
	ZeroCount uint64
	// Total number of observations.
	Count uint64
	// Sum of observations. This is also used as the stale marker.
	Sum float64
	// Spans for positive and negative buckets (see Span below).
	PositiveSpans, NegativeSpans []Span
	// Observation counts in buckets. The first element is an absolute
	// count. All following ones are deltas relative to the previous
	// element.
	PositiveBuckets, NegativeBuckets []int64
}

// A Span defines a continuous sequence of buckets.
type Span struct {
	// Gap to previous span (always positive), or starting index for the 1st
	// span (which can be negative).
	Offset int32
	// Length of the span.
	Length uint32
}
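// To make the span/delta encoding concrete, here is an illustrative sketch
// (values invented): a schema-0 histogram with observations in positive
// buckets 0, 1, 4 and 5, i.e. (0.5,1], (1,2], (8,16] and (16,32], holding
// counts 2, 3, 1 and 1. Two spans encode the gap at buckets 2 and 3, and the
// bucket slice stores the absolute count 2 followed by deltas 1, -2, 0.
var exampleHist = &Histogram{
	Schema:          0,
	Count:           7,
	Sum:             42.0, // invented for illustration
	PositiveSpans:   []Span{{Offset: 0, Length: 2}, {Offset: 2, Length: 2}},
	PositiveBuckets: []int64{2, 1, -2, 0},
}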
// Copy returns a deep copy of the Histogram.
func (h *Histogram) Copy() *Histogram {
	c := *h

	if len(h.PositiveSpans) != 0 {
		c.PositiveSpans = make([]Span, len(h.PositiveSpans))
		copy(c.PositiveSpans, h.PositiveSpans)
	}
	if len(h.NegativeSpans) != 0 {
		c.NegativeSpans = make([]Span, len(h.NegativeSpans))
		copy(c.NegativeSpans, h.NegativeSpans)
	}
	if len(h.PositiveBuckets) != 0 {
		c.PositiveBuckets = make([]int64, len(h.PositiveBuckets))
		copy(c.PositiveBuckets, h.PositiveBuckets)
	}
	if len(h.NegativeBuckets) != 0 {
		c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
		copy(c.NegativeBuckets, h.NegativeBuckets)
	}

	return &c
}

// String returns a string representation of the Histogram.
func (h *Histogram) String() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "{count:%d, sum:%g", h.Count, h.Sum)

	var nBuckets []Bucket[uint64]
	for it := h.NegativeBucketIterator(); it.Next(); {
		bucket := it.At()
		if bucket.Count != 0 {
			nBuckets = append(nBuckets, bucket)
		}
	}
	for i := len(nBuckets) - 1; i >= 0; i-- {
		fmt.Fprintf(&sb, ", %s", nBuckets[i].String())
	}

	if h.ZeroCount != 0 {
		fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String())
	}

	for it := h.PositiveBucketIterator(); it.Next(); {
		bucket := it.At()
		if bucket.Count != 0 {
			fmt.Fprintf(&sb, ", %s", bucket.String())
		}
	}

	sb.WriteRune('}')
	return sb.String()
}
// ZeroBucket returns the zero bucket.
func (h *Histogram) ZeroBucket() Bucket[uint64] {
	return Bucket[uint64]{
		Lower:          -h.ZeroThreshold,
		Upper:          h.ZeroThreshold,
		LowerInclusive: true,
		UpperInclusive: true,
		Count:          h.ZeroCount,
	}
}

// PositiveBucketIterator returns a BucketIterator to iterate over all positive
// buckets in ascending order (starting next to the zero bucket and going up).
func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] {
	return newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
}

// NegativeBucketIterator returns a BucketIterator to iterate over all negative
// buckets in descending order (starting next to the zero bucket and going down).
func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] {
	return newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
}

// CumulativeBucketIterator returns a BucketIterator to iterate over a
// cumulative view of the buckets. This method currently only supports
// Histograms without negative buckets and panics if the Histogram has negative
// buckets. It is currently only used for testing.
func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
	if len(h.NegativeBuckets) > 0 {
		panic("CumulativeBucketIterator called on Histogram with negative buckets")
	}
	return &cumulativeBucketIterator{h: h, posSpansIdx: -1}
}
// Equals returns true if the given histogram matches exactly.
// Exact match is when there are no new buckets (even empty) and no missing buckets,
// and all the bucket values match. Spans can differ by zero-length spans in between,
// but they must represent the same bucket layout to match.
func (h *Histogram) Equals(h2 *Histogram) bool {
	if h2 == nil {
		return false
	}

	if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
		h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum {
		return false
	}

	if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
		return false
	}
	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
		return false
	}

	if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
		return false
	}
	if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
		return false
	}

	return true
}

// spansMatch returns true if both sets of spans represent the same bucket
// layout after combining zero-length spans with the next non-zero-length span.
func spansMatch(s1, s2 []Span) bool {
	if len(s1) == 0 && len(s2) == 0 {
		return true
	}

	s1idx, s2idx := 0, 0
	for {
		if s1idx >= len(s1) {
			return allEmptySpans(s2[s2idx:])
		}
		if s2idx >= len(s2) {
			return allEmptySpans(s1[s1idx:])
		}

		currS1, currS2 := s1[s1idx], s2[s2idx]
		s1idx++
		s2idx++
		if currS1.Length == 0 {
			// This span is zero length, so we add consecutive such spans
			// until we find a non-zero span.
			for ; s1idx < len(s1) && s1[s1idx].Length == 0; s1idx++ {
				currS1.Offset += s1[s1idx].Offset
			}
			if s1idx < len(s1) {
				currS1.Offset += s1[s1idx].Offset
				currS1.Length = s1[s1idx].Length
				s1idx++
			}
		}
		if currS2.Length == 0 {
			// This span is zero length, so we add consecutive such spans
			// until we find a non-zero span.
			for ; s2idx < len(s2) && s2[s2idx].Length == 0; s2idx++ {
				currS2.Offset += s2[s2idx].Offset
			}
			if s2idx < len(s2) {
				currS2.Offset += s2[s2idx].Offset
				currS2.Length = s2[s2idx].Length
				s2idx++
			}
		}

		if currS1.Length == 0 && currS2.Length == 0 {
			// The last spans of both sets are zero length. Previous spans match.
			return true
		}

		if currS1.Offset != currS2.Offset || currS1.Length != currS2.Length {
			return false
		}
	}
}
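// For instance, the following two layouts match: the zero-length span in
// spansB only contributes its offset to the span after it, so both slices
// describe buckets at indices 0, 1, 5 and 6 (an illustrative sketch, not
// part of the patch):
var (
	spansA = []Span{{Offset: 0, Length: 2}, {Offset: 3, Length: 2}}
	spansB = []Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 0}, {Offset: 2, Length: 2}}
)

// spansMatch(spansA, spansB) == true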
func allEmptySpans(s []Span) bool {
	for _, ss := range s {
		if ss.Length > 0 {
			return false
		}
	}
	return true
}

func bucketsMatch(b1, b2 []int64) bool {
	if len(b1) != len(b2) {
		return false
	}
	for i, b := range b1 {
		if b != b2[i] {
			return false
		}
	}
	return true
}

// Compact works like FloatHistogram.Compact. See there for detailed
// explanations.
func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram {
	h.PositiveBuckets, h.PositiveSpans = compactBuckets(
		h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, true,
	)
	h.NegativeBuckets, h.NegativeSpans = compactBuckets(
		h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, true,
	)
	return h
}
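// A hedged usage sketch (hypothetical, not part of the patch): with
// maxEmptyBuckets=0, Compact merges spans separated by a zero offset, as the
// "eliminate zero offsets" case in TestHistogramCompact below exercises.
func exampleCompact() {
	h := &Histogram{
		PositiveSpans:   []Span{{Offset: -2, Length: 1}, {Offset: 0, Length: 3}},
		PositiveBuckets: []int64{1, 3, -3, 42},
	}
	h.Compact(0) // h.PositiveSpans is now []Span{{Offset: -2, Length: 4}}.
}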
// ToFloat returns a FloatHistogram representation of the Histogram. It is a
// deep copy (e.g. spans are not shared).
func (h *Histogram) ToFloat() *FloatHistogram {
	var (
		positiveSpans, negativeSpans     []Span
		positiveBuckets, negativeBuckets []float64
	)
	if len(h.PositiveSpans) != 0 {
		positiveSpans = make([]Span, len(h.PositiveSpans))
		copy(positiveSpans, h.PositiveSpans)
	}
	if len(h.NegativeSpans) != 0 {
		negativeSpans = make([]Span, len(h.NegativeSpans))
		copy(negativeSpans, h.NegativeSpans)
	}
	// Convert the delta-encoded integer buckets into absolute float counts.
	if len(h.PositiveBuckets) != 0 {
		positiveBuckets = make([]float64, len(h.PositiveBuckets))
		var current float64
		for i, b := range h.PositiveBuckets {
			current += float64(b)
			positiveBuckets[i] = current
		}
	}
	if len(h.NegativeBuckets) != 0 {
		negativeBuckets = make([]float64, len(h.NegativeBuckets))
		var current float64
		for i, b := range h.NegativeBuckets {
			current += float64(b)
			negativeBuckets[i] = current
		}
	}

	return &FloatHistogram{
		Schema:          h.Schema,
		ZeroThreshold:   h.ZeroThreshold,
		ZeroCount:       float64(h.ZeroCount),
		Count:           float64(h.Count),
		Sum:             h.Sum,
		PositiveSpans:   positiveSpans,
		NegativeSpans:   negativeSpans,
		PositiveBuckets: positiveBuckets,
		NegativeBuckets: negativeBuckets,
	}
}
type regularBucketIterator struct {
	baseBucketIterator[uint64, int64]
}

func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) *regularBucketIterator {
	i := baseBucketIterator[uint64, int64]{
		schema:   schema,
		spans:    spans,
		buckets:  buckets,
		positive: positive,
	}
	return &regularBucketIterator{i}
}

func (r *regularBucketIterator) Next() bool {
	if r.spansIdx >= len(r.spans) {
		return false
	}
	span := r.spans[r.spansIdx]
	// Seed currIdx for the first bucket.
	if r.bucketsIdx == 0 {
		r.currIdx = span.Offset
	} else {
		r.currIdx++
	}
	for r.idxInSpan >= span.Length {
		// We have exhausted the current span and have to find a new
		// one. We'll even handle pathological spans of length 0.
		r.idxInSpan = 0
		r.spansIdx++
		if r.spansIdx >= len(r.spans) {
			return false
		}
		span = r.spans[r.spansIdx]
		r.currIdx += span.Offset
	}

	r.currCount += r.buckets[r.bucketsIdx]
	r.idxInSpan++
	r.bucketsIdx++
	return true
}
type cumulativeBucketIterator struct {
	h *Histogram

	posSpansIdx   int    // Index in h.PositiveSpans we are in. -1 means 0 bucket.
	posBucketsIdx int    // Index in h.PositiveBuckets.
	idxInSpan     uint32 // Index in the current span. 0 <= idxInSpan < span.Length.

	initialized         bool
	currIdx             int32   // The actual bucket index after decoding from spans.
	currUpper           float64 // The upper boundary of the current bucket.
	currCount           int64   // Current non-cumulative count for the current bucket. Does not apply for empty bucket.
	currCumulativeCount uint64  // Current "cumulative" count for the current bucket.

	// Between 2 spans there could be some empty buckets which
	// still need to be counted for cumulative buckets.
	// When we hit the end of a span, we use this to iterate
	// through the empty buckets.
	emptyBucketCount int32
}

func (c *cumulativeBucketIterator) Next() bool {
	if c.posSpansIdx == -1 {
		// Zero bucket.
		c.posSpansIdx++
		if c.h.ZeroCount == 0 {
			return c.Next()
		}

		c.currUpper = c.h.ZeroThreshold
		c.currCount = int64(c.h.ZeroCount)
		c.currCumulativeCount = uint64(c.currCount)
		return true
	}

	if c.posSpansIdx >= len(c.h.PositiveSpans) {
		return false
	}

	if c.emptyBucketCount > 0 {
		// We are traversing through empty buckets at the moment.
		c.currUpper = getBound(c.currIdx, c.h.Schema)
		c.currIdx++
		c.emptyBucketCount--
		return true
	}

	span := c.h.PositiveSpans[c.posSpansIdx]
	if c.posSpansIdx == 0 && !c.initialized {
		// Initializing.
		c.currIdx = span.Offset
		// The first bucket is an absolute value, not a delta relative to the zero bucket.
		c.currCount = 0
		c.initialized = true
	}

	c.currCount += c.h.PositiveBuckets[c.posBucketsIdx]
	c.currCumulativeCount += uint64(c.currCount)
	c.currUpper = getBound(c.currIdx, c.h.Schema)

	c.posBucketsIdx++
	c.idxInSpan++
	c.currIdx++
	if c.idxInSpan >= span.Length {
		// Move to the next span. This one is done.
		c.posSpansIdx++
		c.idxInSpan = 0
		if c.posSpansIdx < len(c.h.PositiveSpans) {
			c.emptyBucketCount = c.h.PositiveSpans[c.posSpansIdx].Offset
		}
	}

	return true
}

func (c *cumulativeBucketIterator) At() Bucket[uint64] {
	return Bucket[uint64]{
		Upper:          c.currUpper,
		Lower:          math.Inf(-1),
		UpperInclusive: true,
		LowerInclusive: true,
		Count:          c.currCumulativeCount,
		Index:          c.currIdx - 1,
	}
}
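// A hedged usage sketch (hypothetical helper, not part of the patch):
// iterating the cumulative view yields classic-histogram-style "le" buckets,
// as TestCumulativeBucketIterator in the next file exercises.
func printCumulative(h *Histogram) {
	for it := h.CumulativeBucketIterator(); it.Next(); {
		b := it.At()
		fmt.Printf("le=%g count=%d\n", b.Upper, b.Count)
	}
}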
782  model/histogram/histogram_test.go  Normal file
@@ -0,0 +1,782 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package histogram

import (
	"fmt"
	"math"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestHistogramString(t *testing.T) {
	cases := []struct {
		histogram      Histogram
		expectedString string
	}{
		{
			histogram: Histogram{
				Schema: 0,
			},
			expectedString: "{count:0, sum:0}",
		},
		{
			histogram: Histogram{
				Schema:        0,
				Count:         9,
				Sum:           -3.1415,
				ZeroCount:     12,
				ZeroThreshold: 0.001,
				NegativeSpans: []Span{
					{Offset: 0, Length: 5},
					{Offset: 1, Length: 1},
				},
				NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
			},
			expectedString: "{count:9, sum:-3.1415, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, [-0.001,0.001]:12}",
		},
		{
			histogram: Histogram{
				Schema: 0,
				Count:  19,
				Sum:    2.7,
				PositiveSpans: []Span{
					{Offset: 0, Length: 4},
					{Offset: 0, Length: 0},
					{Offset: 0, Length: 3},
				},
				PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
				NegativeSpans: []Span{
					{Offset: 0, Length: 5},
					{Offset: 1, Length: 0},
					{Offset: 0, Length: 1},
				},
				NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
			},
			expectedString: "{count:19, sum:2.7, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, (0.5,1]:1, (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:1, (16,32]:1, (32,64]:1}",
		},
	}

	for i, c := range cases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			actualString := c.histogram.String()
			require.Equal(t, c.expectedString, actualString)
		})
	}
}
func TestCumulativeBucketIterator(t *testing.T) {
	cases := []struct {
		histogram       Histogram
		expectedBuckets []Bucket[uint64]
	}{
		{
			histogram: Histogram{
				Schema: 0,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
			},
			expectedBuckets: []Bucket[uint64]{
				{Lower: math.Inf(-1), Upper: 1, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
				{Lower: math.Inf(-1), Upper: 2, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 1},
				{Lower: math.Inf(-1), Upper: 4, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 2},
				{Lower: math.Inf(-1), Upper: 8, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 3},
				{Lower: math.Inf(-1), Upper: 16, Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 4},
			},
		},
		{
			histogram: Histogram{
				Schema: 0,
				PositiveSpans: []Span{
					{Offset: 0, Length: 5},
					{Offset: 1, Length: 1},
				},
				PositiveBuckets: []int64{1, 2, -2, 1, -1, 0},
			},
			expectedBuckets: []Bucket[uint64]{
				{Lower: math.Inf(-1), Upper: 1, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
				{Lower: math.Inf(-1), Upper: 2, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 1},
				{Lower: math.Inf(-1), Upper: 4, Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 2},
				{Lower: math.Inf(-1), Upper: 8, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 3},
				{Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 4},
				{Lower: math.Inf(-1), Upper: 32, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 5},
				{Lower: math.Inf(-1), Upper: 64, Count: 9, LowerInclusive: true, UpperInclusive: true, Index: 6},
			},
		},
		{
			histogram: Histogram{
				Schema: 0,
				PositiveSpans: []Span{
					{Offset: 0, Length: 7},
				},
				PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
			},
			expectedBuckets: []Bucket[uint64]{
				{Lower: math.Inf(-1), Upper: 1, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
				{Lower: math.Inf(-1), Upper: 2, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 1},
				{Lower: math.Inf(-1), Upper: 4, Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 2},
				{Lower: math.Inf(-1), Upper: 8, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 3},
				{Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 4},
				{Lower: math.Inf(-1), Upper: 32, Count: 9, LowerInclusive: true, UpperInclusive: true, Index: 5},
				{Lower: math.Inf(-1), Upper: 64, Count: 10, LowerInclusive: true, UpperInclusive: true, Index: 6},
			},
		},
		{
			histogram: Histogram{
				Schema: 3,
				PositiveSpans: []Span{
					{Offset: -5, Length: 2}, // -5 -4
					{Offset: 2, Length: 3},  // -1 0 1
					{Offset: 2, Length: 2},  // 4 5
				},
				PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 3},
			},
			expectedBuckets: []Bucket[uint64]{
				{Lower: math.Inf(-1), Upper: 0.6484197773255048, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: -5},
				{Lower: math.Inf(-1), Upper: 0.7071067811865475, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: -4},
				{Lower: math.Inf(-1), Upper: 0.7711054127039704, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: -3},
				{Lower: math.Inf(-1), Upper: 0.8408964152537144, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: -2},
				{Lower: math.Inf(-1), Upper: 0.9170040432046711, Count: 5, LowerInclusive: true, UpperInclusive: true, Index: -1},
				{Lower: math.Inf(-1), Upper: 1, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 0},
				{Lower: math.Inf(-1), Upper: 1.0905077326652577, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 1},
				{Lower: math.Inf(-1), Upper: 1.189207115002721, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2},
				{Lower: math.Inf(-1), Upper: 1.2968395546510096, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 3},
				{Lower: math.Inf(-1), Upper: 1.414213562373095, Count: 9, LowerInclusive: true, UpperInclusive: true, Index: 4},
				{Lower: math.Inf(-1), Upper: 1.5422108254079407, Count: 13, LowerInclusive: true, UpperInclusive: true, Index: 5},
			},
		},
		{
			histogram: Histogram{
				Schema: -2,
				PositiveSpans: []Span{
					{Offset: -2, Length: 4}, // -2 -1 0 1
					{Offset: 2, Length: 2},  // 4 5
				},
				PositiveBuckets: []int64{1, 2, -2, 1, -1, 0},
			},
			expectedBuckets: []Bucket[uint64]{
				{Lower: math.Inf(-1), Upper: 0.00390625, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: -2},
				{Lower: math.Inf(-1), Upper: 0.0625, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: -1},
				{Lower: math.Inf(-1), Upper: 1, Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 0},
				{Lower: math.Inf(-1), Upper: 16, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 1},
				{Lower: math.Inf(-1), Upper: 256, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 2},
				{Lower: math.Inf(-1), Upper: 4096, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 3},
				{Lower: math.Inf(-1), Upper: 65536, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 4},
				{Lower: math.Inf(-1), Upper: 1048576, Count: 9, LowerInclusive: true, UpperInclusive: true, Index: 5},
			},
		},
		{
			histogram: Histogram{
				Schema: -1,
				PositiveSpans: []Span{
					{Offset: -2, Length: 5}, // -2 -1 0 1 2
				},
				PositiveBuckets: []int64{1, 2, -2, 1, -1},
			},
			expectedBuckets: []Bucket[uint64]{
				{Lower: math.Inf(-1), Upper: 0.0625, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: -2},
				{Lower: math.Inf(-1), Upper: 0.25, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: -1},
				{Lower: math.Inf(-1), Upper: 1, Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 0},
				{Lower: math.Inf(-1), Upper: 4, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 1},
				{Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2},
			},
		},
	}

	for i, c := range cases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			it := c.histogram.CumulativeBucketIterator()
			actualBuckets := make([]Bucket[uint64], 0, len(c.expectedBuckets))
			for it.Next() {
				actualBuckets = append(actualBuckets, it.At())
			}
			require.Equal(t, c.expectedBuckets, actualBuckets)
		})
	}
}
func TestRegularBucketIterator(t *testing.T) {
	cases := []struct {
		histogram               Histogram
		expectedPositiveBuckets []Bucket[uint64]
		expectedNegativeBuckets []Bucket[uint64]
	}{
		{
			histogram: Histogram{
				Schema: 0,
			},
			expectedPositiveBuckets: []Bucket[uint64]{},
			expectedNegativeBuckets: []Bucket[uint64]{},
		},
		{
			histogram: Histogram{
				Schema: 0,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
			},
			expectedPositiveBuckets: []Bucket[uint64]{
				{Lower: 0.5, Upper: 1, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 0},
				{Lower: 1, Upper: 2, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
				{Lower: 4, Upper: 8, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
				{Lower: 8, Upper: 16, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
			},
			expectedNegativeBuckets: []Bucket[uint64]{},
		},
		{
			histogram: Histogram{
				Schema: 0,
				NegativeSpans: []Span{
					{Offset: 0, Length: 5},
					{Offset: 1, Length: 1},
				},
				NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
			},
			expectedPositiveBuckets: []Bucket[uint64]{},
			expectedNegativeBuckets: []Bucket[uint64]{
				{Lower: -1, Upper: -0.5, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 0},
				{Lower: -2, Upper: -1, Count: 3, LowerInclusive: true, UpperInclusive: false, Index: 1},
				{Lower: -4, Upper: -2, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 2},
				{Lower: -8, Upper: -4, Count: 2, LowerInclusive: true, UpperInclusive: false, Index: 3},
				{Lower: -16, Upper: -8, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 4},
				{Lower: -64, Upper: -32, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 6},
			},
		},
		{
			histogram: Histogram{
				Schema: 0,
				PositiveSpans: []Span{
					{Offset: 0, Length: 4},
					{Offset: 0, Length: 0},
					{Offset: 0, Length: 3},
				},
				PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
				NegativeSpans: []Span{
					{Offset: 0, Length: 5},
					{Offset: 1, Length: 0},
					{Offset: 0, Length: 1},
				},
				NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
			},
			expectedPositiveBuckets: []Bucket[uint64]{
				{Lower: 0.5, Upper: 1, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 0},
				{Lower: 1, Upper: 2, Count: 3, LowerInclusive: false, UpperInclusive: true, Index: 1},
				{Lower: 2, Upper: 4, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 2},
				{Lower: 4, Upper: 8, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 3},
				{Lower: 8, Upper: 16, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
				{Lower: 16, Upper: 32, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 5},
				{Lower: 32, Upper: 64, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 6},
			},
			expectedNegativeBuckets: []Bucket[uint64]{
				{Lower: -1, Upper: -0.5, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 0},
				{Lower: -2, Upper: -1, Count: 3, LowerInclusive: true, UpperInclusive: false, Index: 1},
				{Lower: -4, Upper: -2, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 2},
				{Lower: -8, Upper: -4, Count: 2, LowerInclusive: true, UpperInclusive: false, Index: 3},
				{Lower: -16, Upper: -8, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 4},
				{Lower: -64, Upper: -32, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 6},
			},
		},
		{
			histogram: Histogram{
				Schema: 3,
				PositiveSpans: []Span{
					{Offset: -5, Length: 2}, // -5 -4
					{Offset: 2, Length: 3},  // -1 0 1
					{Offset: 2, Length: 2},  // 4 5
				},
				PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 3},
			},
			expectedPositiveBuckets: []Bucket[uint64]{
				{Lower: 0.5946035575013605, Upper: 0.6484197773255048, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: -5},
				{Lower: 0.6484197773255048, Upper: 0.7071067811865475, Count: 3, LowerInclusive: false, UpperInclusive: true, Index: -4},
				{Lower: 0.8408964152537144, Upper: 0.9170040432046711, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: -1},
				{Lower: 0.9170040432046711, Upper: 1, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 0},
				{Lower: 1, Upper: 1.0905077326652577, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 1},
				{Lower: 1.2968395546510096, Upper: 1.414213562373095, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
				{Lower: 1.414213562373095, Upper: 1.5422108254079407, Count: 4, LowerInclusive: false, UpperInclusive: true, Index: 5},
			},
			expectedNegativeBuckets: []Bucket[uint64]{},
		},
		{
			histogram: Histogram{
				Schema: -2,
				PositiveSpans: []Span{
					{Offset: -2, Length: 4}, // -2 -1 0 1
					{Offset: 2, Length: 2},  // 4 5
				},
				PositiveBuckets: []int64{1, 2, -2, 1, -1, 0},
			},
			expectedPositiveBuckets: []Bucket[uint64]{
				{Lower: 0.000244140625, Upper: 0.00390625, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: -2},
				{Lower: 0.00390625, Upper: 0.0625, Count: 3, LowerInclusive: false, UpperInclusive: true, Index: -1},
				{Lower: 0.0625, Upper: 1, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 0},
				{Lower: 1, Upper: 16, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
				{Lower: 4096, Upper: 65536, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
				{Lower: 65536, Upper: 1048576, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 5},
			},
			expectedNegativeBuckets: []Bucket[uint64]{},
		},
		{
			histogram: Histogram{
				Schema: -1,
				PositiveSpans: []Span{
					{Offset: -2, Length: 5}, // -2 -1 0 1 2
				},
				PositiveBuckets: []int64{1, 2, -2, 1, -1},
			},
			expectedPositiveBuckets: []Bucket[uint64]{
				{Lower: 0.015625, Upper: 0.0625, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: -2},
				{Lower: 0.0625, Upper: 0.25, Count: 3, LowerInclusive: false, UpperInclusive: true, Index: -1},
				{Lower: 0.25, Upper: 1, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 0},
				{Lower: 1, Upper: 4, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
				{Lower: 4, Upper: 16, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 2},
			},
			expectedNegativeBuckets: []Bucket[uint64]{},
		},
	}

	for i, c := range cases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			it := c.histogram.PositiveBucketIterator()
			actualPositiveBuckets := make([]Bucket[uint64], 0, len(c.expectedPositiveBuckets))
			for it.Next() {
				actualPositiveBuckets = append(actualPositiveBuckets, it.At())
			}
			require.Equal(t, c.expectedPositiveBuckets, actualPositiveBuckets)
			it = c.histogram.NegativeBucketIterator()
			actualNegativeBuckets := make([]Bucket[uint64], 0, len(c.expectedNegativeBuckets))
			for it.Next() {
				actualNegativeBuckets = append(actualNegativeBuckets, it.At())
			}
			require.Equal(t, c.expectedNegativeBuckets, actualNegativeBuckets)
		})
	}
}
func TestHistogramToFloat(t *testing.T) {
	h := Histogram{
		Schema:        3,
		Count:         61,
		Sum:           2.7,
		ZeroThreshold: 0.1,
		ZeroCount:     42,
		PositiveSpans: []Span{
			{Offset: 0, Length: 4},
			{Offset: 0, Length: 0},
			{Offset: 0, Length: 3},
		},
		PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
		NegativeSpans: []Span{
			{Offset: 0, Length: 5},
			{Offset: 1, Length: 0},
			{Offset: 0, Length: 1},
		},
		NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
	}
	fh := h.ToFloat()

	require.Equal(t, h.String(), fh.String())
}
func TestHistogramMatches(t *testing.T) {
	h1 := Histogram{
		Schema:        3,
		Count:         61,
		Sum:           2.7,
		ZeroThreshold: 0.1,
		ZeroCount:     42,
		PositiveSpans: []Span{
			{Offset: 0, Length: 4},
			{Offset: 10, Length: 3},
		},
		PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
		NegativeSpans: []Span{
			{Offset: 0, Length: 4},
			{Offset: 10, Length: 3},
		},
		NegativeBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
	}

	h2 := h1.Copy()
	require.True(t, h1.Equals(h2))

	// Changed spans but same layout.
	h2.PositiveSpans = append(h2.PositiveSpans, Span{Offset: 5})
	h2.NegativeSpans = append(h2.NegativeSpans, Span{Offset: 2})
	require.True(t, h1.Equals(h2))
	require.True(t, h2.Equals(&h1))
	// Adding empty spans in between.
	h2.PositiveSpans[1].Offset = 6
	h2.PositiveSpans = []Span{
		h2.PositiveSpans[0],
		{Offset: 1},
		{Offset: 3},
		h2.PositiveSpans[1],
		h2.PositiveSpans[2],
	}
	h2.NegativeSpans[1].Offset = 5
	h2.NegativeSpans = []Span{
		h2.NegativeSpans[0],
		{Offset: 2},
		{Offset: 3},
		h2.NegativeSpans[1],
		h2.NegativeSpans[2],
	}
	require.True(t, h1.Equals(h2))
	require.True(t, h2.Equals(&h1))

	// All mismatches.
	require.False(t, h1.Equals(nil))

	h2.Schema = 1
	require.False(t, h1.Equals(h2))

	h2 = h1.Copy()
	h2.Count++
	require.False(t, h1.Equals(h2))

	h2 = h1.Copy()
	h2.Sum++
	require.False(t, h1.Equals(h2))

	h2 = h1.Copy()
	h2.ZeroThreshold++
	require.False(t, h1.Equals(h2))

	h2 = h1.Copy()
	h2.ZeroCount++
	require.False(t, h1.Equals(h2))

	// Changing value of buckets.
	h2 = h1.Copy()
	h2.PositiveBuckets[len(h2.PositiveBuckets)-1]++
	require.False(t, h1.Equals(h2))
	h2 = h1.Copy()
	h2.NegativeBuckets[len(h2.NegativeBuckets)-1]++
	require.False(t, h1.Equals(h2))

	// Changing bucket layout.
	h2 = h1.Copy()
	h2.PositiveSpans[1].Offset++
	require.False(t, h1.Equals(h2))
	h2 = h1.Copy()
	h2.NegativeSpans[1].Offset++
	require.False(t, h1.Equals(h2))

	// Adding an empty bucket.
	h2 = h1.Copy()
	h2.PositiveSpans[0].Offset--
	h2.PositiveSpans[0].Length++
	h2.PositiveBuckets = append([]int64{0}, h2.PositiveBuckets...)
	require.False(t, h1.Equals(h2))
	h2 = h1.Copy()
	h2.NegativeSpans[0].Offset--
	h2.NegativeSpans[0].Length++
	h2.NegativeBuckets = append([]int64{0}, h2.NegativeBuckets...)
	require.False(t, h1.Equals(h2))

	// Adding new bucket.
	h2 = h1.Copy()
	h2.PositiveSpans = append(h2.PositiveSpans, Span{
		Offset: 1,
		Length: 1,
	})
	h2.PositiveBuckets = append(h2.PositiveBuckets, 1)
	require.False(t, h1.Equals(h2))
	h2 = h1.Copy()
	h2.NegativeSpans = append(h2.NegativeSpans, Span{
		Offset: 1,
		Length: 1,
	})
	h2.NegativeBuckets = append(h2.NegativeBuckets, 1)
	require.False(t, h1.Equals(h2))
}
|
||||||
|
|
||||||
|
func TestHistogramCompact(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
in *Histogram
|
||||||
|
maxEmptyBuckets int
|
||||||
|
expected *Histogram
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"empty histogram",
|
||||||
|
&Histogram{},
|
||||||
|
0,
|
||||||
|
&Histogram{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"nothing should happen",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 1}, {2, 3}},
|
||||||
|
PositiveBuckets: []int64{1, 3, -3, 42},
|
||||||
|
NegativeSpans: []Span{{3, 2}, {3, 2}},
|
||||||
|
NegativeBuckets: []int64{5, 3, 1.234e5, 1000},
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 1}, {2, 3}},
|
||||||
|
PositiveBuckets: []int64{1, 3, -3, 42},
|
||||||
|
NegativeSpans: []Span{{3, 2}, {3, 2}},
|
||||||
|
NegativeBuckets: []int64{5, 3, 1.234e5, 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"eliminate zero offsets",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 1}, {0, 3}, {0, 1}},
|
||||||
|
PositiveBuckets: []int64{1, 3, -3, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {0, 2}, {2, 1}, {0, 1}},
|
||||||
|
NegativeBuckets: []int64{5, 3, 1.234e5, 1000, 3, 4},
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 5}},
|
||||||
|
PositiveBuckets: []int64{1, 3, -3, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 4}, {2, 2}},
|
||||||
|
NegativeBuckets: []int64{5, 3, 1.234e5, 1000, 3, 4},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"eliminate zero length",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 2}, {2, 0}, {3, 3}},
|
||||||
|
PositiveBuckets: []int64{1, 3, -3, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {0, 0}, {2, 0}, {1, 4}},
|
||||||
|
NegativeBuckets: []int64{5, 3, 1.234e5, 1000, 3, 4},
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 2}, {5, 3}},
|
||||||
|
PositiveBuckets: []int64{1, 3, -3, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 4}},
|
||||||
|
NegativeBuckets: []int64{5, 3, 1.234e5, 1000, 3, 4},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"eliminate multiple zero length spans",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}},
|
||||||
|
PositiveBuckets: []int64{1, 3, -3, 42, 3},
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 2}, {9, 3}},
|
||||||
|
PositiveBuckets: []int64{1, 3, -3, 42, 3},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cut empty buckets at start or end",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-4, 4}, {5, 3}},
|
||||||
|
PositiveBuckets: []int64{0, 0, 1, 3, -3, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 5}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -4, -2, 3, 4, -9},
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 2}, {5, 3}},
|
||||||
|
PositiveBuckets: []int64{1, 3, -3, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 4}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -4, -2, 3, 4},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cut empty buckets at start and end",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-4, 4}, {5, 6}},
|
||||||
|
PositiveBuckets: []int64{0, 0, 1, 3, -3, 42, 3, -46, 0, 0},
|
||||||
|
NegativeSpans: []Span{{-2, 4}, {3, 5}},
|
||||||
|
NegativeBuckets: []int64{0, 0, 5, 3, -4, -2, 3, 4, -9},
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 2}, {5, 3}},
|
||||||
|
PositiveBuckets: []int64{1, 3, -3, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 4}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -4, -2, 3, 4},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cut empty buckets at start or end of spans, even in the middle",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-4, 6}, {3, 6}},
|
||||||
|
PositiveBuckets: []int64{0, 0, 1, 3, -4, 0, 1, 42, 3, -46, 0, 0},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {2, 6}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -8, 4, -2, 3, 4, -9},
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 2}, {5, 3}},
|
||||||
|
PositiveBuckets: []int64{1, 3, -3, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 4}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -4, -2, 3, 4},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cut empty buckets at start or end but merge spans due to maxEmptyBuckets",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-4, 4}, {5, 3}},
|
||||||
|
PositiveBuckets: []int64{0, 0, 1, 3, -3, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 5}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -4, -2, 3, 4, -9},
|
||||||
|
},
|
||||||
|
10,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 10}},
|
||||||
|
PositiveBuckets: []int64{1, 3, -4, 0, 0, 0, 0, 1, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 9}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -8, 0, 0, 4, -2, 3, 4},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cut empty buckets from the middle of a span",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-4, 6}, {3, 3}},
|
||||||
|
PositiveBuckets: []int64{0, 0, 1, -1, 0, 3, -2, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 5}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -4, -2, -2, 3, 4},
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 3}},
|
||||||
|
PositiveBuckets: []int64{1, 2, -2, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 2}, {1, 2}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -4, -2, 1, 4},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cut out a span containing only empty buckets",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-4, 3}, {2, 2}, {3, 4}},
|
||||||
|
PositiveBuckets: []int64{0, 0, 1, -1, 0, 3, -2, 42, 3},
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 1}, {7, 4}},
|
||||||
|
PositiveBuckets: []int64{1, 2, -2, 42, 3},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cut empty buckets from the middle of a span, avoiding some due to maxEmptyBuckets",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-4, 6}, {3, 3}},
|
||||||
|
PositiveBuckets: []int64{0, 0, 1, -1, 0, 3, -2, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 5}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -4, -2, -2, 3, 4},
|
||||||
|
},
|
||||||
|
1,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 3}},
|
||||||
|
PositiveBuckets: []int64{1, 2, -2, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 5}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -4, -2, -2, 3, 4},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"avoiding all cutting of empty buckets from the middle of a chunk due to maxEmptyBuckets",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-4, 6}, {3, 3}},
|
||||||
|
PositiveBuckets: []int64{0, 0, 1, -1, 0, 3, -2, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 5}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -4, -2, -2, 3, 4},
|
||||||
|
},
|
||||||
|
2,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 4}, {3, 3}},
|
||||||
|
PositiveBuckets: []int64{1, -1, 0, 3, -2, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 5}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -4, -2, -2, 3, 4},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"everything merged into one span due to maxEmptyBuckets",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-4, 6}, {3, 3}},
|
||||||
|
PositiveBuckets: []int64{0, 0, 1, -1, 0, 3, -2, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 2}, {3, 5}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -4, -2, -2, 3, 4},
|
||||||
|
},
|
||||||
|
3,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-2, 10}},
|
||||||
|
PositiveBuckets: []int64{1, -1, 0, 3, -3, 0, 0, 1, 42, 3},
|
||||||
|
NegativeSpans: []Span{{0, 10}},
|
||||||
|
NegativeBuckets: []int64{5, 3, -8, 0, 0, 4, -2, -2, 3, 4},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"only empty buckets and maxEmptyBuckets greater zero",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-4, 6}, {3, 3}},
|
||||||
|
PositiveBuckets: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||||
|
NegativeSpans: []Span{{0, 7}},
|
||||||
|
NegativeBuckets: []int64{0, 0, 0, 0, 0, 0, 0},
|
||||||
|
},
|
||||||
|
3,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{},
|
||||||
|
PositiveBuckets: []int64{},
|
||||||
|
NegativeSpans: []Span{},
|
||||||
|
NegativeBuckets: []int64{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"multiple spans of only empty buckets",
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-10, 2}, {2, 1}, {3, 3}},
|
||||||
|
PositiveBuckets: []int64{0, 0, 0, 0, 2, 3},
|
||||||
|
NegativeSpans: []Span{{-10, 2}, {2, 1}, {3, 3}},
|
||||||
|
NegativeBuckets: []int64{2, 3, -5, 0, 0, 0},
|
||||||
|
},
|
||||||
|
0,
|
||||||
|
&Histogram{
|
||||||
|
PositiveSpans: []Span{{-1, 2}},
|
||||||
|
PositiveBuckets: []int64{2, 3},
|
||||||
|
NegativeSpans: []Span{{-10, 2}},
|
||||||
|
NegativeBuckets: []int64{2, 3},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range cases {
|
||||||
|
t.Run(c.name, func(t *testing.T) {
|
||||||
|
require.Equal(t, c.expected, c.in.Compact(c.maxEmptyBuckets))
|
||||||
|
// Compact has happened in-place, too.
|
||||||
|
require.Equal(t, c.expected, c.in)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
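The bucket slices in the tests above are delta-encoded: each entry is the difference to the previous absolute bucket count, mirroring the positive_delta/negative_delta fields of the protobuf format further down. A minimal standalone sketch of recovering absolute counts (expandDeltas is a hypothetical helper, not part of the histogram package):

package main

import "fmt"

// expandDeltas turns delta-encoded bucket counts (each entry is the
// difference to the previous absolute count) into absolute counts.
func expandDeltas(deltas []int64) []int64 {
	abs := make([]int64, len(deltas))
	var cur int64
	for i, d := range deltas {
		cur += d
		abs[i] = cur
	}
	return abs
}

func main() {
	// The PositiveBuckets value used repeatedly in the tests above.
	fmt.Println(expandDeltas([]int64{1, 2, -2, 1, -1, 0, 0})) // [1 3 1 2 1 1 1]
}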
@@ -17,16 +17,23 @@ import (
 	"mime"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 )
 
 // Parser parses samples from a byte slice of samples in the official
 // Prometheus and OpenMetrics text exposition formats.
 type Parser interface {
-	// Series returns the bytes of the series, the timestamp if set, and the value
-	// of the current sample.
+	// Series returns the bytes of a series with a simple float64 as a
+	// value, the timestamp if set, and the value of the current sample.
 	Series() ([]byte, *int64, float64)
 
+	// Histogram returns the bytes of a series with a sparse histogram as a
+	// value, the timestamp if set, and the histogram in the current sample.
+	// Depending on the parsed input, the function returns an (integer) Histogram
+	// or a FloatHistogram, with the respective other return value being nil.
+	Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram)
+
 	// Help returns the metric name and help text in the current entry.
 	// Must only be called after Next returned a help entry.
 	// The returned byte slices become invalid after the next call to Next.
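With the Histogram method added to the interface, a consumer loops over Next and dispatches on the entry type. A minimal sketch, assuming the textparse package as extended by this diff (consume is a hypothetical helper; EntryHistogram is introduced in the next hunk):

package main

import (
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/textparse"
)

// consume walks all entries of a parser and prints float samples and
// native histogram samples.
func consume(p textparse.Parser) error {
	for {
		et, err := p.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		switch et {
		case textparse.EntrySeries:
			series, _, v := p.Series()
			fmt.Printf("float sample %s = %g\n", series, v)
		case textparse.EntryHistogram:
			series, _, h, fh := p.Histogram()
			// Exactly one of h and fh is non-nil.
			if h != nil {
				fmt.Printf("integer histogram %s, count=%d\n", series, h.Count)
			} else {
				fmt.Printf("float histogram %s, count=%g\n", series, fh.Count)
			}
		}
	}
}

func main() {
	// Feed a trivial text-format payload through the same loop.
	if err := consume(textparse.NewPromParser([]byte("metric_a 1\n"))); err != nil {
		fmt.Println("parse error:", err)
	}
}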
@@ -70,22 +77,30 @@ func New(b []byte, contentType string) (Parser, error) {
 	}
 
 	mediaType, _, err := mime.ParseMediaType(contentType)
-	if err == nil && mediaType == "application/openmetrics-text" {
-		return NewOpenMetricsParser(b), nil
+	if err != nil {
+		return NewPromParser(b), err
+	}
+	switch mediaType {
+	case "application/openmetrics-text":
+		return NewOpenMetricsParser(b), nil
+	case "application/vnd.google.protobuf":
+		return NewProtobufParser(b), nil
+	default:
+		return NewPromParser(b), nil
 	}
-	return NewPromParser(b), err
 }
 
 // Entry represents the type of a parsed entry.
 type Entry int
 
 const (
 	EntryInvalid Entry = -1
 	EntryType    Entry = 0
 	EntryHelp    Entry = 1
-	EntrySeries  Entry = 2
+	EntrySeries  Entry = 2 // A series with a simple float64 as value.
 	EntryComment Entry = 3
 	EntryUnit    Entry = 4
+	EntryHistogram Entry = 5 // A series with a sparse histogram as a value.
 )
 
 // MetricType represents metric type values.
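Content-type negotiation now has three outcomes. A minimal sketch of the dispatch (the parameter string mirrors the classic protobuf exposition content type; the payload here is just a placeholder):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	var payload []byte // Placeholder for a scrape response body.

	// mime.ParseMediaType strips the parameters, so the full negotiated
	// content type selects the new protobuf parser.
	p, err := textparse.New(payload, "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited")
	fmt.Printf("%T %v\n", p, err) // *textparse.ProtobufParser <nil>
}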
@@ -27,6 +27,7 @@ import (
 	"unicode/utf8"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/value"
 )
@@ -112,6 +113,12 @@ func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
 	return p.series, nil, p.val
 }
 
+// Histogram always returns (nil, nil, nil, nil) because OpenMetrics does not support
+// sparse histograms.
+func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
+	return nil, nil, nil, nil
+}
+
 // Help returns the metric name and help text in the current entry.
 // Must only be called after Next returned a help entry.
 // The returned byte slices become invalid after the next call to Next.
@@ -237,9 +237,7 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5`
 			p.Metric(&res)
 			found := p.Exemplar(&e)
 			require.Equal(t, exp[i].m, string(m))
-			if e.HasTs {
-				require.Equal(t, exp[i].t, ts)
-			}
+			require.Equal(t, exp[i].t, ts)
 			require.Equal(t, exp[i].v, v)
 			require.Equal(t, exp[i].lset, res)
 			if exp[i].e == nil {
@@ -28,6 +28,7 @@ import (
 	"unsafe"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/value"
 )
@@ -167,6 +168,12 @@ func (p *PromParser) Series() ([]byte, *int64, float64) {
 	return p.series, nil, p.val
 }
 
+// Histogram always returns (nil, nil, nil, nil) because the Prometheus text format
+// does not support sparse histograms.
+func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
+	return nil, nil, nil, nil
+}
+
 // Help returns the metric name and help text in the current entry.
 // Must only be called after Next returned a help entry.
 // The returned byte slices become invalid after the next call to Next.
model/textparse/protobufparse.go (new file, 518 lines)
@@ -0,0 +1,518 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package textparse

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"math"
	"sort"
	"strings"
	"unicode/utf8"

	"github.com/gogo/protobuf/proto"
	"github.com/pkg/errors"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"

	dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
)

// ProtobufParser is a very inefficient way of unmarshaling the old Prometheus
// protobuf format and then presenting it as if it were parsed by a
// Prometheus-2-style text parser. This is only done so that we can easily plug
// in the protobuf format into Prometheus 2. For future use (with the final
// format that will be used for native histograms), we have to revisit the
// parsing. A lot of the efficiency tricks of the Prometheus-2-style parsing
// could be used in a similar fashion (byte-slice pointers into the raw
// payload), which requires some hand-coded protobuf handling. But the current
// parsers all expect the full series name (metric name plus label pairs) as one
// string, which is not how things are represented in the protobuf format. If
// the re-arrangement work is actually causing problems (which has to be seen),
// that expectation needs to be changed.
type ProtobufParser struct {
	in        []byte // The input to parse.
	inPos     int    // Position within the input.
	metricPos int    // Position within Metric slice.
	// fieldPos is the position within a Summary or (legacy) Histogram. -2
	// is the count. -1 is the sum. Otherwise it is the index within
	// quantiles/buckets.
	fieldPos   int
	fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed.
	// state is marked by the entry we are processing. EntryInvalid implies
	// that we have to decode the next MetricFamily.
	state Entry

	mf *dto.MetricFamily

	// The following are just shenanigans to satisfy the Parser interface.
	metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric.
}

// NewProtobufParser returns a parser for the payload in the byte slice.
func NewProtobufParser(b []byte) Parser {
	return &ProtobufParser{
		in:          b,
		state:       EntryInvalid,
		mf:          &dto.MetricFamily{},
		metricBytes: &bytes.Buffer{},
	}
}

// Series returns the bytes of a series with a simple float64 as a
// value, the timestamp if set, and the value of the current sample.
func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
	var (
		m  = p.mf.GetMetric()[p.metricPos]
		ts = m.GetTimestampMs()
		v  float64
	)
	switch p.mf.GetType() {
	case dto.MetricType_COUNTER:
		v = m.GetCounter().GetValue()
	case dto.MetricType_GAUGE:
		v = m.GetGauge().GetValue()
	case dto.MetricType_UNTYPED:
		v = m.GetUntyped().GetValue()
	case dto.MetricType_SUMMARY:
		s := m.GetSummary()
		switch p.fieldPos {
		case -2:
			v = float64(s.GetSampleCount())
		case -1:
			v = s.GetSampleSum()
			// Need to detect summaries without quantile here.
			if len(s.GetQuantile()) == 0 {
				p.fieldsDone = true
			}
		default:
			v = s.GetQuantile()[p.fieldPos].GetValue()
		}
	case dto.MetricType_HISTOGRAM:
		// This should only happen for a legacy histogram.
		h := m.GetHistogram()
		switch p.fieldPos {
		case -2:
			v = float64(h.GetSampleCount())
		case -1:
			v = h.GetSampleSum()
		default:
			bb := h.GetBucket()
			if p.fieldPos >= len(bb) {
				v = float64(h.GetSampleCount())
			} else {
				v = float64(bb[p.fieldPos].GetCumulativeCount())
			}
		}
	default:
		panic("encountered unexpected metric type, this is a bug")
	}
	if ts != 0 {
		return p.metricBytes.Bytes(), &ts, v
	}
	// Nasty hack: Assume that ts==0 means no timestamp. That's not true in
	// general, but proto3 has no distinction between unset and
	// default. Need to avoid in the final format.
	return p.metricBytes.Bytes(), nil, v
}

// Histogram returns the bytes of a series with a native histogram as a value,
// the timestamp if set, and the native histogram in the current sample.
//
// The Compact method is called before returning the Histogram (or FloatHistogram).
//
// If the SampleCountFloat or the ZeroCountFloat in the proto message is > 0,
// the histogram is parsed and returned as a FloatHistogram and nil is returned
// as the (integer) Histogram return value. Otherwise, it is parsed and returned
// as an (integer) Histogram and nil is returned as the FloatHistogram return
// value.
func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
	var (
		m  = p.mf.GetMetric()[p.metricPos]
		ts = m.GetTimestampMs()
		h  = m.GetHistogram()
	)
	if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 {
		// It is a float histogram.
		fh := histogram.FloatHistogram{
			Count:           h.GetSampleCountFloat(),
			Sum:             h.GetSampleSum(),
			ZeroThreshold:   h.GetZeroThreshold(),
			ZeroCount:       h.GetZeroCountFloat(),
			Schema:          h.GetSchema(),
			PositiveSpans:   make([]histogram.Span, len(h.GetPositiveSpan())),
			PositiveBuckets: h.GetPositiveCount(),
			NegativeSpans:   make([]histogram.Span, len(h.GetNegativeSpan())),
			NegativeBuckets: h.GetNegativeCount(),
		}
		for i, span := range h.GetPositiveSpan() {
			fh.PositiveSpans[i].Offset = span.GetOffset()
			fh.PositiveSpans[i].Length = span.GetLength()
		}
		for i, span := range h.GetNegativeSpan() {
			fh.NegativeSpans[i].Offset = span.GetOffset()
			fh.NegativeSpans[i].Length = span.GetLength()
		}
		fh.Compact(0)
		if ts != 0 {
			return p.metricBytes.Bytes(), &ts, nil, &fh
		}
		// Nasty hack: Assume that ts==0 means no timestamp. That's not true in
		// general, but proto3 has no distinction between unset and
		// default. Need to avoid in the final format.
		return p.metricBytes.Bytes(), nil, nil, &fh
	}

	sh := histogram.Histogram{
		Count:           h.GetSampleCount(),
		Sum:             h.GetSampleSum(),
		ZeroThreshold:   h.GetZeroThreshold(),
		ZeroCount:       h.GetZeroCount(),
		Schema:          h.GetSchema(),
		PositiveSpans:   make([]histogram.Span, len(h.GetPositiveSpan())),
		PositiveBuckets: h.GetPositiveDelta(),
		NegativeSpans:   make([]histogram.Span, len(h.GetNegativeSpan())),
		NegativeBuckets: h.GetNegativeDelta(),
	}
	for i, span := range h.GetPositiveSpan() {
		sh.PositiveSpans[i].Offset = span.GetOffset()
		sh.PositiveSpans[i].Length = span.GetLength()
	}
	for i, span := range h.GetNegativeSpan() {
		sh.NegativeSpans[i].Offset = span.GetOffset()
		sh.NegativeSpans[i].Length = span.GetLength()
	}
	sh.Compact(0)
	if ts != 0 {
		return p.metricBytes.Bytes(), &ts, &sh, nil
	}
	return p.metricBytes.Bytes(), nil, &sh, nil
}
// Help returns the metric name and help text in the current entry.
|
||||||
|
// Must only be called after Next returned a help entry.
|
||||||
|
// The returned byte slices become invalid after the next call to Next.
|
||||||
|
func (p *ProtobufParser) Help() ([]byte, []byte) {
|
||||||
|
return p.metricBytes.Bytes(), []byte(p.mf.GetHelp())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the metric name and type in the current entry.
|
||||||
|
// Must only be called after Next returned a type entry.
|
||||||
|
// The returned byte slices become invalid after the next call to Next.
|
||||||
|
func (p *ProtobufParser) Type() ([]byte, MetricType) {
|
||||||
|
n := p.metricBytes.Bytes()
|
||||||
|
switch p.mf.GetType() {
|
||||||
|
case dto.MetricType_COUNTER:
|
||||||
|
return n, MetricTypeCounter
|
||||||
|
case dto.MetricType_GAUGE:
|
||||||
|
return n, MetricTypeGauge
|
||||||
|
case dto.MetricType_HISTOGRAM:
|
||||||
|
return n, MetricTypeHistogram
|
||||||
|
case dto.MetricType_SUMMARY:
|
||||||
|
return n, MetricTypeSummary
|
||||||
|
}
|
||||||
|
return n, MetricTypeUnknown
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unit always returns (nil, nil) because units aren't supported by the protobuf
|
||||||
|
// format.
|
||||||
|
func (p *ProtobufParser) Unit() ([]byte, []byte) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Comment always returns nil because comments aren't supported by the protobuf
|
||||||
|
// format.
|
||||||
|
func (p *ProtobufParser) Comment() []byte {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Metric writes the labels of the current sample into the passed labels.
|
||||||
|
// It returns the string from which the metric was parsed.
|
||||||
|
func (p *ProtobufParser) Metric(l *labels.Labels) string {
|
||||||
|
*l = append(*l, labels.Label{
|
||||||
|
Name: labels.MetricName,
|
||||||
|
Value: p.getMagicName(),
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
|
||||||
|
*l = append(*l, labels.Label{
|
||||||
|
Name: lp.GetName(),
|
||||||
|
Value: lp.GetValue(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if needed, name, value := p.getMagicLabel(); needed {
|
||||||
|
*l = append(*l, labels.Label{Name: name, Value: value})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort labels to maintain the sorted labels invariant.
|
||||||
|
sort.Sort(*l)
|
||||||
|
|
||||||
|
return p.metricBytes.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exemplar writes the exemplar of the current sample into the passed
|
||||||
|
// exemplar. It returns if an exemplar exists or not. In case of a native
|
||||||
|
// histogram, the legacy bucket section is still used for exemplars. To ingest
|
||||||
|
// all examplars, call the Exemplar method repeatedly until it returns false.
|
||||||
|
func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
|
||||||
|
m := p.mf.GetMetric()[p.metricPos]
|
||||||
|
var exProto *dto.Exemplar
|
||||||
|
switch p.mf.GetType() {
|
||||||
|
case dto.MetricType_COUNTER:
|
||||||
|
exProto = m.GetCounter().GetExemplar()
|
||||||
|
case dto.MetricType_HISTOGRAM:
|
||||||
|
bb := m.GetHistogram().GetBucket()
|
||||||
|
if p.fieldPos < 0 {
|
||||||
|
if p.state == EntrySeries {
|
||||||
|
return false // At _count or _sum.
|
||||||
|
}
|
||||||
|
p.fieldPos = 0 // Start at 1st bucket for native histograms.
|
||||||
|
}
|
||||||
|
for p.fieldPos < len(bb) {
|
||||||
|
exProto = bb[p.fieldPos].GetExemplar()
|
||||||
|
if p.state == EntrySeries {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
p.fieldPos++
|
||||||
|
if exProto != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if exProto == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
ex.Value = exProto.GetValue()
|
||||||
|
if ts := exProto.GetTimestamp(); ts != nil {
|
||||||
|
ex.HasTs = true
|
||||||
|
ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000)
|
||||||
|
}
|
||||||
|
for _, lp := range exProto.GetLabel() {
|
||||||
|
ex.Labels = append(ex.Labels, labels.Label{
|
||||||
|
Name: lp.GetName(),
|
||||||
|
Value: lp.GetValue(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next advances the parser to the next "sample" (emulating the behavior of a
|
||||||
|
// text format parser). It returns (EntryInvalid, io.EOF) if no samples were
|
||||||
|
// read.
|
||||||
|
func (p *ProtobufParser) Next() (Entry, error) {
|
||||||
|
switch p.state {
|
||||||
|
case EntryInvalid:
|
||||||
|
p.metricPos = 0
|
||||||
|
p.fieldPos = -2
|
||||||
|
n, err := readDelimited(p.in[p.inPos:], p.mf)
|
||||||
|
p.inPos += n
|
||||||
|
if err != nil {
|
||||||
|
return p.state, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip empty metric families.
|
||||||
|
if len(p.mf.GetMetric()) == 0 {
|
||||||
|
return p.Next()
|
||||||
|
}
|
||||||
|
|
||||||
|
// We are at the beginning of a metric family. Put only the name
|
||||||
|
// into metricBytes and validate only name and help for now.
|
||||||
|
name := p.mf.GetName()
|
||||||
|
if !model.IsValidMetricName(model.LabelValue(name)) {
|
||||||
|
return EntryInvalid, errors.Errorf("invalid metric name: %s", name)
|
||||||
|
}
|
||||||
|
if help := p.mf.GetHelp(); !utf8.ValidString(help) {
|
||||||
|
return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help)
|
||||||
|
}
|
||||||
|
p.metricBytes.Reset()
|
||||||
|
p.metricBytes.WriteString(name)
|
||||||
|
|
||||||
|
p.state = EntryHelp
|
||||||
|
case EntryHelp:
|
||||||
|
p.state = EntryType
|
||||||
|
case EntryType:
|
||||||
|
if p.mf.GetType() == dto.MetricType_HISTOGRAM &&
|
||||||
|
isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) {
|
||||||
|
p.state = EntryHistogram
|
||||||
|
} else {
|
||||||
|
p.state = EntrySeries
|
||||||
|
}
|
||||||
|
if err := p.updateMetricBytes(); err != nil {
|
||||||
|
return EntryInvalid, err
|
||||||
|
}
|
||||||
|
case EntryHistogram, EntrySeries:
|
||||||
|
if p.state == EntrySeries && !p.fieldsDone &&
|
||||||
|
(p.mf.GetType() == dto.MetricType_SUMMARY || p.mf.GetType() == dto.MetricType_HISTOGRAM) {
|
||||||
|
p.fieldPos++
|
||||||
|
} else {
|
||||||
|
p.metricPos++
|
||||||
|
p.fieldPos = -2
|
||||||
|
p.fieldsDone = false
|
||||||
|
}
|
||||||
|
if p.metricPos >= len(p.mf.GetMetric()) {
|
||||||
|
p.state = EntryInvalid
|
||||||
|
return p.Next()
|
||||||
|
}
|
||||||
|
if err := p.updateMetricBytes(); err != nil {
|
||||||
|
return EntryInvalid, err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return EntryInvalid, errors.Errorf("invalid protobuf parsing state: %d", p.state)
|
||||||
|
}
|
||||||
|
return p.state, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ProtobufParser) updateMetricBytes() error {
|
||||||
|
b := p.metricBytes
|
||||||
|
b.Reset()
|
||||||
|
b.WriteString(p.getMagicName())
|
||||||
|
for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
|
||||||
|
b.WriteByte(model.SeparatorByte)
|
||||||
|
n := lp.GetName()
|
||||||
|
if !model.LabelName(n).IsValid() {
|
||||||
|
return errors.Errorf("invalid label name: %s", n)
|
||||||
|
}
|
||||||
|
b.WriteString(n)
|
||||||
|
b.WriteByte(model.SeparatorByte)
|
||||||
|
v := lp.GetValue()
|
||||||
|
if !utf8.ValidString(v) {
|
||||||
|
return errors.Errorf("invalid label value: %s", v)
|
||||||
|
}
|
||||||
|
b.WriteString(v)
|
||||||
|
}
|
||||||
|
if needed, n, v := p.getMagicLabel(); needed {
|
||||||
|
b.WriteByte(model.SeparatorByte)
|
||||||
|
b.WriteString(n)
|
||||||
|
b.WriteByte(model.SeparatorByte)
|
||||||
|
b.WriteString(v)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getMagicName usually just returns p.mf.GetType() but adds a magic suffix
|
||||||
|
// ("_count", "_sum", "_bucket") if needed according to the current parser
|
||||||
|
// state.
|
||||||
|
func (p *ProtobufParser) getMagicName() string {
|
||||||
|
t := p.mf.GetType()
|
||||||
|
if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_SUMMARY) {
|
||||||
|
return p.mf.GetName()
|
||||||
|
}
|
||||||
|
if p.fieldPos == -2 {
|
||||||
|
return p.mf.GetName() + "_count"
|
||||||
|
}
|
||||||
|
if p.fieldPos == -1 {
|
||||||
|
return p.mf.GetName() + "_sum"
|
||||||
|
}
|
||||||
|
if t == dto.MetricType_HISTOGRAM {
|
||||||
|
return p.mf.GetName() + "_bucket"
|
||||||
|
}
|
||||||
|
return p.mf.GetName()
|
||||||
|
}
|
||||||
|
|
||||||
|
// getMagicLabel returns if a magic label ("quantile" or "le") is needed and, if
|
||||||
|
// so, its name and value. It also sets p.fieldsDone if applicable.
|
||||||
|
func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
|
||||||
|
if p.state == EntryHistogram || p.fieldPos < 0 {
|
||||||
|
return false, "", ""
|
||||||
|
}
|
||||||
|
switch p.mf.GetType() {
|
||||||
|
case dto.MetricType_SUMMARY:
|
||||||
|
qq := p.mf.GetMetric()[p.metricPos].GetSummary().GetQuantile()
|
||||||
|
q := qq[p.fieldPos]
|
||||||
|
p.fieldsDone = p.fieldPos == len(qq)-1
|
||||||
|
return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile())
|
||||||
|
case dto.MetricType_HISTOGRAM:
|
||||||
|
bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket()
|
||||||
|
if p.fieldPos >= len(bb) {
|
||||||
|
p.fieldsDone = true
|
||||||
|
return true, model.BucketLabel, "+Inf"
|
||||||
|
}
|
||||||
|
b := bb[p.fieldPos]
|
||||||
|
p.fieldsDone = math.IsInf(b.GetUpperBound(), +1)
|
||||||
|
return true, model.BucketLabel, formatOpenMetricsFloat(b.GetUpperBound())
|
||||||
|
}
|
||||||
|
return false, "", ""
|
||||||
|
}
|
||||||
|
|
||||||
|
var errInvalidVarint = errors.New("protobufparse: invalid varint encountered")
|
||||||
|
|
||||||
|
// readDelimited is essentially doing what the function of the same name in
|
||||||
|
// github.com/matttproud/golang_protobuf_extensions/pbutil is doing, but it is
|
||||||
|
// specific to a MetricFamily, utilizes the more efficient gogo-protobuf
|
||||||
|
// unmarshaling, and acts on a byte slice directly without any additional
|
||||||
|
// staging buffers.
|
||||||
|
func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
|
||||||
|
if len(b) == 0 {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
messageLength, varIntLength := proto.DecodeVarint(b)
|
||||||
|
if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 {
|
||||||
|
return 0, errInvalidVarint
|
||||||
|
}
|
||||||
|
totalLength := varIntLength + int(messageLength)
|
||||||
|
if totalLength > len(b) {
|
||||||
|
return 0, errors.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b))
|
||||||
|
}
|
||||||
|
mf.Reset()
|
||||||
|
return totalLength, mf.Unmarshal(b[varIntLength:totalLength])
|
||||||
|
}
|
||||||
|
|
||||||
|
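readDelimited above undoes a simple framing: each MetricFamily message is preceded by its byte length as an unsigned varint. For reference, a minimal sketch of the writing side, using the same imports as this file (writeDelimited is a hypothetical helper; the test file below applies the same two-step pattern inline):

// writeDelimited is a hypothetical counterpart to readDelimited: it frames
// one MetricFamily as <uvarint length><message bytes> and appends it to buf.
func writeDelimited(buf *bytes.Buffer, mf *dto.MetricFamily) error {
	msg, err := proto.Marshal(mf)
	if err != nil {
		return err
	}
	var lenBuf [binary.MaxVarintLen32]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(msg)))
	buf.Write(lenBuf[:n])
	buf.Write(msg)
	return nil
}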
// formatOpenMetricsFloat works like the usual Go string formatting of a float
// but appends ".0" if the resulting number would otherwise contain neither a
// "." nor an "e".
func formatOpenMetricsFloat(f float64) string {
	// A few common cases hardcoded.
	switch {
	case f == 1:
		return "1.0"
	case f == 0:
		return "0.0"
	case f == -1:
		return "-1.0"
	case math.IsNaN(f):
		return "NaN"
	case math.IsInf(f, +1):
		return "+Inf"
	case math.IsInf(f, -1):
		return "-Inf"
	}
	s := fmt.Sprint(f)
	if strings.ContainsAny(s, "e.") {
		return s
	}
	return s + ".0"
}

// isNativeHistogram returns false iff the provided histogram has no sparse
// buckets and a zero threshold of 0 and a zero count of 0. In principle, this
// could still be meant to be a native histogram (with a zero threshold of 0 and
// no observations yet), but for now, we'll treat this case as a conventional
// histogram.
//
// TODO(beorn7): In the final format, there should be an unambiguous way of
// deciding if a histogram should be ingested as a conventional one or a native
// one.
func isNativeHistogram(h *dto.Histogram) bool {
	return len(h.GetNegativeDelta()) > 0 ||
		len(h.GetPositiveDelta()) > 0 ||
		h.GetZeroCount() > 0 ||
		h.GetZeroThreshold() > 0
}
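A small illustration of that heuristic; a sketch assuming the generated dto field names used above (it would live in the same package):

// Only sparse-bucket fields (deltas, zero count/threshold) mark a histogram
// as native; a classic cumulative-bucket histogram does not.
native := &dto.Histogram{PositiveDelta: []int64{1, 2, -1}}
classic := &dto.Histogram{
	SampleCount: proto.Uint64(10),
	Bucket: []*dto.Bucket{
		{CumulativeCount: proto.Uint64(10), UpperBound: proto.Float64(1)},
	},
}
fmt.Println(isNativeHistogram(native), isNativeHistogram(classic)) // true false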
model/textparse/protobufparse_test.go (new file, 681 lines)
@@ -0,0 +1,681 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package textparse

import (
	"bytes"
	"encoding/binary"
	"io"
	"testing"

	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"

	dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
)

func TestProtobufParse(t *testing.T) {
	textMetricFamilies := []string{
		`name: "go_build_info"
help: "Build information about the main Go module."
type: GAUGE
metric: <
  label: <
    name: "checksum"
    value: ""
  >
  label: <
    name: "path"
    value: "github.com/prometheus/client_golang"
  >
  label: <
    name: "version"
    value: "(devel)"
  >
  gauge: <
    value: 1
  >
>

`,
		`name: "go_memstats_alloc_bytes_total"
help: "Total number of bytes allocated, even if freed."
type: COUNTER
metric: <
  counter: <
    value: 1.546544e+06
    exemplar: <
      label: <
        name: "dummyID"
        value: "42"
      >
      value: 12
      timestamp: <
        seconds: 1625851151
        nanos: 233181499
      >
    >
  >
>

`,
		`name: "something_untyped"
help: "Just to test the untyped type."
type: UNTYPED
metric: <
  untyped: <
    value: 42
  >
  timestamp_ms: 1234567
>

`,
		`name: "test_histogram"
help: "Test histogram with many buckets removed to keep it manageable in size."
type: HISTOGRAM
metric: <
  histogram: <
    sample_count: 175
    sample_sum: 0.0008280461746287094
    bucket: <
      cumulative_count: 2
      upper_bound: -0.0004899999999999998
    >
    bucket: <
      cumulative_count: 4
      upper_bound: -0.0003899999999999998
      exemplar: <
        label: <
          name: "dummyID"
          value: "59727"
        >
        value: -0.00039
        timestamp: <
          seconds: 1625851155
          nanos: 146848499
        >
      >
    >
    bucket: <
      cumulative_count: 16
      upper_bound: -0.0002899999999999998
      exemplar: <
        label: <
          name: "dummyID"
          value: "5617"
        >
        value: -0.00029
      >
    >
    schema: 3
    zero_threshold: 2.938735877055719e-39
    zero_count: 2
    negative_span: <
      offset: -162
      length: 1
    >
    negative_span: <
      offset: 23
      length: 4
    >
    negative_delta: 1
    negative_delta: 3
    negative_delta: -2
    negative_delta: -1
    negative_delta: 1
    positive_span: <
      offset: -161
      length: 1
    >
    positive_span: <
      offset: 8
      length: 3
    >
    positive_delta: 1
    positive_delta: 2
    positive_delta: -1
    positive_delta: -1
  >
  timestamp_ms: 1234568
>

`,
		`name: "test_float_histogram"
help: "Test float histogram with many buckets removed to keep it manageable in size."
type: HISTOGRAM
metric: <
  histogram: <
    sample_count: 175
    sample_count_float: 175.0
    sample_sum: 0.0008280461746287094
    bucket: <
      cumulative_count_float: 2.0
      upper_bound: -0.0004899999999999998
    >
    bucket: <
      cumulative_count_float: 4.0
      upper_bound: -0.0003899999999999998
      exemplar: <
        label: <
          name: "dummyID"
          value: "59727"
        >
        value: -0.00039
        timestamp: <
          seconds: 1625851155
          nanos: 146848499
        >
      >
    >
    bucket: <
      cumulative_count_float: 16
      upper_bound: -0.0002899999999999998
      exemplar: <
        label: <
          name: "dummyID"
          value: "5617"
        >
        value: -0.00029
      >
    >
    schema: 3
    zero_threshold: 2.938735877055719e-39
    zero_count_float: 2.0
    negative_span: <
      offset: -162
      length: 1
    >
    negative_span: <
      offset: 23
      length: 4
    >
    negative_count: 1.0
    negative_count: 3.0
    negative_count: -2.0
    negative_count: -1.0
    negative_count: 1.0
    positive_span: <
      offset: -161
      length: 1
    >
    positive_span: <
      offset: 8
      length: 3
    >
    positive_count: 1.0
    positive_count: 2.0
    positive_count: -1.0
    positive_count: -1.0
  >
  timestamp_ms: 1234568
>

`,
		`name: "test_histogram2"
help: "Similar histogram as before but now without sparse buckets."
type: HISTOGRAM
metric: <
  histogram: <
    sample_count: 175
    sample_sum: 0.000828
    bucket: <
      cumulative_count: 2
      upper_bound: -0.00048
    >
    bucket: <
      cumulative_count: 4
      upper_bound: -0.00038
      exemplar: <
        label: <
          name: "dummyID"
          value: "59727"
        >
        value: -0.00038
        timestamp: <
          seconds: 1625851153
          nanos: 146848499
        >
      >
    >
    bucket: <
      cumulative_count: 16
      upper_bound: 1
      exemplar: <
        label: <
          name: "dummyID"
          value: "5617"
        >
        value: -0.000295
      >
    >
    schema: 0
    zero_threshold: 0
  >
>

`,
		`name: "rpc_durations_seconds"
help: "RPC latency distributions."
type: SUMMARY
metric: <
  label: <
    name: "service"
    value: "exponential"
  >
  summary: <
    sample_count: 262
    sample_sum: 0.00025551262820703587
    quantile: <
      quantile: 0.5
      value: 6.442786329648548e-07
    >
    quantile: <
      quantile: 0.9
      value: 1.9435742936658396e-06
    >
    quantile: <
      quantile: 0.99
      value: 4.0471608667037015e-06
    >
  >
>
`,
		`name: "without_quantiles"
help: "A summary without quantiles."
type: SUMMARY
metric: <
  summary: <
    sample_count: 42
    sample_sum: 1.234
  >
>
`,
	}

	varintBuf := make([]byte, binary.MaxVarintLen32)
	inputBuf := &bytes.Buffer{}

	for _, tmf := range textMetricFamilies {
		pb := &dto.MetricFamily{}
		// From text to proto message.
		require.NoError(t, proto.UnmarshalText(tmf, pb))
		// From proto message to binary protobuf.
		protoBuf, err := proto.Marshal(pb)
		require.NoError(t, err)

		// Write first length, then binary protobuf.
		varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf)))
		inputBuf.Write(varintBuf[:varintLength])
		inputBuf.Write(protoBuf)
	}

	exp := []struct {
		lset    labels.Labels
		m       string
		t       int64
		v       float64
		typ     MetricType
		help    string
		unit    string
		comment string
		shs     *histogram.Histogram
		fhs     *histogram.FloatHistogram
		e       []exemplar.Exemplar
	}{
		{
			m:    "go_build_info",
			help: "Build information about the main Go module.",
		},
		{
			m:   "go_build_info",
			typ: MetricTypeGauge,
		},
		{
			m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)",
			v: 1,
			lset: labels.FromStrings(
				"__name__", "go_build_info",
				"checksum", "",
				"path", "github.com/prometheus/client_golang",
				"version", "(devel)",
			),
		},
		{
			m:    "go_memstats_alloc_bytes_total",
			help: "Total number of bytes allocated, even if freed.",
		},
		{
			m:   "go_memstats_alloc_bytes_total",
			typ: MetricTypeCounter,
		},
		{
			m: "go_memstats_alloc_bytes_total",
			v: 1.546544e+06,
			lset: labels.FromStrings(
				"__name__", "go_memstats_alloc_bytes_total",
			),
			e: []exemplar.Exemplar{
				{Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233},
			},
		},
		{
			m:    "something_untyped",
			help: "Just to test the untyped type.",
		},
		{
			m:   "something_untyped",
			typ: MetricTypeUnknown,
		},
		{
			m: "something_untyped",
			t: 1234567,
			v: 42,
			lset: labels.FromStrings(
				"__name__", "something_untyped",
			),
		},
		{
			m:    "test_histogram",
			help: "Test histogram with many buckets removed to keep it manageable in size.",
		},
		{
			m:   "test_histogram",
			typ: MetricTypeHistogram,
		},
		{
			m: "test_histogram",
			t: 1234568,
			shs: &histogram.Histogram{
				Count:         175,
				ZeroCount:     2,
				Sum:           0.0008280461746287094,
				ZeroThreshold: 2.938735877055719e-39,
				Schema:        3,
				PositiveSpans: []histogram.Span{
					{Offset: -161, Length: 1},
					{Offset: 8, Length: 3},
				},
				NegativeSpans: []histogram.Span{
					{Offset: -162, Length: 1},
					{Offset: 23, Length: 4},
				},
				PositiveBuckets: []int64{1, 2, -1, -1},
				NegativeBuckets: []int64{1, 3, -2, -1, 1},
			},
			lset: labels.FromStrings(
				"__name__", "test_histogram",
			),
			e: []exemplar.Exemplar{
				{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
				{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
			},
		},
		{
			m:    "test_float_histogram",
			help: "Test float histogram with many buckets removed to keep it manageable in size.",
		},
		{
			m:   "test_float_histogram",
			typ: MetricTypeHistogram,
		},
		{
			m: "test_float_histogram",
			t: 1234568,
			fhs: &histogram.FloatHistogram{
				Count:         175.0,
				ZeroCount:     2.0,
				Sum:           0.0008280461746287094,
				ZeroThreshold: 2.938735877055719e-39,
				Schema:        3,
				PositiveSpans: []histogram.Span{
					{Offset: -161, Length: 1},
					{Offset: 8, Length: 3},
				},
				NegativeSpans: []histogram.Span{
					{Offset: -162, Length: 1},
					{Offset: 23, Length: 4},
				},
				PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0},
				NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0},
			},
			lset: labels.FromStrings(
				"__name__", "test_float_histogram",
			),
			e: []exemplar.Exemplar{
				{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
				{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
			},
		},
		{
			m:    "test_histogram2",
			help: "Similar histogram as before but now without sparse buckets.",
		},
		{
			m:   "test_histogram2",
			typ: MetricTypeHistogram,
		},
		{
			m: "test_histogram2_count",
			v: 175,
			lset: labels.FromStrings(
				"__name__", "test_histogram2_count",
			),
		},
		{
			m: "test_histogram2_sum",
			v: 0.000828,
			lset: labels.FromStrings(
				"__name__", "test_histogram2_sum",
			),
		},
		{
			m: "test_histogram2_bucket\xffle\xff-0.00048",
			v: 2,
			lset: labels.FromStrings(
				"__name__", "test_histogram2_bucket",
				"le", "-0.00048",
			),
		},
		{
			m: "test_histogram2_bucket\xffle\xff-0.00038",
			v: 4,
			lset: labels.FromStrings(
				"__name__", "test_histogram2_bucket",
				"le", "-0.00038",
			),
			e: []exemplar.Exemplar{
				{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146},
			},
		},
		{
			m: "test_histogram2_bucket\xffle\xff1.0",
			v: 16,
			lset: labels.FromStrings(
				"__name__", "test_histogram2_bucket",
				"le", "1.0",
			),
			e: []exemplar.Exemplar{
				{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false},
			},
		},
		{
			m: "test_histogram2_bucket\xffle\xff+Inf",
			v: 175,
			lset: labels.FromStrings(
				"__name__", "test_histogram2_bucket",
				"le", "+Inf",
			),
		},
		{
			m:    "rpc_durations_seconds",
			help: "RPC latency distributions.",
		},
		{
			m:   "rpc_durations_seconds",
			typ: MetricTypeSummary,
		},
		{
			m: "rpc_durations_seconds_count\xffservice\xffexponential",
			v: 262,
			lset: labels.FromStrings(
				"__name__", "rpc_durations_seconds_count",
				"service", "exponential",
			),
		},
		{
			m: "rpc_durations_seconds_sum\xffservice\xffexponential",
			v: 0.00025551262820703587,
			lset: labels.FromStrings(
				"__name__", "rpc_durations_seconds_sum",
				"service", "exponential",
			),
		},
		{
			m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5",
			v: 6.442786329648548e-07,
			lset: labels.FromStrings(
				"__name__", "rpc_durations_seconds",
				"quantile", "0.5",
				"service", "exponential",
			),
		},
		{
			m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9",
			v: 1.9435742936658396e-06,
			lset: labels.FromStrings(
				"__name__", "rpc_durations_seconds",
				"quantile", "0.9",
				"service", "exponential",
			),
		},
		{
			m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99",
			v: 4.0471608667037015e-06,
			lset: labels.FromStrings(
				"__name__", "rpc_durations_seconds",
				"quantile", "0.99",
				"service", "exponential",
			),
		},
		{
			m:    "without_quantiles",
			help: "A summary without quantiles.",
		},
		{
			m:   "without_quantiles",
			typ: MetricTypeSummary,
		},
		{
			m: "without_quantiles_count",
			v: 42,
			lset: labels.FromStrings(
				"__name__", "without_quantiles_count",
			),
		},
		{
			m: "without_quantiles_sum",
			v: 1.234,
			lset: labels.FromStrings(
				"__name__", "without_quantiles_sum",
			),
		},
	}

	p := NewProtobufParser(inputBuf.Bytes())
	i := 0

	var res labels.Labels

	for {
		et, err := p.Next()
		if err == io.EOF {
			break
		}
		require.NoError(t, err)

		switch et {
		case EntrySeries:
			m, ts, v := p.Series()

			var e exemplar.Exemplar
			p.Metric(&res)
			found := p.Exemplar(&e)
			require.Equal(t, exp[i].m, string(m))
			if ts != nil {
				require.Equal(t, exp[i].t, *ts)
			} else {
				require.Equal(t, exp[i].t, int64(0))
			}
			require.Equal(t, exp[i].v, v)
			require.Equal(t, exp[i].lset, res)
			if len(exp[i].e) == 0 {
				require.Equal(t, false, found)
			} else {
				require.Equal(t, true, found)
				require.Equal(t, exp[i].e[0], e)
			}
			res = res[:0]

		case EntryHistogram:
			m, ts, shs, fhs := p.Histogram()
			p.Metric(&res)
			require.Equal(t, exp[i].m, string(m))
			if ts != nil {
				require.Equal(t, exp[i].t, *ts)
			} else {
				require.Equal(t, exp[i].t, int64(0))
			}
			require.Equal(t, exp[i].lset, res)
			res = res[:0]
			require.Equal(t, exp[i].m, string(m))
			if shs != nil {
				require.Equal(t, exp[i].shs, shs)
			} else {
				require.Equal(t, exp[i].fhs, fhs)
			}
			j := 0
			for e := (exemplar.Exemplar{}); p.Exemplar(&e); j++ {
				require.Equal(t, exp[i].e[j], e)
				e = exemplar.Exemplar{}
			}
			require.Equal(t, len(exp[i].e), j, "not enough exemplars found")

		case EntryType:
			m, typ := p.Type()
			require.Equal(t, exp[i].m, string(m))
			require.Equal(t, exp[i].typ, typ)

		case EntryHelp:
			m, h := p.Help()
			require.Equal(t, exp[i].m, string(m))
			require.Equal(t, exp[i].help, string(h))

		case EntryUnit:
			m, u := p.Unit()
			require.Equal(t, exp[i].m, string(m))
			require.Equal(t, exp[i].unit, string(u))

		case EntryComment:
			require.Equal(t, exp[i].comment, string(p.Comment()))
		}

		i++
	}
	require.Equal(t, len(exp), i)
}
@@ -5,14 +5,17 @@ lint:
     ENUM_VALUE_PREFIX:
       - remote.proto
       - types.proto
+      - io/prometheus/client/metrics.proto
     ENUM_ZERO_VALUE_SUFFIX:
       - remote.proto
       - types.proto
+      - io/prometheus/client/metrics.proto
     PACKAGE_DIRECTORY_MATCH:
       - remote.proto
       - types.proto
     PACKAGE_VERSION_SUFFIX:
       - remote.proto
       - types.proto
+      - io/prometheus/client/metrics.proto
 deps:
   - buf.build/gogo/protobuf
 3994  prompb/io/prometheus/client/metrics.pb.go  (new file; diff suppressed because it is too large)
  146  prompb/io/prometheus/client/metrics.proto  (new file)
@@ -0,0 +1,146 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This is copied and lightly edited from
// github.com/prometheus/client_model/io/prometheus/client/metrics.proto
// and finally converted to proto3 syntax to make it usable for the
// gogo-protobuf approach taken within prometheus/prometheus.

syntax = "proto3";

package io.prometheus.client;
option go_package = "io_prometheus_client";

import "google/protobuf/timestamp.proto";

message LabelPair {
  string name  = 1;
  string value = 2;
}

enum MetricType {
  // COUNTER must use the Metric field "counter".
  COUNTER = 0;
  // GAUGE must use the Metric field "gauge".
  GAUGE = 1;
  // SUMMARY must use the Metric field "summary".
  SUMMARY = 2;
  // UNTYPED must use the Metric field "untyped".
  UNTYPED = 3;
  // HISTOGRAM must use the Metric field "histogram".
  HISTOGRAM = 4;
  // GAUGE_HISTOGRAM must use the Metric field "histogram".
  GAUGE_HISTOGRAM = 5;
}

message Gauge {
  double value = 1;
}

message Counter {
  double   value    = 1;
  Exemplar exemplar = 2;
}

message Quantile {
  double quantile = 1;
  double value    = 2;
}

message Summary {
  uint64   sample_count = 1;
  double   sample_sum   = 2;
  repeated Quantile quantile = 3;
}

message Untyped {
  double value = 1;
}

message Histogram {
  uint64 sample_count       = 1;
  double sample_count_float = 4; // Overrides sample_count if > 0.
  double sample_sum         = 2;
  // Buckets for the conventional histogram.
  repeated Bucket bucket = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional.

  // Everything below here is for native histograms (also known as sparse histograms).
  // Native histograms are an experimental feature without stability guarantees.

  // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
  // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
  // then each power of two is divided into 2^n logarithmic buckets.
  // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
  // In the future, more bucket schemas may be added using numbers < -4 or > 8.
  sint32 schema           = 5;
  double zero_threshold   = 6; // Breadth of the zero bucket.
  uint64 zero_count       = 7; // Count in zero bucket.
  double zero_count_float = 8; // Overrides sb_zero_count if > 0.

  // Negative buckets for the native histogram.
  repeated BucketSpan negative_span = 9;
  // Use either "negative_delta" or "negative_count", the former for
  // regular histograms with integer counts, the latter for float
  // histograms.
  repeated sint64 negative_delta = 10; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double negative_count = 11; // Absolute count of each bucket.

  // Positive buckets for the native histogram.
  repeated BucketSpan positive_span = 12;
  // Use either "positive_delta" or "positive_count", the former for
  // regular histograms with integer counts, the latter for float
  // histograms.
  repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double positive_count = 14; // Absolute count of each bucket.
}

message Bucket {
  uint64   cumulative_count       = 1; // Cumulative in increasing order.
  double   cumulative_count_float = 4; // Overrides cumulative_count if > 0.
  double   upper_bound            = 2; // Inclusive.
  Exemplar exemplar               = 3;
}

// A BucketSpan defines a number of consecutive buckets in a native
// histogram with their offset. Logically, it would be more
// straightforward to include the bucket counts in the Span. However,
// the protobuf representation is more compact in the way the data is
// structured here (with all the buckets in a single array separate
// from the Spans).
message BucketSpan {
  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
  uint32 length = 2; // Length of consecutive buckets.
}

message Exemplar {
  repeated LabelPair label = 1;
  double value = 2;
  google.protobuf.Timestamp timestamp = 3; // OpenMetrics-style.
}

message Metric {
  repeated LabelPair label = 1;
  Gauge     gauge        = 2;
  Counter   counter      = 3;
  Summary   summary      = 4;
  Untyped   untyped      = 5;
  Histogram histogram    = 7;
  int64     timestamp_ms = 6;
}

message MetricFamily {
  string name = 1;
  string help = 2;
  MetricType type = 3;
  repeated Metric metric = 4;
}
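The schema comment above is easiest to see with numbers: schema n splits each power of two into 2^n logarithmic buckets, so consecutive boundaries differ by a factor of 2^(2^-n). The following self-contained Go sketch makes that concrete; the helper name and the exact index convention are illustrative assumptions, not code from this PR.

package main

import (
	"fmt"
	"math"
)

// upperBound is a hypothetical helper, not part of this PR: it returns
// the upper bound of the positive bucket at the given index for a
// schema n, per the comment above. Boundaries are successive powers of
// 2^(2^-n), and index 0 is taken to end at 1 here for simplicity.
func upperBound(schema, index int32) float64 {
	base := math.Pow(2, math.Pow(2, float64(-schema)))
	return math.Pow(base, float64(index))
}

func main() {
	// At schema 2, each power of two is split into 2^2 = 4 buckets, so
	// after four steps the boundary reaches the next power of two:
	// 1, 1.1892..., 1.4142..., 1.6817..., 2.
	for i := int32(0); i <= 4; i++ {
		fmt.Printf("schema=2 index=%d upper=%g\n", i, upperBound(2, i))
	}
	// At schema 0, boundaries are plain powers of two: upperBound(0, 3) == 8.
	fmt.Println(upperBound(0, 3))
}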
@@ -34,8 +34,10 @@ const (
 	// Content-Type: "application/x-protobuf"
 	// Content-Encoding: "snappy"
 	ReadRequest_SAMPLES ReadRequest_ResponseType = 0
-	// Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series.
-	// Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum.
+	// Server will stream a delimited ChunkedReadResponse message that
+	// contains XOR or HISTOGRAM(!) encoded chunks for a single series.
+	// Each message is following varint size and fixed size bigendian
+	// uint32 for CRC32 Castagnoli checksum.
 	//
 	// Response headers:
 	// Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
@@ -39,8 +39,10 @@ message ReadRequest {
     // Content-Type: "application/x-protobuf"
     // Content-Encoding: "snappy"
     SAMPLES = 0;
-    // Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series.
-    // Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum.
+    // Server will stream a delimited ChunkedReadResponse message that
+    // contains XOR or HISTOGRAM(!) encoded chunks for a single series.
+    // Each message is following varint size and fixed size bigendian
+    // uint32 for CRC32 Castagnoli checksum.
     //
     // Response headers:
     // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
 1534  prompb/types.pb.go  (diff suppressed because it is too large)
@@ -54,13 +54,79 @@ message Exemplar {
   int64 timestamp = 3;
 }

+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both, the usual
+// integer histogram as well as a float histogram.
+message Histogram {
+  enum ResetHint {
+    UNKNOWN = 0; // Need to test for a counter reset explicitly.
+    YES     = 1; // This is the 1st histogram after a counter reset.
+    NO      = 2; // There was no counter reset between this and the previous Histogram.
+    GAUGE   = 3; // This is a gauge histogram where counter resets don't happen.
+  }
+
+  oneof count { // Count of observations in the histogram.
+    uint64 count_int   = 1;
+    double count_float = 2;
+  }
+  double sum = 3; // Sum of observations in the histogram.
+  // The schema defines the bucket schema. Currently, valid numbers
+  // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+  // is a bucket boundary in each case, and then each power of two is
+  // divided into 2^n logarithmic buckets. Or in other words, each
+  // bucket boundary is the previous boundary times 2^(2^-n). In the
+  // future, more bucket schemas may be added using numbers < -4 or >
+  // 8.
+  sint32 schema         = 4;
+  double zero_threshold = 5; // Breadth of the zero bucket.
+  oneof zero_count {         // Count in zero bucket.
+    uint64 zero_count_int   = 6;
+    double zero_count_float = 7;
+  }
+
+  // Negative Buckets.
+  repeated BucketSpan negative_spans = 8;
+  // Use either "negative_deltas" or "negative_counts", the former for
+  // regular histograms with integer counts, the latter for float
+  // histograms.
+  repeated sint64 negative_deltas = 9;  // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double negative_counts = 10; // Absolute count of each bucket.
+
+  // Positive Buckets.
+  repeated BucketSpan positive_spans = 11;
+  // Use either "positive_deltas" or "positive_counts", the former for
+  // regular histograms with integer counts, the latter for float
+  // histograms.
+  repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double positive_counts = 13; // Absolute count of each bucket.
+
+  ResetHint reset_hint = 14;
+  // timestamp is in ms format, see model/timestamp/timestamp.go for
+  // conversion from time.Time to Prometheus timestamp.
+  int64 timestamp = 15;
+}
+
+// A BucketSpan defines a number of consecutive buckets with their
+// offset. Logically, it would be more straightforward to include the
+// bucket counts in the Span. However, the protobuf representation is
+// more compact in the way the data is structured here (with all the
+// buckets in a single array separate from the Spans).
+message BucketSpan {
+  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
+  uint32 length = 2; // Length of consecutive buckets.
+}
+
 // TimeSeries represents samples and labels for a single time series.
 message TimeSeries {
   // For a timeseries to be valid, and for the samples and exemplars
   // to be ingested by the remote system properly, the labels field is required.
   repeated Label labels = 1 [(gogoproto.nullable) = false];
   repeated Sample samples = 2 [(gogoproto.nullable) = false];
   repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
+  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
 }

 message Label {
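The span/delta encoding above is compact but takes a moment to read. Here is a small, self-contained Go sketch of the decoding; the type and helper names are illustrative stand-ins, not the generated gogo-protobuf code, and the example input matches the {2, 1, -2, 3} deltas used throughout the engine tests later in this diff.

package main

import "fmt"

// span mirrors the BucketSpan message above; the type and field names
// are illustrative, not the generated Go types.
type span struct {
	offset int32  // Gap to the previous span; start index for the 1st span.
	length uint32 // Number of consecutive buckets.
}

// expand decodes spans plus per-bucket deltas into absolute counts
// keyed by bucket index, the way a consumer of the wire format would.
func expand(spans []span, deltas []int64) map[int32]int64 {
	buckets := make(map[int32]int64)
	var idx int32
	var count int64
	d := 0
	for _, s := range spans {
		idx += s.offset // Skip the gap (or seek to the absolute start).
		for j := uint32(0); j < s.length; j++ {
			count += deltas[d] // Each delta is relative to the previous bucket.
			buckets[idx] = count
			d++
			idx++
		}
	}
	return buckets
}

func main() {
	// Spans {0,2},{1,2} with deltas {2, 1, -2, 3} decode to absolute
	// counts 2, 3, 1, 4 at bucket indexes 0, 1, 3, 4 — the layout the
	// tests below annotate as "Abs: 2, 3, 1, 4".
	fmt.Println(expand([]span{{0, 2}, {1, 2}}, []int64{2, 1, -2, 3}))
}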
@@ -103,8 +169,9 @@ message Chunk {

   // We require this to match chunkenc.Encoding.
   enum Encoding {
     UNKNOWN = 0;
     XOR = 1;
+    HISTOGRAM = 2;
   }
   Encoding type = 3;
   bytes data = 4;
  211  promql/engine.go
@@ -37,11 +37,13 @@ import (
 	"go.opentelemetry.io/otel/trace"
 	"golang.org/x/exp/slices"

+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/model/value"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/util/stats"
 )
@@ -198,7 +200,6 @@ func (q *query) Exec(ctx context.Context) *Result {

 	// Exec query.
 	res, warnings, err := q.ng.exec(ctx, q)

 	return &Result{Err: err, Value: res, Warnings: warnings}
 }
@@ -677,7 +678,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
 		for i, s := range mat {
 			// Point might have a different timestamp, force it to the evaluation
 			// timestamp as that is when we ran the evaluation.
-			vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, T: start}}
+			vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, H: s.Points[0].H, T: start}}
 		}
 		return vector, warnings, nil
 	case parser.ValueTypeScalar:
@@ -981,8 +982,10 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error
 	case errWithWarnings:
 		*errp = err.err
 		*ws = append(*ws, err.warnings...)
+	case error:
+		*errp = err
 	default:
-		*errp = e.(error)
+		*errp = fmt.Errorf("%v", err)
 	}
 }
@@ -1011,7 +1014,7 @@ type EvalNodeHelper struct {
 	// Caches.
 	// DropMetricName and label_*.
 	Dmn map[uint64]labels.Labels
-	// funcHistogramQuantile.
+	// funcHistogramQuantile for conventional histograms.
 	signatureToMetricWithBuckets map[string]*metricWithBuckets
 	// label_replace.
 	regex *regexp.Regexp
@@ -1428,7 +1431,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
 			ev.samplesStats.IncrementSamplesAtStep(step, int64(len(points)))
 			enh.Out = outVec[:0]
 			if len(outVec) > 0 {
-				ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, T: ts})
+				ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, H: outVec[0].Point.H, T: ts})
 			}
 			// Only buffer stepRange milliseconds from the second step on.
 			it.ReduceDelta(stepRange)
@@ -1581,10 +1584,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {

 		for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval {
 			step++
-			_, v, ok := ev.vectorSelectorSingle(it, e, ts)
+			_, v, h, ok := ev.vectorSelectorSingle(it, e, ts)
 			if ok {
 				if ev.currentSamples < ev.maxSamples {
-					ss.Points = append(ss.Points, Point{V: v, T: ts})
+					ss.Points = append(ss.Points, Point{V: v, H: h, T: ts})
 					ev.samplesStats.IncrementSamplesAtStep(step, 1)
 					ev.currentSamples++
 				} else {
@@ -1694,6 +1697,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
 			mat[i].Points = append(mat[i].Points, Point{
 				T: ts,
 				V: mat[i].Points[0].V,
+				H: mat[i].Points[0].H,
 			})
 			ev.currentSamples++
 			if ev.currentSamples > ev.maxSamples {
@@ -1719,11 +1723,11 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect
 	for i, s := range node.Series {
 		it.Reset(s.Iterator())

-		t, v, ok := ev.vectorSelectorSingle(it, node, ts)
+		t, v, h, ok := ev.vectorSelectorSingle(it, node, ts)
 		if ok {
 			vec = append(vec, Sample{
 				Metric: node.Series[i].Labels(),
-				Point:  Point{V: v, T: t},
+				Point:  Point{V: v, H: h, T: t},
 			})

 			ev.currentSamples++
@@ -1738,33 +1742,39 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect
 	return vec, ws
 }

-// vectorSelectorSingle evaluates a instant vector for the iterator of one time series.
-func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) (int64, float64, bool) {
+// vectorSelectorSingle evaluates an instant vector for the iterator of one time series.
+func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) (
+	int64, float64, *histogram.FloatHistogram, bool,
+) {
 	refTime := ts - durationMilliseconds(node.Offset)
 	var t int64
 	var v float64
+	var h *histogram.FloatHistogram

-	ok := it.Seek(refTime)
-	if !ok {
+	valueType := it.Seek(refTime)
+	switch valueType {
+	case chunkenc.ValNone:
 		if it.Err() != nil {
 			ev.error(it.Err())
 		}
-	}
-
-	if ok {
+	case chunkenc.ValFloat:
 		t, v = it.At()
+	case chunkenc.ValHistogram, chunkenc.ValFloatHistogram:
+		t, h = it.AtFloatHistogram()
+	default:
+		panic(fmt.Errorf("unknown value type %v", valueType))
 	}
-
-	if !ok || t > refTime {
+	if valueType == chunkenc.ValNone || t > refTime {
+		var ok bool
-		t, v, ok = it.PeekPrev()
+		t, v, _, h, ok = it.PeekPrev()
 		if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) {
-			return 0, 0, false
+			return 0, 0, nil, false
 		}
 	}
-	if value.IsStaleNaN(v) {
-		return 0, 0, false
+	if value.IsStaleNaN(v) || (h != nil && value.IsStaleNaN(h.Sum)) {
+		return 0, 0, nil, false
 	}
-	return t, v, true
+	return t, v, h, true
 }

 var pointPool = sync.Pool{}
@@ -1849,30 +1859,59 @@ func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, m
 		out = out[:0]
 	}

-	ok := it.Seek(maxt)
-	if !ok {
+	soughtValueType := it.Seek(maxt)
+	if soughtValueType == chunkenc.ValNone {
 		if it.Err() != nil {
 			ev.error(it.Err())
 		}
 	}

 	buf := it.Buffer()
-	for buf.Next() {
-		t, v := buf.At()
-		if value.IsStaleNaN(v) {
-			continue
-		}
-		// Values in the buffer are guaranteed to be smaller than maxt.
-		if t >= mint {
-			if ev.currentSamples >= ev.maxSamples {
-				ev.error(ErrTooManySamples(env))
-			}
-			ev.currentSamples++
-			out = append(out, Point{T: t, V: v})
-		}
-	}
-	// The seeked sample might also be in the range.
-	if ok {
+loop:
+	for {
+		switch buf.Next() {
+		case chunkenc.ValNone:
+			break loop
+		case chunkenc.ValFloatHistogram, chunkenc.ValHistogram:
+			t, h := buf.AtFloatHistogram()
+			if value.IsStaleNaN(h.Sum) {
+				continue loop
+			}
+			// Values in the buffer are guaranteed to be smaller than maxt.
+			if t >= mint {
+				if ev.currentSamples >= ev.maxSamples {
+					ev.error(ErrTooManySamples(env))
+				}
+				ev.currentSamples++
+				out = append(out, Point{T: t, H: h})
+			}
+		case chunkenc.ValFloat:
+			t, v := buf.At()
+			if value.IsStaleNaN(v) {
+				continue loop
+			}
+			// Values in the buffer are guaranteed to be smaller than maxt.
+			if t >= mint {
+				if ev.currentSamples >= ev.maxSamples {
+					ev.error(ErrTooManySamples(env))
+				}
+				ev.currentSamples++
+				out = append(out, Point{T: t, V: v})
+			}
+		}
+	}
+	// The sought sample might also be in the range.
+	switch soughtValueType {
+	case chunkenc.ValFloatHistogram, chunkenc.ValHistogram:
+		t, h := it.AtFloatHistogram()
+		if t == maxt && !value.IsStaleNaN(h.Sum) {
+			if ev.currentSamples >= ev.maxSamples {
+				ev.error(ErrTooManySamples(env))
+			}
+			out = append(out, Point{T: t, H: h})
+			ev.currentSamples++
+		}
+	case chunkenc.ValFloat:
 		t, v := it.At()
 		if t == maxt && !value.IsStaleNaN(v) {
 			if ev.currentSamples >= ev.maxSamples {
@@ -2030,10 +2069,12 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *

 		// Account for potentially swapped sidedness.
 		vl, vr := ls.V, rs.V
+		hl, hr := ls.H, rs.H
 		if matching.Card == parser.CardOneToMany {
 			vl, vr = vr, vl
+			hl, hr = hr, hl
 		}
-		value, keep := vectorElemBinop(op, vl, vr)
+		value, histogramValue, keep := vectorElemBinop(op, vl, vr, hl, hr)
 		if returnBool {
 			if keep {
 				value = 1.0
@@ -2068,10 +2109,13 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
 			insertedSigs[insertSig] = struct{}{}
 		}

-		enh.Out = append(enh.Out, Sample{
-			Metric: metric,
-			Point:  Point{V: value},
-		})
+		if (hl != nil && hr != nil) || (hl == nil && hr == nil) {
+			// Both lhs and rhs are of same type.
+			enh.Out = append(enh.Out, Sample{
+				Metric: metric,
+				Point:  Point{V: value, H: histogramValue},
+			})
+		}
 	}
 	return enh.Out
 }
@@ -2149,7 +2193,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
 		if swap {
 			lv, rv = rv, lv
 		}
-		value, keep := vectorElemBinop(op, lv, rv)
+		value, _, keep := vectorElemBinop(op, lv, rv, nil, nil)
 		// Catch cases where the scalar is the LHS in a scalar-vector comparison operation.
 		// We want to always keep the vector element value as the output value, even if it's on the RHS.
 		if op.IsComparisonOperator() && swap {
@@ -2212,45 +2256,56 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 {
 }

 // vectorElemBinop evaluates a binary operation between two Vector elements.
-func vectorElemBinop(op parser.ItemType, lhs, rhs float64) (float64, bool) {
+func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool) {
 	switch op {
 	case parser.ADD:
-		return lhs + rhs, true
+		if hlhs != nil && hrhs != nil {
+			// The histogram being added must have the larger schema
+			// code (i.e. the higher resolution).
+			if hrhs.Schema >= hlhs.Schema {
+				return 0, hlhs.Copy().Add(hrhs), true
+			}
+			return 0, hrhs.Copy().Add(hlhs), true
+		}
+		return lhs + rhs, nil, true
 	case parser.SUB:
-		return lhs - rhs, true
+		return lhs - rhs, nil, true
 	case parser.MUL:
-		return lhs * rhs, true
+		return lhs * rhs, nil, true
 	case parser.DIV:
-		return lhs / rhs, true
+		return lhs / rhs, nil, true
 	case parser.POW:
-		return math.Pow(lhs, rhs), true
+		return math.Pow(lhs, rhs), nil, true
 	case parser.MOD:
-		return math.Mod(lhs, rhs), true
+		return math.Mod(lhs, rhs), nil, true
 	case parser.EQLC:
-		return lhs, lhs == rhs
+		return lhs, nil, lhs == rhs
 	case parser.NEQ:
-		return lhs, lhs != rhs
+		return lhs, nil, lhs != rhs
 	case parser.GTR:
-		return lhs, lhs > rhs
+		return lhs, nil, lhs > rhs
 	case parser.LSS:
-		return lhs, lhs < rhs
+		return lhs, nil, lhs < rhs
 	case parser.GTE:
-		return lhs, lhs >= rhs
+		return lhs, nil, lhs >= rhs
 	case parser.LTE:
-		return lhs, lhs <= rhs
+		return lhs, nil, lhs <= rhs
 	case parser.ATAN2:
-		return math.Atan2(lhs, rhs), true
+		return math.Atan2(lhs, rhs), nil, true
 	}
 	panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op))
 }

 type groupedAggregation struct {
-	labels      labels.Labels
-	value       float64
-	mean        float64
-	groupCount  int
-	heap        vectorByValueHeap
-	reverseHeap vectorByReverseValueHeap
+	hasFloat       bool // Has at least 1 float64 sample aggregated.
+	hasHistogram   bool // Has at least 1 histogram sample aggregated.
+	labels         labels.Labels
+	value          float64
+	histogramValue *histogram.FloatHistogram
+	mean           float64
+	groupCount     int
+	heap           vectorByValueHeap
+	reverseHeap    vectorByReverseValueHeap
 }

 // aggregation evaluates an aggregation operation on a Vector. The provided grouping labels
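The ADD branch above encodes a subtle rule: Add can fold a finer-resolution histogram into a coarser one, but not the reverse, so the operand with the higher schema must be the argument. A minimal sketch of that rule as a standalone helper follows; the helper is hypothetical (the PR inlines this logic in vectorElemBinop), and the demo values are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// addFloatHistograms is a hypothetical helper restating the rule from
// the ADD branch above: the histogram passed to Add must have an equal
// or higher schema (finer resolution) than the receiver, so the
// lower-schema operand is copied and used as the base.
func addFloatHistograms(a, b *histogram.FloatHistogram) *histogram.FloatHistogram {
	if b.Schema >= a.Schema {
		return a.Copy().Add(b)
	}
	return b.Copy().Add(a)
}

func main() {
	a := &histogram.FloatHistogram{
		Schema: 0, Count: 1, Sum: 2,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{1},
	}
	b := &histogram.FloatHistogram{
		Schema: 1, Count: 1, Sum: 3,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{1},
	}
	sum := addFloatHistograms(a, b)
	fmt.Println(sum.Count, sum.Sum) // Counts and sums simply add: 2 5.
}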
@@ -2330,6 +2385,12 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 			mean:       s.V,
 			groupCount: 1,
 		}
+		if s.H == nil {
+			newAgg.hasFloat = true
+		} else if op == parser.SUM {
+			newAgg.histogramValue = s.H.Copy()
+			newAgg.hasHistogram = true
+		}

 		result[groupingKey] = newAgg
 		orderedResult = append(orderedResult, newAgg)
@@ -2364,7 +2425,26 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without

 	switch op {
 	case parser.SUM:
-		group.value += s.V
+		if s.H != nil {
+			group.hasHistogram = true
+			if group.histogramValue != nil {
+				// The histogram being added must have
+				// an equal or larger schema.
+				if s.H.Schema >= group.histogramValue.Schema {
+					group.histogramValue.Add(s.H)
+				} else {
+					h := s.H.Copy()
+					h.Add(group.histogramValue)
+					group.histogramValue = h
+				}
+			}
+			// Otherwise the aggregation contained floats
+			// previously and will be invalid anyway. No
+			// point in copying the histogram in that case.
+		} else {
+			group.hasFloat = true
+			group.value += s.V
+		}

 	case parser.AVG:
 		group.groupCount++
@@ -2498,13 +2578,18 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 		case parser.QUANTILE:
 			aggr.value = quantile(q, aggr.heap)

+		case parser.SUM:
+			if aggr.hasFloat && aggr.hasHistogram {
+				// We cannot aggregate histogram sample with a float64 sample.
+				continue
+			}
 		default:
 			// For other aggregations, we already have the right value.
 		}

 		enh.Out = append(enh.Out, Sample{
 			Metric: aggr.labels,
-			Point:  Point{V: aggr.value},
+			Point:  Point{V: aggr.value, H: aggr.histogramValue},
 		})
 	}
 	return enh.Out
@@ -17,6 +17,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"math"
 	"os"
 	"sort"
 	"testing"
@@ -29,10 +30,12 @@ import (
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"

+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb"
 )

 func TestMain(m *testing.M) {
@@ -3121,6 +3124,911 @@ func TestRangeQuery(t *testing.T) {
		})
	}
}

func TestSparseHistogramRate(t *testing.T) {
	// TODO(beorn7): Integrate histograms into the PromQL testing framework
	// and write more tests there.
	test, err := NewTest(t, "")
	require.NoError(t, err)
	defer test.Close()

	seriesName := "sparse_histogram_series"
	lbls := labels.FromStrings("__name__", seriesName)

	app := test.Storage().Appender(context.TODO())
	for i, h := range tsdb.GenerateTestHistograms(100) {
		_, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), h)
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())

	require.NoError(t, test.Run())
	engine := test.QueryEngine()

	queryString := fmt.Sprintf("rate(%s[1m])", seriesName)
	qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond)))
	require.NoError(t, err)
	res := qry.Exec(test.Context())
	require.NoError(t, res.Err)
	vector, err := res.Vector()
	require.NoError(t, err)
	require.Len(t, vector, 1)
	actualHistogram := vector[0].H
	expectedHistogram := &histogram.FloatHistogram{
		Schema:          1,
		ZeroThreshold:   0.001,
		ZeroCount:       1. / 15.,
		Count:           4. / 15.,
		Sum:             1.226666666666667,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
		PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
	}
	require.Equal(t, expectedHistogram, actualHistogram)
}
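For orientation on the expected values above: rate(...[1m]) divides the increase over the one-minute window by 60 seconds. With one histogram appended every 15 seconds, a per-bucket result of 1/15 per second corresponds to an increase of four observations in that bucket across the window (4/60 = 1/15), and likewise Count = 4/15 corresponds to a total increase of 16 observations (16/60 = 4/15).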
func TestSparseHistogram_HistogramCountAndSum(t *testing.T) {
	// TODO(codesome): Integrate histograms into the PromQL testing framework
	// and write more tests there.
	h := &histogram.Histogram{
		Count:         24,
		ZeroCount:     4,
		ZeroThreshold: 0.001,
		Sum:           100,
		Schema:        0,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 1, Length: 2},
		},
		PositiveBuckets: []int64{2, 1, -2, 3},
		NegativeSpans: []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 1, Length: 2},
		},
		NegativeBuckets: []int64{2, 1, -2, 3},
	}

	test, err := NewTest(t, "")
	require.NoError(t, err)
	t.Cleanup(test.Close)

	seriesName := "sparse_histogram_series"
	lbls := labels.FromStrings("__name__", seriesName)
	engine := test.QueryEngine()

	ts := int64(10 * time.Minute / time.Millisecond)
	app := test.Storage().Appender(context.TODO())
	_, err = app.AppendHistogram(0, lbls, ts, h)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	queryString := fmt.Sprintf("histogram_count(%s)", seriesName)
	qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
	require.NoError(t, err)

	res := qry.Exec(test.Context())
	require.NoError(t, res.Err)

	vector, err := res.Vector()
	require.NoError(t, err)

	require.Len(t, vector, 1)
	require.Nil(t, vector[0].H)
	require.Equal(t, float64(h.Count), vector[0].V)

	queryString = fmt.Sprintf("histogram_sum(%s)", seriesName)
	qry, err = engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
	require.NoError(t, err)

	res = qry.Exec(test.Context())
	require.NoError(t, res.Err)

	vector, err = res.Vector()
	require.NoError(t, err)

	require.Len(t, vector, 1)
	require.Nil(t, vector[0].H)
	require.Equal(t, h.Sum, vector[0].V)
}
func TestSparseHistogram_HistogramQuantile(t *testing.T) {
	// TODO(codesome): Integrate histograms into the PromQL testing framework
	// and write more tests there.
	type subCase struct {
		quantile string
		value    float64
	}

	cases := []struct {
		text string
		// Histogram to test.
		h *histogram.Histogram
		// Different quantiles to test for this histogram.
		subCases []subCase
	}{
		{
			text: "all positive buckets with zero bucket",
			h: &histogram.Histogram{
				Count:         12,
				ZeroCount:     2,
				ZeroThreshold: 0.001,
				Sum:           100, // Does not matter.
				Schema:        0,
				PositiveSpans: []histogram.Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{2, 1, -2, 3},
			},
			subCases: []subCase{
				{
					quantile: "1.0001",
					value:    math.Inf(1),
				},
				{
					quantile: "1",
					value:    16,
				},
				{
					quantile: "0.99",
					value:    15.759999999999998,
				},
				{
					quantile: "0.9",
					value:    13.600000000000001,
				},
				{
					quantile: "0.6",
					value:    4.799999999999997,
				},
				{
					quantile: "0.5",
					value:    1.6666666666666665,
				},
				{ // Zero bucket.
					quantile: "0.1",
					value:    0.0006000000000000001,
				},
				{
					quantile: "0",
					value:    0,
				},
				{
					quantile: "-1",
					value:    math.Inf(-1),
				},
			},
		},
		{
			text: "all negative buckets with zero bucket",
			h: &histogram.Histogram{
				Count:         12,
				ZeroCount:     2,
				ZeroThreshold: 0.001,
				Sum:           100, // Does not matter.
				Schema:        0,
				NegativeSpans: []histogram.Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				NegativeBuckets: []int64{2, 1, -2, 3},
			},
			subCases: []subCase{
				{
					quantile: "1.0001",
					value:    math.Inf(1),
				},
				{ // Zero bucket.
					quantile: "1",
					value:    0,
				},
				{ // Zero bucket.
					quantile: "0.99",
					value:    -6.000000000000048e-05,
				},
				{ // Zero bucket.
					quantile: "0.9",
					value:    -0.0005999999999999996,
				},
				{
					quantile: "0.5",
					value:    -1.6666666666666667,
				},
				{
					quantile: "0.1",
					value:    -13.6,
				},
				{
					quantile: "0",
					value:    -16,
				},
				{
					quantile: "-1",
					value:    math.Inf(-1),
				},
			},
		},
		{
			text: "both positive and negative buckets with zero bucket",
			h: &histogram.Histogram{
				Count:         24,
				ZeroCount:     4,
				ZeroThreshold: 0.001,
				Sum:           100, // Does not matter.
				Schema:        0,
				PositiveSpans: []histogram.Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{2, 1, -2, 3},
				NegativeSpans: []histogram.Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				NegativeBuckets: []int64{2, 1, -2, 3},
			},
			subCases: []subCase{
				{
					quantile: "1.0001",
					value:    math.Inf(1),
				},
				{
					quantile: "1",
					value:    16,
				},
				{
					quantile: "0.99",
					value:    15.519999999999996,
				},
				{
					quantile: "0.9",
					value:    11.200000000000003,
				},
				{
					quantile: "0.7",
					value:    1.2666666666666657,
				},
				{ // Zero bucket.
					quantile: "0.55",
					value:    0.0006000000000000005,
				},
				{ // Zero bucket.
					quantile: "0.5",
					value:    0,
				},
				{ // Zero bucket.
					quantile: "0.45",
					value:    -0.0005999999999999996,
				},
				{
					quantile: "0.3",
					value:    -1.266666666666667,
				},
				{
					quantile: "0.1",
					value:    -11.2,
				},
				{
					quantile: "0.01",
					value:    -15.52,
				},
				{
					quantile: "0",
					value:    -16,
				},
				{
					quantile: "-1",
					value:    math.Inf(-1),
				},
			},
		},
	}

	test, err := NewTest(t, "")
	require.NoError(t, err)
	t.Cleanup(test.Close)
	for i, c := range cases {
		t.Run(c.text, func(t *testing.T) {
			seriesName := "sparse_histogram_series"
			lbls := labels.FromStrings("__name__", seriesName)
			engine := test.QueryEngine()

			ts := int64(i+1) * int64(10*time.Minute/time.Millisecond)
			app := test.Storage().Appender(context.TODO())
			_, err = app.AppendHistogram(0, lbls, ts, c.h)
			require.NoError(t, err)
			require.NoError(t, app.Commit())

			for j, sc := range c.subCases {
				t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) {
					queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName)
					qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
					require.NoError(t, err)

					res := qry.Exec(test.Context())
					require.NoError(t, res.Err)

					vector, err := res.Vector()
					require.NoError(t, err)

					require.Len(t, vector, 1)
					require.Nil(t, vector[0].H)
					require.True(t, almostEqual(sc.value, vector[0].V))
				})
			}
		})
	}
}
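As a sanity check on one expected value above: for the first histogram, the absolute positive bucket counts are 2, 3, 1, 4 (from deltas {2, 1, -2, 3}) in buckets with upper bounds 1, 2, 8, and 16, plus 2 observations in the zero bucket, 12 in total. For q = 0.9 the target rank is 0.9 × 12 = 10.8; the running totals 2, 4, 7, 8, 12 place it in the (8, 16] bucket, and linear interpolation within that bucket gives 8 + ((10.8 − 8)/4) × 8 = 13.6, matching the expected 13.600000000000001 up to floating-point rounding.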
func TestSparseHistogram_HistogramFraction(t *testing.T) {
|
||||||
|
// TODO(codesome): Integrate histograms into the PromQL testing framework
|
||||||
|
// and write more tests there.
|
||||||
|
type subCase struct {
|
||||||
|
lower, upper string
|
||||||
|
value float64
|
||||||
|
}
|
||||||
|
|
||||||
|
invariantCases := []subCase{
|
||||||
|
{
|
||||||
|
lower: "42",
|
||||||
|
upper: "3.1415",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "0",
|
||||||
|
upper: "0",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "0.000001",
|
||||||
|
upper: "0.000001",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "42",
|
||||||
|
upper: "42",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-3.1",
|
||||||
|
upper: "-3.1",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "3.1415",
|
||||||
|
upper: "NaN",
|
||||||
|
value: math.NaN(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "NaN",
|
||||||
|
upper: "42",
|
||||||
|
value: math.NaN(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "NaN",
|
||||||
|
upper: "NaN",
|
||||||
|
value: math.NaN(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-Inf",
|
||||||
|
upper: "+Inf",
|
||||||
|
value: 1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
cases := []struct {
|
||||||
|
text string
|
||||||
|
// Histogram to test.
|
||||||
|
h *histogram.Histogram
|
||||||
|
// Different ranges to test for this histogram.
|
||||||
|
subCases []subCase
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
text: "empty histogram",
|
||||||
|
h: &histogram.Histogram{},
|
||||||
|
subCases: []subCase{
|
||||||
|
{
|
||||||
|
lower: "3.1415",
|
||||||
|
upper: "42",
|
||||||
|
value: math.NaN(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "all positive buckets with zero bucket",
|
||||||
|
h: &histogram.Histogram{
|
||||||
|
Count: 12,
|
||||||
|
ZeroCount: 2,
|
||||||
|
ZeroThreshold: 0.001,
|
||||||
|
Sum: 100, // Does not matter.
|
||||||
|
Schema: 0,
|
||||||
|
PositiveSpans: []histogram.Span{
|
||||||
|
{Offset: 0, Length: 2},
|
||||||
|
{Offset: 1, Length: 2},
|
||||||
|
},
|
||||||
|
PositiveBuckets: []int64{2, 1, -2, 3}, // Abs: 2, 3, 1, 4
|
||||||
|
},
|
||||||
|
subCases: append([]subCase{
|
||||||
|
{
|
||||||
|
lower: "0",
|
||||||
|
upper: "+Inf",
|
||||||
|
value: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-Inf",
|
||||||
|
upper: "0",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-0.001",
|
||||||
|
upper: "0",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "0",
|
||||||
|
upper: "0.001",
|
||||||
|
value: 2. / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "0",
|
||||||
|
upper: "0.0005",
|
||||||
|
value: 1. / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "0.001",
|
||||||
|
upper: "inf",
|
||||||
|
value: 10. / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-inf",
|
||||||
|
upper: "-0.001",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1",
|
||||||
|
upper: "2",
|
||||||
|
value: 3. / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1.5",
|
||||||
|
upper: "2",
|
||||||
|
value: 1.5 / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1",
|
||||||
|
upper: "8",
|
||||||
|
value: 4. / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1",
|
||||||
|
upper: "6",
|
||||||
|
value: 3.5 / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1.5",
|
||||||
|
upper: "6",
|
||||||
|
value: 2. / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-2",
|
||||||
|
upper: "-1",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-2",
|
||||||
|
upper: "-1.5",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-8",
|
||||||
|
upper: "-1",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-6",
|
||||||
|
upper: "-1",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-6",
|
||||||
|
upper: "-1.5",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
}, invariantCases...),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "all negative buckets with zero bucket",
|
||||||
|
h: &histogram.Histogram{
|
||||||
|
Count: 12,
|
||||||
|
ZeroCount: 2,
|
||||||
|
ZeroThreshold: 0.001,
|
||||||
|
Sum: 100, // Does not matter.
|
||||||
|
Schema: 0,
|
||||||
|
NegativeSpans: []histogram.Span{
|
||||||
|
{Offset: 0, Length: 2},
|
||||||
|
{Offset: 1, Length: 2},
|
||||||
|
},
|
||||||
|
NegativeBuckets: []int64{2, 1, -2, 3},
|
||||||
|
},
|
||||||
|
subCases: append([]subCase{
|
||||||
|
{
|
||||||
|
lower: "0",
|
||||||
|
upper: "+Inf",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-Inf",
|
||||||
|
upper: "0",
|
||||||
|
value: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-0.001",
|
||||||
|
upper: "0",
|
||||||
|
value: 2. / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "0",
|
||||||
|
upper: "0.001",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-0.0005",
|
||||||
|
upper: "0",
|
||||||
|
value: 1. / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "0.001",
|
||||||
|
upper: "inf",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-inf",
|
||||||
|
upper: "-0.001",
|
||||||
|
value: 10. / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1",
|
||||||
|
upper: "2",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1.5",
|
||||||
|
upper: "2",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1",
|
||||||
|
upper: "8",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1",
|
||||||
|
upper: "6",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1.5",
|
||||||
|
upper: "6",
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-2",
|
||||||
|
upper: "-1",
|
||||||
|
value: 3. / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-2",
|
||||||
|
upper: "-1.5",
|
||||||
|
value: 1.5 / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-8",
|
||||||
|
upper: "-1",
|
||||||
|
value: 4. / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-6",
|
||||||
|
upper: "-1",
|
||||||
|
value: 3.5 / 12.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-6",
|
||||||
|
upper: "-1.5",
|
||||||
|
value: 2. / 12.,
|
||||||
|
},
|
||||||
|
}, invariantCases...),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "both positive and negative buckets with zero bucket",
|
||||||
|
h: &histogram.Histogram{
|
||||||
|
Count: 24,
|
||||||
|
ZeroCount: 4,
|
||||||
|
ZeroThreshold: 0.001,
|
||||||
|
Sum: 100, // Does not matter.
|
||||||
|
Schema: 0,
|
||||||
|
PositiveSpans: []histogram.Span{
|
||||||
|
{Offset: 0, Length: 2},
|
||||||
|
{Offset: 1, Length: 2},
|
||||||
|
},
|
||||||
|
PositiveBuckets: []int64{2, 1, -2, 3},
|
||||||
|
NegativeSpans: []histogram.Span{
|
||||||
|
{Offset: 0, Length: 2},
|
||||||
|
{Offset: 1, Length: 2},
|
||||||
|
},
|
||||||
|
NegativeBuckets: []int64{2, 1, -2, 3},
|
||||||
|
},
|
||||||
|
subCases: append([]subCase{
|
||||||
|
{
|
||||||
|
lower: "0",
|
||||||
|
upper: "+Inf",
|
||||||
|
value: 0.5,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-Inf",
|
||||||
|
upper: "0",
|
||||||
|
value: 0.5,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-0.001",
|
||||||
|
upper: "0",
|
||||||
|
value: 2. / 24,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "0",
|
||||||
|
upper: "0.001",
|
||||||
|
value: 2. / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-0.0005",
|
||||||
|
upper: "0.0005",
|
||||||
|
value: 2. / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "0.001",
|
||||||
|
upper: "inf",
|
||||||
|
value: 10. / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-inf",
|
||||||
|
upper: "-0.001",
|
||||||
|
value: 10. / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1",
|
||||||
|
upper: "2",
|
||||||
|
value: 3. / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1.5",
|
||||||
|
upper: "2",
|
||||||
|
value: 1.5 / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1",
|
||||||
|
upper: "8",
|
||||||
|
value: 4. / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1",
|
||||||
|
upper: "6",
|
||||||
|
value: 3.5 / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "1.5",
|
||||||
|
upper: "6",
|
||||||
|
value: 2. / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-2",
|
||||||
|
upper: "-1",
|
||||||
|
value: 3. / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-2",
|
||||||
|
upper: "-1.5",
|
||||||
|
value: 1.5 / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-8",
|
||||||
|
upper: "-1",
|
||||||
|
value: 4. / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-6",
|
||||||
|
upper: "-1",
|
||||||
|
value: 3.5 / 24.,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
lower: "-6",
|
||||||
|
upper: "-1.5",
|
||||||
|
value: 2. / 24.,
|
||||||
|
},
|
||||||
|
}, invariantCases...),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
	for i, c := range cases {
		t.Run(c.text, func(t *testing.T) {
			test, err := NewTest(t, "")
			require.NoError(t, err)
			t.Cleanup(test.Close)

			seriesName := "sparse_histogram_series"
			lbls := labels.FromStrings("__name__", seriesName)
			engine := test.QueryEngine()

			ts := int64(i+1) * int64(10*time.Minute/time.Millisecond)
			app := test.Storage().Appender(context.TODO())
			_, err = app.AppendHistogram(0, lbls, ts, c.h)
			require.NoError(t, err)
			require.NoError(t, app.Commit())

			for j, sc := range c.subCases {
				t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) {
					queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName)
					qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
					require.NoError(t, err)

					res := qry.Exec(test.Context())
					require.NoError(t, res.Err)

					vector, err := res.Vector()
					require.NoError(t, err)

					require.Len(t, vector, 1)
					require.Nil(t, vector[0].H)
					if math.IsNaN(sc.value) {
						require.True(t, math.IsNaN(vector[0].V))
						return
					}
					require.Equal(t, sc.value, vector[0].V)
				})
			}
		})
	}
}

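The expected fractions above follow from linear interpolation inside a single bucket: in the zero-bucket case, the schema-0 bucket (1, 2] holds 3 of the 24 observations, so a query over [1.5, 2] is credited with half of that bucket, i.e. 1.5/24. A minimal standalone sketch of that arithmetic (not the engine implementation, which is histogramFraction further down):

package main

import "fmt"

// fractionInBucket linearly interpolates how much of one bucket's population
// falls inside [lower, upper], mirroring the expected values in the test above.
func fractionInBucket(bLower, bUpper, count, total, lower, upper float64) float64 {
	if lower < bLower {
		lower = bLower
	}
	if upper > bUpper {
		upper = bUpper
	}
	if upper <= lower {
		return 0
	}
	return count * (upper - lower) / (bUpper - bLower) / total
}

func main() {
	// Bucket (1, 2] with 3 of 24 observations, queried over [1.5, 2]:
	fmt.Println(fractionInBucket(1, 2, 3, 24, 1.5, 2)) // 0.0625 == 1.5/24
}
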
func TestSparseHistogram_Sum_Count_AddOperator(t *testing.T) {
	// TODO(codesome): Integrate histograms into the PromQL testing framework
	// and write more tests there.
	cases := []struct {
		histograms []histogram.Histogram
		expected   histogram.FloatHistogram
	}{
		{
			histograms: []histogram.Histogram{
				{
					Schema:        0,
					Count:         21,
					Sum:           1234.5,
					ZeroThreshold: 0.001,
					ZeroCount:     4,
					PositiveSpans: []histogram.Span{
						{Offset: 0, Length: 2},
						{Offset: 1, Length: 2},
					},
					PositiveBuckets: []int64{1, 1, -1, 0},
					NegativeSpans: []histogram.Span{
						{Offset: 0, Length: 2},
						{Offset: 2, Length: 2},
					},
					NegativeBuckets: []int64{2, 2, -3, 8},
				},
				{
					Schema:        0,
					Count:         36,
					Sum:           2345.6,
					ZeroThreshold: 0.001,
					ZeroCount:     5,
					PositiveSpans: []histogram.Span{
						{Offset: 0, Length: 4},
						{Offset: 0, Length: 0},
						{Offset: 0, Length: 3},
					},
					PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
					NegativeSpans: []histogram.Span{
						{Offset: 1, Length: 4},
						{Offset: 2, Length: 0},
						{Offset: 2, Length: 3},
					},
					NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
				},
				{
					Schema:        0,
					Count:         36,
					Sum:           1111.1,
					ZeroThreshold: 0.001,
					ZeroCount:     5,
					PositiveSpans: []histogram.Span{
						{Offset: 0, Length: 4},
						{Offset: 0, Length: 0},
						{Offset: 0, Length: 3},
					},
					PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
					NegativeSpans: []histogram.Span{
						{Offset: 1, Length: 4},
						{Offset: 2, Length: 0},
						{Offset: 2, Length: 3},
					},
					NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
				},
			},
			expected: histogram.FloatHistogram{
				Schema:        0,
				ZeroThreshold: 0.001,
				ZeroCount:     14,
				Count:         93,
				Sum:           4691.2,
				PositiveSpans: []histogram.Span{
					{Offset: 0, Length: 3},
					{Offset: 0, Length: 4},
				},
				PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2},
				NegativeSpans: []histogram.Span{
					{Offset: 0, Length: 4},
					{Offset: 0, Length: 2},
					{Offset: 3, Length: 3},
				},
				NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4},
			},
		},
	}

	for i, c := range cases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			test, err := NewTest(t, "")
			require.NoError(t, err)
			t.Cleanup(test.Close)

			seriesName := "sparse_histogram_series"

			engine := test.QueryEngine()

			ts := int64(i+1) * int64(10*time.Minute/time.Millisecond)
			app := test.Storage().Appender(context.TODO())
			for idx, h := range c.histograms {
				lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx))
				// Since we mutate h later, we need to create a copy here.
				_, err = app.AppendHistogram(0, lbls, ts, h.Copy())
				require.NoError(t, err)
			}
			require.NoError(t, app.Commit())

			queryAndCheck := func(queryString string, exp Vector) {
				qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
				require.NoError(t, err)

				res := qry.Exec(test.Context())
				require.NoError(t, res.Err)

				vector, err := res.Vector()
				require.NoError(t, err)

				require.Equal(t, exp, vector)
			}

			// sum().
			queryString := fmt.Sprintf("sum(%s)", seriesName)
			queryAndCheck(queryString, []Sample{
				{Point{T: ts, H: &c.expected}, labels.Labels{}},
			})

			// + operator.
			queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName)
			for idx := 1; idx < len(c.histograms); idx++ {
				queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx)
			}
			queryAndCheck(queryString, []Sample{
				{Point{T: ts, H: &c.expected}, labels.Labels{}},
			})

			// count().
			queryString = fmt.Sprintf("count(%s)", seriesName)
			queryAndCheck(queryString, []Sample{
				{Point{T: ts, V: 3}, labels.Labels{}},
			})
		})
	}
}

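The expected aggregate above can be sanity-checked on its top-level fields: sum() and the + operator add native histograms component-wise, so Count, Sum, and ZeroCount are plain sums of the inputs (bucket populations are likewise added after the operands are converted to a common schema). A quick standalone check, not part of the test suite:

package main

import "fmt"

func main() {
	// Top-level fields of the three input histograms above.
	counts := []float64{21, 36, 36}
	sums := []float64{1234.5, 2345.6, 1111.1}
	zeroCounts := []float64{4, 5, 5}

	var count, sum, zero float64
	for i := range counts {
		count += counts[i]
		sum += sums[i]
		zero += zeroCounts[i]
	}
	// 93 4691.2 14, matching the expected FloatHistogram.
	fmt.Printf("%g %.1f %g\n", count, sum, zero)
}
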
func TestQueryLookbackDelta(t *testing.T) {
	var (
		load = `load 5m

@@ -24,6 +24,7 @@ import (
 	"github.com/grafana/regexp"
 	"github.com/prometheus/common/model"
 
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql/parser"
 )
@@ -66,9 +67,11 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 	ms := args[0].(*parser.MatrixSelector)
 	vs := ms.VectorSelector.(*parser.VectorSelector)
 	var (
 		samples    = vals[0].(Matrix)[0]
 		rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
 		rangeEnd   = enh.Ts - durationMilliseconds(vs.Offset)
+		resultValue     float64
+		resultHistogram *histogram.FloatHistogram
 	)
 
 	// No sense in trying to compute a rate without at least two points. Drop
@@ -77,14 +80,32 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 		return enh.Out
 	}
 
-	resultValue := samples.Points[len(samples.Points)-1].V - samples.Points[0].V
-	if isCounter {
-		var lastValue float64
-		for _, sample := range samples.Points {
-			if sample.V < lastValue {
-				resultValue += lastValue
+	if samples.Points[0].H != nil {
+		resultHistogram = histogramRate(samples.Points, isCounter)
+		if resultHistogram == nil {
+			// Points are a mix of floats and histograms, or the histograms
+			// are not compatible with each other.
+			// TODO(beorn7): find a way of communicating the exact reason
+			return enh.Out
+		}
+	} else {
+		resultValue = samples.Points[len(samples.Points)-1].V - samples.Points[0].V
+		prevValue := samples.Points[0].V
+		// We have to iterate through everything even in the non-counter
+		// case because we have to check that everything is a float.
+		// TODO(beorn7): Find a way to check that earlier, e.g. by
+		// handing in a []FloatPoint and a []HistogramPoint separately.
+		for _, currPoint := range samples.Points[1:] {
+			if currPoint.H != nil {
+				return nil // Range contains a mix of histograms and floats.
 			}
-			lastValue = sample.V
+			if !isCounter {
+				continue
+			}
+			if currPoint.V < prevValue {
+				resultValue += prevValue
+			}
+			prevValue = currPoint.V
 		}
 	}
@@ -95,6 +116,7 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 	sampledInterval := float64(samples.Points[len(samples.Points)-1].T-samples.Points[0].T) / 1000
 	averageDurationBetweenSamples := sampledInterval / float64(len(samples.Points)-1)
 
+	// TODO(beorn7): Do this for histograms, too.
 	if isCounter && resultValue > 0 && samples.Points[0].V >= 0 {
 		// Counters cannot be negative. If we have any slope at
 		// all (i.e. resultValue went up), we can extrapolate
@@ -126,16 +148,69 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 	} else {
 		extrapolateToInterval += averageDurationBetweenSamples / 2
 	}
-	resultValue = resultValue * (extrapolateToInterval / sampledInterval)
+	factor := extrapolateToInterval / sampledInterval
 	if isRate {
-		resultValue = resultValue / ms.Range.Seconds()
+		factor /= ms.Range.Seconds()
+	}
+	if resultHistogram == nil {
+		resultValue *= factor
+	} else {
+		resultHistogram.Scale(factor)
 	}
 
 	return append(enh.Out, Sample{
-		Point: Point{V: resultValue},
+		Point: Point{V: resultValue, H: resultHistogram},
 	})
 }
 
+// histogramRate is a helper function for extrapolatedRate. It requires
+// points[0] to be a histogram. It returns nil if any other Point in points is
+// not a histogram.
+func histogramRate(points []Point, isCounter bool) *histogram.FloatHistogram {
+	prev := points[0].H // We already know that this is a histogram.
+	last := points[len(points)-1].H
+	if last == nil {
+		return nil // Range contains a mix of histograms and floats.
+	}
+	minSchema := prev.Schema
+	if last.Schema < minSchema {
+		minSchema = last.Schema
+	}
+
+	// First iteration to find out two things:
+	// - What's the smallest relevant schema?
+	// - Are all data points histograms?
+	// TODO(beorn7): Find a way to check that earlier, e.g. by handing in a
+	// []FloatPoint and a []HistogramPoint separately.
+	for _, currPoint := range points[1 : len(points)-1] {
+		curr := currPoint.H
+		if curr == nil {
+			return nil // Range contains a mix of histograms and floats.
+		}
+		if !isCounter {
+			continue
+		}
+		if curr.Schema < minSchema {
+			minSchema = curr.Schema
+		}
+	}
+
+	h := last.CopyToSchema(minSchema)
+	h.Sub(prev)
+
+	if isCounter {
+		// Second iteration to deal with counter resets.
+		for _, currPoint := range points[1:] {
+			curr := currPoint.H
+			if curr.DetectReset(prev) {
+				h.Add(prev)
+			}
+			prev = curr
+		}
+	}
+	return h.Compact(0)
+}
+
 // === delta(Matrix parser.ValueTypeMatrix) Vector ===
 func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
 	return extrapolatedRate(vals, args, enh, false, false)
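The counter-reset handling in histogramRate is the histogram analogue of the float path above: whenever DetectReset fires, the pre-reset histogram is added back so the computed increase stays monotonic. The same idea on plain floats, as a self-contained sketch:

package main

import "fmt"

// increase mirrors the float counter-reset compensation in extrapolatedRate:
// on every drop, the pre-reset value is added back before taking the delta.
func increase(points []float64) float64 {
	delta := points[len(points)-1] - points[0]
	prev := points[0]
	for _, curr := range points[1:] {
		if curr < prev { // counter reset
			delta += prev
		}
		prev = curr
	}
	return delta
}

func main() {
	// Counter 2 -> 5 -> (reset) -> 1 -> 4: true increase is 3 + 4 = 7.
	fmt.Println(increase([]float64{2, 5, 1, 4})) // 7
}
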
@@ -793,6 +868,59 @@ func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNo
 	})
 }
 
+// === histogram_count(Vector parser.ValueTypeVector) Vector ===
+func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+	inVec := vals[0].(Vector)
+
+	for _, sample := range inVec {
+		// Skip non-histogram samples.
+		if sample.H == nil {
+			continue
+		}
+		enh.Out = append(enh.Out, Sample{
+			Metric: enh.DropMetricName(sample.Metric),
+			Point:  Point{V: sample.H.Count},
+		})
+	}
+	return enh.Out
+}
+
+// === histogram_sum(Vector parser.ValueTypeVector) Vector ===
+func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+	inVec := vals[0].(Vector)
+
+	for _, sample := range inVec {
+		// Skip non-histogram samples.
+		if sample.H == nil {
+			continue
+		}
+		enh.Out = append(enh.Out, Sample{
+			Metric: enh.DropMetricName(sample.Metric),
+			Point:  Point{V: sample.H.Sum},
+		})
+	}
+	return enh.Out
+}
+
+// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector ===
+func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+	lower := vals[0].(Vector)[0].V
+	upper := vals[1].(Vector)[0].V
+	inVec := vals[2].(Vector)
+
+	for _, sample := range inVec {
+		// Skip non-histogram samples.
+		if sample.H == nil {
+			continue
+		}
+		enh.Out = append(enh.Out, Sample{
+			Metric: enh.DropMetricName(sample.Metric),
+			Point:  Point{V: histogramFraction(lower, upper, sample.H)},
+		})
+	}
+	return enh.Out
+}
+
 // === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector ===
 func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
 	q := vals[0].(Vector)[0].V
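All three new functions act only on native-histogram samples (float samples are skipped) and drop the metric name, like histogram_quantile. A hedged sketch of how they could be exercised through the same test harness used above; "http_request_seconds" is a hypothetical native-histogram series that would have to be appended first:

func TestHistogramFunctionsSketch(t *testing.T) {
	test, err := NewTest(t, "")
	require.NoError(t, err)
	t.Cleanup(test.Close)

	engine := test.QueryEngine()
	// "http_request_seconds" is hypothetical; append native-histogram
	// samples for it first, as in the tests further up.
	for _, q := range []string{
		"histogram_count(http_request_seconds)",            // observation count as a float
		"histogram_sum(http_request_seconds)",              // sum of observations
		"histogram_fraction(0, 0.1, http_request_seconds)", // fraction of observations between 0 and 0.1
		"histogram_quantile(0.9, http_request_seconds)",    // 0.9 quantile from the native histogram
	} {
		qry, err := engine.NewInstantQuery(test.Queryable(), nil, q, timestamp.Time(0))
		require.NoError(t, err)
		res := qry.Exec(test.Context())
		require.NoError(t, res.Err)
	}
}
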
@@ -805,26 +933,57 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
 			v.buckets = v.buckets[:0]
 		}
 	}
-	for _, el := range inVec {
+
+	var histogramSamples []Sample
+
+	for _, sample := range inVec {
+		// We are only looking for conventional buckets here. Remember
+		// the histograms for later treatment.
+		if sample.H != nil {
+			histogramSamples = append(histogramSamples, sample)
+			continue
+		}
+
 		upperBound, err := strconv.ParseFloat(
-			el.Metric.Get(model.BucketLabel), 64,
+			sample.Metric.Get(model.BucketLabel), 64,
 		)
 		if err != nil {
 			// Oops, no bucket label or malformed label value. Skip.
 			// TODO(beorn7): Issue a warning somehow.
 			continue
 		}
-		enh.lblBuf = el.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel)
+		enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel)
 		mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]
 		if !ok {
-			el.Metric = labels.NewBuilder(el.Metric).
+			sample.Metric = labels.NewBuilder(sample.Metric).
 				Del(excludedLabels...).
 				Labels(nil)
 
-			mb = &metricWithBuckets{el.Metric, nil}
+			mb = &metricWithBuckets{sample.Metric, nil}
 			enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb
 		}
-		mb.buckets = append(mb.buckets, bucket{upperBound, el.V})
+		mb.buckets = append(mb.buckets, bucket{upperBound, sample.V})
+
+	}
+
+	// Now deal with the histograms.
+	for _, sample := range histogramSamples {
+		// We have to reconstruct the exact same signature as above for
+		// a conventional histogram, just ignoring any le label.
+		enh.lblBuf = sample.Metric.Bytes(enh.lblBuf)
+		if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 {
+			// At this data point, we have conventional histogram
+			// buckets and a native histogram with the same name and
+			// labels. Do not evaluate anything.
+			// TODO(beorn7): Issue a warning somehow.
+			delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf))
+			continue
+		}
+
+		enh.Out = append(enh.Out, Sample{
+			Metric: enh.DropMetricName(sample.Metric),
+			Point:  Point{V: histogramQuantile(q, sample.H)},
+		})
 	}
 
 	for _, mb := range enh.signatureToMetricWithBuckets {
@@ -1103,7 +1262,10 @@ var FunctionCalls = map[string]FunctionCall{
 	"deriv":              funcDeriv,
 	"exp":                funcExp,
 	"floor":              funcFloor,
+	"histogram_count":    funcHistogramCount,
+	"histogram_fraction": funcHistogramFraction,
 	"histogram_quantile": funcHistogramQuantile,
+	"histogram_sum":      funcHistogramSum,
 	"holt_winters":       funcHoltWinters,
 	"hour":               funcHour,
 	"idelta":             funcIdelta,

@@ -163,6 +163,21 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeVector},
 		ReturnType: ValueTypeVector,
 	},
+	"histogram_count": {
+		Name:       "histogram_count",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"histogram_sum": {
+		Name:       "histogram_sum",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"histogram_fraction": {
+		Name:       "histogram_fraction",
+		ArgTypes:   []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
 	"histogram_quantile": {
 		Name:       "histogram_quantile",
 		ArgTypes:   []ValueType{ValueTypeScalar, ValueTypeVector},

@@ -17,6 +17,7 @@ import (
 	"math"
 	"sort"
 
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 )
 
@@ -119,6 +120,176 @@ func bucketQuantile(q float64, buckets buckets) float64 {
 	return bucketStart + (bucketEnd-bucketStart)*(rank/count)
 }
 
+// histogramQuantile calculates the quantile 'q' based on the given histogram.
+//
+// The quantile value is interpolated assuming a linear distribution within a
+// bucket.
+// TODO(beorn7): Find an interpolation method that is a better fit for
+// exponential buckets (and think about configurable interpolation).
+//
+// A natural lower bound of 0 is assumed if the histogram has only positive
+// buckets. Likewise, a natural upper bound of 0 is assumed if the histogram has
+// only negative buckets.
+// TODO(beorn7): Come to terms if we want that.
+//
+// There are a number of special cases (once we have a way to report errors
+// happening during evaluations of AST functions, we should report those
+// explicitly):
+//
+// If the histogram has 0 observations, NaN is returned.
+//
+// If q<0, -Inf is returned.
+//
+// If q>1, +Inf is returned.
+//
+// If q is NaN, NaN is returned.
+func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
+	if q < 0 {
+		return math.Inf(-1)
+	}
+	if q > 1 {
+		return math.Inf(+1)
+	}
+
+	if h.Count == 0 || math.IsNaN(q) {
+		return math.NaN()
+	}
+
+	var (
+		bucket histogram.Bucket[float64]
+		count  float64
+		it     = h.AllBucketIterator()
+		rank   = q * h.Count
+	)
+	for it.Next() {
+		bucket = it.At()
+		count += bucket.Count
+		if count >= rank {
+			break
+		}
+	}
+	if bucket.Lower < 0 && bucket.Upper > 0 {
+		if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 {
+			// The result is in the zero bucket and the histogram has only
+			// positive buckets. So we consider 0 to be the lower bound.
+			bucket.Lower = 0
+		} else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 {
+			// The result is in the zero bucket and the histogram has only
+			// negative buckets. So we consider 0 to be the upper bound.
+			bucket.Upper = 0
+		}
+	}
+	// Due to numerical inaccuracies, we could end up with a higher count
+	// than h.Count. Thus, make sure count is never higher than h.Count.
+	if count > h.Count {
+		count = h.Count
+	}
+	// We could have hit the highest bucket without even reaching the rank
+	// (this should only happen if the histogram contains observations of
+	// the value NaN), in which case we simply return the upper limit of the
+	// highest explicit bucket.
+	if count < rank {
+		return bucket.Upper
+	}
+
+	rank -= count - bucket.Count
+	// TODO(codesome): Use a better estimation than linear.
+	return bucket.Lower + (bucket.Upper-bucket.Lower)*(rank/bucket.Count)
+}
+
+// histogramFraction calculates the fraction of observations between the
+// provided lower and upper bounds, based on the provided histogram.
+//
+// histogramFraction is in a certain way the inverse of histogramQuantile. If
+// histogramQuantile(0.9, h) returns 123.4, then histogramFraction(-Inf, 123.4, h)
+// returns 0.9.
+//
+// The same notes (and TODOs) with regard to interpolation and assumptions about
+// the zero bucket boundaries apply as for histogramQuantile.
+//
+// Whether either boundary is inclusive or exclusive doesn’t actually matter as
+// long as interpolation has to be performed anyway. In the case of a boundary
+// coinciding with a bucket boundary, the inclusive or exclusive nature of the
+// boundary determines the exact behavior of the threshold. With the current
+// implementation, that means that lower is exclusive for positive values and
+// inclusive for negative values, while upper is inclusive for positive values
+// and exclusive for negative values.
+//
+// Special cases:
+//
+// If the histogram has 0 observations, NaN is returned.
+//
+// Use a lower bound of -Inf to get the fraction of all observations below the
+// upper bound.
+//
+// Use an upper bound of +Inf to get the fraction of all observations above the
+// lower bound.
+//
+// If lower or upper is NaN, NaN is returned.
+//
+// If lower >= upper and the histogram has at least 1 observation, zero is returned.
func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float64 {
+	if h.Count == 0 || math.IsNaN(lower) || math.IsNaN(upper) {
+		return math.NaN()
+	}
+	if lower >= upper {
+		return 0
+	}
+
+	var (
+		rank, lowerRank, upperRank float64
+		lowerSet, upperSet         bool
+		it                         = h.AllBucketIterator()
+	)
+	for it.Next() {
+		b := it.At()
+		if b.Lower < 0 && b.Upper > 0 {
+			if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 {
+				// This is the zero bucket and the histogram has only
+				// positive buckets. So we consider 0 to be the lower
+				// bound.
+				b.Lower = 0
+			} else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 {
+				// This is in the zero bucket and the histogram has only
+				// negative buckets. So we consider 0 to be the upper
+				// bound.
+				b.Upper = 0
+			}
+		}
+		if !lowerSet && b.Lower >= lower {
+			lowerRank = rank
+			lowerSet = true
+		}
+		if !upperSet && b.Lower >= upper {
+			upperRank = rank
+			upperSet = true
+		}
+		if lowerSet && upperSet {
+			break
+		}
+		if !lowerSet && b.Lower < lower && b.Upper > lower {
+			lowerRank = rank + b.Count*(lower-b.Lower)/(b.Upper-b.Lower)
+			lowerSet = true
+		}
+		if !upperSet && b.Lower < upper && b.Upper > upper {
+			upperRank = rank + b.Count*(upper-b.Lower)/(b.Upper-b.Lower)
+			upperSet = true
+		}
+		if lowerSet && upperSet {
+			break
+		}
+		rank += b.Count
+	}
+	if !lowerSet || lowerRank > h.Count {
+		lowerRank = h.Count
+	}
+	if !upperSet || upperRank > h.Count {
+		upperRank = h.Count
+	}
+
+	return (upperRank - lowerRank) / h.Count
+}
+
 // coalesceBuckets merges buckets with the same upper bound.
 //
 // The input buckets must be sorted.

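histogramQuantile ends with the same linear interpolation formula as bucketQuantile above, just applied to exponential bucket boundaries. A worked, standalone example of the final expression:

package main

import "fmt"

func main() {
	// A bucket (1, 2] holding 3 observations, 9 observations below it,
	// h.Count = 24, q = 0.5: rank 12 falls at the top of this bucket.
	var (
		bLower, bUpper, bCount = 1.0, 2.0, 3.0
		countBefore            = 9.0
		total, q               = 24.0, 0.5
	)
	rank := q*total - countBefore // rank within the containing bucket
	fmt.Println(bLower + (bUpper-bLower)*(rank/bCount)) // 2
}
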
@@ -21,6 +21,7 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
 func TestLazyLoader_WithSamplesTill(t *testing.T) {
@@ -47,7 +48,7 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
 				{
 					Metric: labels.FromStrings("__name__", "metric1"),
 					Points: []Point{
-						{0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5},
+						{0, 1, nil}, {10000, 2, nil}, {20000, 3, nil}, {30000, 4, nil}, {40000, 5, nil},
 					},
 				},
 			},
@@ -58,7 +59,7 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
 				{
 					Metric: labels.FromStrings("__name__", "metric1"),
 					Points: []Point{
-						{0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5},
+						{0, 1, nil}, {10000, 2, nil}, {20000, 3, nil}, {30000, 4, nil}, {40000, 5, nil},
 					},
 				},
 			},
@@ -69,7 +70,7 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
 				{
 					Metric: labels.FromStrings("__name__", "metric1"),
 					Points: []Point{
-						{0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5}, {50000, 6}, {60000, 7},
+						{0, 1, nil}, {10000, 2, nil}, {20000, 3, nil}, {30000, 4, nil}, {40000, 5, nil}, {50000, 6, nil}, {60000, 7, nil},
 					},
 				},
 			},
@@ -89,13 +90,13 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
 				{
 					Metric: labels.FromStrings("__name__", "metric1"),
 					Points: []Point{
-						{0, 1}, {10000, 1}, {20000, 1}, {30000, 1}, {40000, 1}, {50000, 1},
+						{0, 1, nil}, {10000, 1, nil}, {20000, 1, nil}, {30000, 1, nil}, {40000, 1, nil}, {50000, 1, nil},
 					},
 				},
 				{
 					Metric: labels.FromStrings("__name__", "metric2"),
 					Points: []Point{
-						{0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5}, {50000, 6}, {60000, 7}, {70000, 8},
+						{0, 1, nil}, {10000, 2, nil}, {20000, 3, nil}, {30000, 4, nil}, {40000, 5, nil}, {50000, 6, nil}, {60000, 7, nil}, {70000, 8, nil},
 					},
 				},
 			},
@@ -143,7 +144,7 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
 				Metric: storageSeries.Labels(),
 			}
 			it := storageSeries.Iterator()
-			for it.Next() {
+			for it.Next() == chunkenc.ValFloat {
 				t, v := it.At()
 				got.Points = append(got.Points, Point{T: t, V: v})
 			}

promql/value.go (140 changed lines)

@@ -20,6 +20,7 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
@@ -63,8 +64,8 @@ func (s Scalar) MarshalJSON() ([]byte, error) {
 
 // Series is a stream of data points belonging to a metric.
 type Series struct {
-	Metric labels.Labels `json:"metric"`
-	Points []Point       `json:"values"`
+	Metric labels.Labels
+	Points []Point
 }
 
 func (s Series) String() string {
@@ -75,15 +76,48 @@ func (s Series) String() string {
 	return fmt.Sprintf("%s =>\n%s", s.Metric, strings.Join(vals, "\n"))
 }
 
+// MarshalJSON is mirrored in web/api/v1/api.go for efficiency reasons.
+// This implementation is still provided for debug purposes and usage
+// without jsoniter.
+func (s Series) MarshalJSON() ([]byte, error) {
+	// Note that this is rather inefficient because it re-creates the whole
+	// series, just separated by Histogram Points and Value Points. For API
+	// purposes, there is a more efficient jsoniter implementation in
+	// web/api/v1/api.go.
+	series := struct {
+		M labels.Labels `json:"metric"`
+		V []Point       `json:"values,omitempty"`
+		H []Point       `json:"histograms,omitempty"`
+	}{
+		M: s.Metric,
+	}
+	for _, p := range s.Points {
+		if p.H == nil {
+			series.V = append(series.V, p)
+			continue
+		}
+		series.H = append(series.H, p)
+	}
+	return json.Marshal(series)
+}
+
 // Point represents a single data point for a given timestamp.
+// If H is not nil, then this is a histogram point and only (T, H) is valid.
+// If H is nil, then only (T, V) is valid.
 type Point struct {
 	T int64
 	V float64
+	H *histogram.FloatHistogram
 }
 
 func (p Point) String() string {
-	v := strconv.FormatFloat(p.V, 'f', -1, 64)
-	return fmt.Sprintf("%v @[%v]", v, p.T)
+	var s string
+	if p.H != nil {
+		s = p.H.String()
+	} else {
+		s = strconv.FormatFloat(p.V, 'f', -1, 64)
+	}
+	return fmt.Sprintf("%s @[%v]", s, p.T)
 }
 
 // MarshalJSON implements json.Marshaler.
@@ -96,8 +130,45 @@ func (p Point) String() string {
 // slightly different results in terms of formatting and rounding of the
 // timestamp.
 func (p Point) MarshalJSON() ([]byte, error) {
-	v := strconv.FormatFloat(p.V, 'f', -1, 64)
-	return json.Marshal([...]interface{}{float64(p.T) / 1000, v})
+	if p.H == nil {
+		v := strconv.FormatFloat(p.V, 'f', -1, 64)
+		return json.Marshal([...]interface{}{float64(p.T) / 1000, v})
+	}
+	h := struct {
+		Count   string          `json:"count"`
+		Sum     string          `json:"sum"`
+		Buckets [][]interface{} `json:"buckets,omitempty"`
+	}{
+		Count: strconv.FormatFloat(p.H.Count, 'f', -1, 64),
+		Sum:   strconv.FormatFloat(p.H.Sum, 'f', -1, 64),
+	}
+	it := p.H.AllBucketIterator()
+	for it.Next() {
+		bucket := it.At()
+		if bucket.Count == 0 {
+			continue // No need to expose empty buckets in JSON.
+		}
+		boundaries := 2 // Exclusive on both sides AKA open interval.
+		if bucket.LowerInclusive {
+			if bucket.UpperInclusive {
+				boundaries = 3 // Inclusive on both sides AKA closed interval.
+			} else {
+				boundaries = 1 // Inclusive only on lower end AKA right open.
+			}
+		} else {
+			if bucket.UpperInclusive {
+				boundaries = 0 // Inclusive only on upper end AKA left open.
+			}
+		}
+		bucketToMarshal := []interface{}{
+			boundaries,
+			strconv.FormatFloat(bucket.Lower, 'f', -1, 64),
+			strconv.FormatFloat(bucket.Upper, 'f', -1, 64),
+			strconv.FormatFloat(bucket.Count, 'f', -1, 64),
+		}
+		h.Buckets = append(h.Buckets, bucketToMarshal)
+	}
+	return json.Marshal([...]interface{}{float64(p.T) / 1000, h})
 }
 
 // Sample is a single sample belonging to a metric.
@@ -111,15 +182,27 @@ func (s Sample) String() string {
 	return fmt.Sprintf("%s => %s", s.Metric, s.Point)
 }
 
+// MarshalJSON is mirrored in web/api/v1/api.go with jsoniter because Point
+// wouldn't be marshaled with jsoniter in all cases otherwise.
 func (s Sample) MarshalJSON() ([]byte, error) {
-	v := struct {
+	if s.Point.H == nil {
+		v := struct {
+			M labels.Labels `json:"metric"`
+			V Point         `json:"value"`
+		}{
+			M: s.Metric,
+			V: s.Point,
+		}
+		return json.Marshal(v)
+	}
+	h := struct {
 		M labels.Labels `json:"metric"`
-		V Point         `json:"value"`
+		H Point         `json:"histogram"`
 	}{
 		M: s.Metric,
-		V: s.Point,
+		H: s.Point,
 	}
-	return json.Marshal(v)
+	return json.Marshal(h)
 }
 
 // Vector is basically only an alias for model.Samples, but the
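Together with Point.MarshalJSON above, a float sample keeps the familiar {"metric":...,"value":[ts,"v"]} shape, while a histogram sample moves to a "histogram" key whose payload carries count, sum, and per-bucket [boundaries, lower, upper, count] arrays. A hedged, standalone illustration (values and the series name are made up; the exact label encoding comes from the labels package):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/promql"
)

func main() {
	// One observation of 1.5 in a schema-0 native histogram: bucket (1, 2].
	h := &histogram.FloatHistogram{
		Schema:          0,
		Count:           1,
		Sum:             1.5,
		PositiveSpans:   []histogram.Span{{Offset: 1, Length: 1}},
		PositiveBuckets: []float64{1},
	}
	s := promql.Sample{
		Point:  promql.Point{T: 1000, H: h},
		Metric: labels.FromStrings("__name__", "hypothetical_series"),
	}
	b, _ := json.Marshal(s)
	fmt.Println(string(b))
	// Expected shape per the MarshalJSON implementations above, roughly:
	// {"metric":{"__name__":"hypothetical_series"},"histogram":[1,{"count":"1","sum":"1.5","buckets":[[0,"1","2","1"]]}]}
}
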
@@ -296,19 +379,23 @@ func newStorageSeriesIterator(series Series) *storageSeriesIterator {
 	}
 }
 
-func (ssi *storageSeriesIterator) Seek(t int64) bool {
+func (ssi *storageSeriesIterator) Seek(t int64) chunkenc.ValueType {
 	i := ssi.curr
 	if i < 0 {
 		i = 0
 	}
 	for ; i < len(ssi.points); i++ {
-		if ssi.points[i].T >= t {
+		p := ssi.points[i]
+		if p.T >= t {
 			ssi.curr = i
-			return true
+			if p.H != nil {
+				return chunkenc.ValFloatHistogram
+			}
+			return chunkenc.ValFloat
 		}
 	}
 	ssi.curr = len(ssi.points) - 1
-	return false
+	return chunkenc.ValNone
 }
 
 func (ssi *storageSeriesIterator) At() (t int64, v float64) {
@@ -316,9 +403,30 @@ func (ssi *storageSeriesIterator) At() (t int64, v float64) {
 	return p.T, p.V
 }
 
-func (ssi *storageSeriesIterator) Next() bool {
+func (ssi *storageSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
+	panic(errors.New("storageSeriesIterator: AtHistogram not supported"))
+}
+
+func (ssi *storageSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	p := ssi.points[ssi.curr]
+	return p.T, p.H
+}
+
+func (ssi *storageSeriesIterator) AtT() int64 {
+	p := ssi.points[ssi.curr]
+	return p.T
+}
+
+func (ssi *storageSeriesIterator) Next() chunkenc.ValueType {
 	ssi.curr++
-	return ssi.curr < len(ssi.points)
+	if ssi.curr >= len(ssi.points) {
+		return chunkenc.ValNone
+	}
+	p := ssi.points[ssi.curr]
+	if p.H != nil {
+		return chunkenc.ValFloatHistogram
+	}
+	return chunkenc.ValFloat
 }
 
 func (ssi *storageSeriesIterator) Err() error {
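With Next and Seek returning a chunkenc.ValueType, callers switch on the sample kind instead of assuming floats; the updated call sites below (it.Next() == chunkenc.ValFloat) are the degenerate float-only form. A sketch of the general consumption loop, assuming only the iterator interface shown in this diff:

package sketch

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// drainSeries walks a series that may mix floats and native histograms.
func drainSeries(it chunkenc.Iterator) error {
	for {
		switch it.Next() {
		case chunkenc.ValNone:
			return it.Err() // Exhausted, or failed; Err distinguishes.
		case chunkenc.ValFloat:
			t, v := it.At()
			fmt.Println("float", t, v)
		case chunkenc.ValHistogram:
			t, h := it.AtHistogram()
			fmt.Println("histogram", t, h.Count)
		case chunkenc.ValFloatHistogram:
			t, fh := it.AtFloatHistogram()
			fmt.Println("float histogram", t, fh.Count)
		}
	}
}
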
@@ -99,7 +99,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
 
 	results := []promql.Vector{
 		{
-			{
+			promql.Sample{
 				Metric: labels.FromStrings(
 					"__name__", "ALERTS",
 					"alertname", "HTTPRequestRateLow",
@@ -112,7 +112,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
 			},
 		},
 		{
-			{
+			promql.Sample{
 				Metric: labels.FromStrings(
 					"__name__", "ALERTS",
 					"alertname", "HTTPRequestRateLow",
@@ -125,7 +125,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
 			},
 		},
 		{
-			{
+			promql.Sample{
 				Metric: labels.FromStrings(
 					"__name__", "ALERTS",
 					"alertname", "HTTPRequestRateLow",
@@ -138,7 +138,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
 			},
 		},
 		{
-			{
+			promql.Sample{
 				Metric: labels.FromStrings(
 					"__name__", "ALERTS",
 					"alertname", "HTTPRequestRateLow",
@@ -209,7 +209,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
 		true, log.NewNopLogger(),
 	)
 	result := promql.Vector{
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS",
 				"alertname", "ExternalLabelDoesNotExist",
@@ -220,7 +220,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
 			),
 			Point: promql.Point{V: 1},
 		},
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS",
 				"alertname", "ExternalLabelExists",
@@ -303,7 +303,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
 		true, log.NewNopLogger(),
 	)
 	result := promql.Vector{
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS",
 				"alertname", "ExternalURLDoesNotExist",
@@ -314,7 +314,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
 			),
 			Point: promql.Point{V: 1},
 		},
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS",
 				"alertname", "ExternalURLExists",
@@ -387,7 +387,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
 		true, log.NewNopLogger(),
 	)
 	result := promql.Vector{
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS",
 				"alertname", "EmptyLabel",

@@ -39,6 +39,7 @@ import (
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/util/strutil"
 )
 
@@ -201,7 +202,7 @@ func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc {
 			return v, nil
 		case promql.Scalar:
 			return promql.Vector{promql.Sample{
-				Point:  promql.Point(v),
+				Point:  promql.Point{T: v.T, V: v.V},
 				Metric: labels.Labels{},
 			}}, nil
 		default:
@@ -798,7 +799,7 @@ func (g *Group) RestoreForState(ts time.Time) {
 			var t int64
 			var v float64
 			it := s.Iterator()
-			for it.Next() {
+			for it.Next() == chunkenc.ValFloat {
 				t, v = it.At()
 			}
 			if it.Err() != nil {

@@ -37,6 +37,7 @@ import (
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/util/teststorage"
 )
 
@@ -67,7 +68,7 @@ func TestAlertingRule(t *testing.T) {
 		labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
 	)
 	result := promql.Vector{
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS",
 				"alertname", "HTTPRequestRateLow",
@@ -79,7 +80,7 @@ func TestAlertingRule(t *testing.T) {
 			),
 			Point: promql.Point{V: 1},
 		},
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS",
 				"alertname", "HTTPRequestRateLow",
@@ -91,7 +92,7 @@ func TestAlertingRule(t *testing.T) {
 			),
 			Point: promql.Point{V: 1},
 		},
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS",
 				"alertname", "HTTPRequestRateLow",
@@ -103,7 +104,7 @@ func TestAlertingRule(t *testing.T) {
 			),
 			Point: promql.Point{V: 1},
 		},
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS",
 				"alertname", "HTTPRequestRateLow",
@@ -210,7 +211,7 @@ func TestForStateAddSamples(t *testing.T) {
 		labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
 	)
 	result := promql.Vector{
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS_FOR_STATE",
 				"alertname", "HTTPRequestRateLow",
@@ -221,7 +222,7 @@ func TestForStateAddSamples(t *testing.T) {
 			),
 			Point: promql.Point{V: 1},
 		},
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS_FOR_STATE",
 				"alertname", "HTTPRequestRateLow",
@@ -232,7 +233,7 @@ func TestForStateAddSamples(t *testing.T) {
 			),
 			Point: promql.Point{V: 1},
 		},
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS_FOR_STATE",
 				"alertname", "HTTPRequestRateLow",
@@ -243,7 +244,7 @@ func TestForStateAddSamples(t *testing.T) {
 			),
 			Point: promql.Point{V: 1},
 		},
-		{
+		promql.Sample{
 			Metric: labels.FromStrings(
 				"__name__", "ALERTS_FOR_STATE",
 				"alertname", "HTTPRequestRateLow",
@@ -597,7 +598,7 @@ func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.Point, error) {
 
 		points := []promql.Point{}
 		it := series.Iterator()
-		for it.Next() {
+		for it.Next() == chunkenc.ValFloat {
 			t, v := it.At()
 			points = append(points, promql.Point{T: t, V: v})
 		}

@@ -20,6 +20,7 @@ import (
 	"strings"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/storage"
@@ -41,6 +42,10 @@ func (a nopAppender) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.E
 	return 0, nil
 }
 
+func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram) (storage.SeriesRef, error) {
+	return 0, nil
+}
+
 func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
 	return 0, nil
 }
@@ -54,17 +59,25 @@ type sample struct {
 	v float64
 }
 
+type histogramSample struct {
+	t int64
+	h *histogram.Histogram
+}
+
 // collectResultAppender records all samples that were added through the appender.
 // It can be used as its zero value or be backed by another appender it writes samples through.
 type collectResultAppender struct {
 	next             storage.Appender
 	result           []sample
 	pendingResult    []sample
 	rolledbackResult []sample
 	pendingExemplars []exemplar.Exemplar
 	resultExemplars  []exemplar.Exemplar
-	pendingMetadata  []metadata.Metadata
-	resultMetadata   []metadata.Metadata
+	resultHistograms     []histogramSample
+	pendingHistograms    []histogramSample
+	rolledbackHistograms []histogramSample
+	pendingMetadata      []metadata.Metadata
+	resultMetadata       []metadata.Metadata
 }
 
@@ -97,6 +110,15 @@ func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.L
 	return a.next.AppendExemplar(ref, l, e)
 }
 
+func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
+	a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, t: t})
+	if a.next == nil {
+		return 0, nil
+	}
+
+	return a.next.AppendHistogram(ref, l, t, h)
+}
+
 func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
 	a.pendingMetadata = append(a.pendingMetadata, m)
 	if ref == 0 {
@@ -112,9 +134,11 @@ func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.L
 func (a *collectResultAppender) Commit() error {
 	a.result = append(a.result, a.pendingResult...)
 	a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...)
+	a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...)
 	a.resultMetadata = append(a.resultMetadata, a.pendingMetadata...)
 	a.pendingResult = nil
 	a.pendingExemplars = nil
+	a.pendingHistograms = nil
 	a.pendingMetadata = nil
 	if a.next == nil {
 		return nil
@@ -124,7 +148,9 @@ func (a *collectResultAppender) Commit() error {
 
 func (a *collectResultAppender) Rollback() error {
 	a.rolledbackResult = a.pendingResult
+	a.rolledbackHistograms = a.pendingHistograms
 	a.pendingResult = nil
+	a.pendingHistograms = nil
 	if a.next == nil {
 		return nil
 	}

@@ -132,6 +132,9 @@ type Options struct {
 	// Option to enable the experimental in-memory metadata storage and append
 	// metadata to the WAL.
 	EnableMetadataStorage bool
+	// Option to enable protobuf negotiation with the client. Note that the client can already
+	// send protobuf without needing to enable this.
+	EnableProtobufNegotiation bool
 	// Option to increase the interval used by scrape manager to throttle target groups updates.
 	DiscoveryReloadInterval model.Duration
 
@@ -40,6 +40,7 @@ import (
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/model/relabel"
@@ -242,6 +243,8 @@ type scrapePool struct {
 	newLoop func(scrapeLoopOptions) loop
 
 	noDefaultPort bool
+
+	enableProtobufNegotiation bool
 }
 
 type labelLimits struct {
@@ -283,15 +286,16 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
 
 	ctx, cancel := context.WithCancel(context.Background())
 	sp := &scrapePool{
 		cancel:        cancel,
 		appendable:    app,
 		config:        cfg,
 		client:        client,
 		activeTargets: map[uint64]*Target{},
 		loops:         map[uint64]loop{},
 		logger:        logger,
 		httpOpts:      options.HTTPClientOptions,
 		noDefaultPort: options.NoDefaultPort,
+		enableProtobufNegotiation: options.EnableProtobufNegotiation,
 	}
 	sp.newLoop = func(opts scrapeLoopOptions) loop {
 		// Update the targets retrieval function for metadata to a new scrape cache.
@@ -432,8 +436,12 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 
 		t := sp.activeTargets[fp]
 		interval, timeout, err := t.intervalAndTimeout(interval, timeout)
+		acceptHeader := scrapeAcceptHeader
+		if sp.enableProtobufNegotiation {
+			acceptHeader = scrapeAcceptHeaderWithProtobuf
+		}
 		var (
-			s       = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit}
+			s       = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader}
 			newLoop = sp.newLoop(scrapeLoopOptions{
 				target:  t,
 				scraper: s,
@@ -536,8 +544,11 @@ func (sp *scrapePool) sync(targets []*Target) {
 			// for every target.
 			var err error
 			interval, timeout, err = t.intervalAndTimeout(interval, timeout)
+			acceptHeader := scrapeAcceptHeader
+			if sp.enableProtobufNegotiation {
+				acceptHeader = scrapeAcceptHeaderWithProtobuf
+			}
-			s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit}
+			s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader}
 			l := sp.newLoop(scrapeLoopOptions{
 				target:  t,
 				scraper: s,
@@ -756,11 +767,15 @@ type targetScraper struct {
 	buf *bufio.Reader
 
 	bodySizeLimit int64
+	acceptHeader  string
 }
 
 var errBodySizeLimit = errors.New("body size limit exceeded")
 
-const acceptHeader = `application/openmetrics-text;version=1.0.0,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
+const (
+	scrapeAcceptHeader             = `application/openmetrics-text;version=1.0.0,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
+	scrapeAcceptHeaderWithProtobuf = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited,application/openmetrics-text;version=1.0.0;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
+)
 
 var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
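The two constants differ only in whether the protobuf media type leads the list; everything after it carries an explicit q-value below 1. The following standalone program (an illustration, not code from this commit) splits the protobuf-capable header into clauses and prints the preference each one carries:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// The protobuf-negotiation variant defined above.
    	accept := `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited,application/openmetrics-text;version=1.0.0;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`

    	for _, clause := range strings.Split(accept, ",") {
    		parts := strings.Split(clause, ";")
    		mediaType := strings.TrimSpace(parts[0])
    		q := "1 (implicit)" // a clause without q= has full preference
    		for _, p := range parts[1:] {
    			if v := strings.TrimSpace(p); strings.HasPrefix(v, "q=") {
    				q = strings.TrimPrefix(v, "q=")
    			}
    		}
    		fmt.Printf("%-40s q=%s\n", mediaType, q)
    	}
    }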
@@ -770,7 +785,7 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
 	if err != nil {
 		return "", err
 	}
-	req.Header.Add("Accept", acceptHeader)
+	req.Header.Add("Accept", s.acceptHeader)
 	req.Header.Add("Accept-Encoding", "gzip")
 	req.Header.Set("User-Agent", UserAgent)
 	req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64))
@@ -1510,8 +1525,12 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
 loop:
 	for {
 		var (
-			et          textparse.Entry
-			sampleAdded bool
+			et                       textparse.Entry
+			sampleAdded, isHistogram bool
+			met                      []byte
+			parsedTimestamp          *int64
+			val                      float64
+			h                        *histogram.Histogram
 		)
 		if et, err = p.Next(); err != nil {
 			if err == io.EOF {
@@ -1531,17 +1550,24 @@ loop:
 			continue
 		case textparse.EntryComment:
 			continue
+		case textparse.EntryHistogram:
+			isHistogram = true
 		default:
 		}
 		total++
 
 		t := defTime
-		met, tp, v := p.Series()
-		if !sl.honorTimestamps {
-			tp = nil
-		}
-		if tp != nil {
-			t = *tp
+		if isHistogram {
+			met, parsedTimestamp, h, _ = p.Histogram()
+			// TODO: ingest float histograms in tsdb.
+		} else {
+			met, parsedTimestamp, val = p.Series()
+		}
+		if !sl.honorTimestamps {
+			parsedTimestamp = nil
+		}
+		if parsedTimestamp != nil {
+			t = *parsedTimestamp
 		}
 
 		// Zero metadata out for current iteration until it's resolved.
@@ -1594,8 +1620,14 @@ loop:
 			updateMetadata(lset, true)
 		}
 
-		ref, err = app.Append(ref, lset, t, v)
-		sampleAdded, err = sl.checkAddError(ce, met, tp, err, &sampleLimitErr, &appErrs)
+		if isHistogram {
+			if h != nil {
+				ref, err = app.AppendHistogram(ref, lset, t, h)
+			}
+		} else {
+			ref, err = app.Append(ref, lset, t, val)
+		}
+		sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &appErrs)
 		if err != nil {
 			if err != storage.ErrNotFound {
 				level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)
@@ -1604,7 +1636,7 @@ loop:
 		}
 
 		if !ok {
-			if tp == nil {
+			if parsedTimestamp == nil {
 				// Bypass staleness logic if there is an explicit timestamp.
 				sl.cache.trackStaleness(hash, lset)
 			}
@@ -44,6 +44,7 @@ import (
 	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/model/value"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/util/teststorage"
 	"github.com/prometheus/prometheus/util/testutil"
 )
@@ -2146,11 +2147,15 @@ func TestTargetScraperScrapeOK(t *testing.T) {
 		expectedTimeout = "1.5"
 	)
 
+	var protobufParsing bool
+
 	server := httptest.NewServer(
 		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-			accept := r.Header.Get("Accept")
-			if !strings.HasPrefix(accept, "application/openmetrics-text;") {
-				t.Errorf("Expected Accept header to prefer application/openmetrics-text, got %q", accept)
+			if protobufParsing {
+				accept := r.Header.Get("Accept")
+				if !strings.HasPrefix(accept, "application/vnd.google.protobuf;") {
+					t.Errorf("Expected Accept header to prefer application/vnd.google.protobuf, got %q", accept)
+				}
 			}
 
 			timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds")
@@ -2169,22 +2174,29 @@ func TestTargetScraperScrapeOK(t *testing.T) {
 		panic(err)
 	}
 
-	ts := &targetScraper{
-		Target: &Target{
-			labels: labels.FromStrings(
-				model.SchemeLabel, serverURL.Scheme,
-				model.AddressLabel, serverURL.Host,
-			),
-		},
-		client:  http.DefaultClient,
-		timeout: configTimeout,
-	}
-	var buf bytes.Buffer
+	runTest := func(acceptHeader string) {
+		ts := &targetScraper{
+			Target: &Target{
+				labels: labels.FromStrings(
+					model.SchemeLabel, serverURL.Scheme,
+					model.AddressLabel, serverURL.Host,
+				),
+			},
+			client:       http.DefaultClient,
+			timeout:      configTimeout,
+			acceptHeader: acceptHeader,
+		}
+		var buf bytes.Buffer
 
-	contentType, err := ts.scrape(context.Background(), &buf)
-	require.NoError(t, err)
-	require.Equal(t, "text/plain; version=0.0.4", contentType)
-	require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
+		contentType, err := ts.scrape(context.Background(), &buf)
+		require.NoError(t, err)
+		require.Equal(t, "text/plain; version=0.0.4", contentType)
+		require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
+	}
+
+	runTest(scrapeAcceptHeader)
+	protobufParsing = true
+	runTest(scrapeAcceptHeaderWithProtobuf)
 }
 
 func TestTargetScrapeScrapeCancel(t *testing.T) {
@@ -2209,7 +2221,8 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
 				model.AddressLabel, serverURL.Host,
 			),
 		},
-		client: http.DefaultClient,
+		client:       http.DefaultClient,
+		acceptHeader: scrapeAcceptHeader,
 	}
 	ctx, cancel := context.WithCancel(context.Background())
@@ -2262,7 +2275,8 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
 				model.AddressLabel, serverURL.Host,
 			),
 		},
-		client: http.DefaultClient,
+		client:       http.DefaultClient,
+		acceptHeader: scrapeAcceptHeader,
 	}
 
 	_, err = ts.scrape(context.Background(), io.Discard)
@@ -2304,6 +2318,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
 		},
 		client:        http.DefaultClient,
 		bodySizeLimit: bodySizeLimit,
+		acceptHeader:  scrapeAcceptHeader,
 	}
 	var buf bytes.Buffer
 
@@ -2900,7 +2915,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
 		c := 0
 		for series.Next() {
 			i := series.At().Iterator()
-			for i.Next() {
+			for i.Next() != chunkenc.ValNone {
 				c++
 			}
 		}
@@ -2973,7 +2988,7 @@ func TestScrapeReportLimit(t *testing.T) {
 	var found bool
 	for series.Next() {
 		i := series.At().Iterator()
-		for i.Next() {
+		for i.Next() == chunkenc.ValFloat {
 			_, v := i.At()
 			require.Equal(t, 1.0, v)
 			found = true
@@ -40,14 +40,16 @@ for dir in ${DIRS}; do
 		-I="${PROM_PATH}" \
 		-I="${GRPC_GATEWAY_ROOT}/third_party/googleapis" \
 		./*.proto
+	protoc --gogofast_out=Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,paths=source_relative:. -I=. \
+		-I="${GOGOPROTO_PATH}" \
+		./io/prometheus/client/*.proto
 	sed -i.bak -E 's/import _ \"github.com\/gogo\/protobuf\/gogoproto\"//g' -- *.pb.go
 	sed -i.bak -E 's/import _ \"google\/protobuf\"//g' -- *.pb.go
 	sed -i.bak -E 's/\t_ \"google\/protobuf\"//g' -- *.pb.go
 	sed -i.bak -E 's/golang\/protobuf\/descriptor/gogo\/protobuf\/protoc-gen-gogo\/descriptor/g' -- *.go
 	sed -i.bak -E 's/golang\/protobuf/gogo\/protobuf/g' -- *.go
 	rm -f -- *.bak
-	goimports -w ./*.go
+	goimports -w ./*.go ./io/prometheus/client/*.go
 	popd
 done
@@ -14,8 +14,10 @@
 package storage
 
 import (
+	"fmt"
 	"math"
 
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
@@ -25,8 +27,8 @@ type BufferedSeriesIterator struct {
 	buf   *sampleRing
 	delta int64
 
-	lastTime int64
-	ok       bool
+	lastTime  int64
+	valueType chunkenc.ValueType
 }
 
 // NewBuffer returns a new iterator that buffers the values within the time range
@@ -39,6 +41,7 @@ func NewBuffer(delta int64) *BufferedSeriesIterator {
 // NewBufferIterator returns a new iterator that buffers the values within the
 // time range of the current element and the duration of delta before.
 func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator {
+	// TODO(codesome): based on encoding, allocate different buffer.
 	bit := &BufferedSeriesIterator{
 		buf:   newSampleRing(delta, 16),
 		delta: delta,
@@ -53,10 +56,9 @@ func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterato
 func (b *BufferedSeriesIterator) Reset(it chunkenc.Iterator) {
 	b.it = it
 	b.lastTime = math.MinInt64
-	b.ok = true
 	b.buf.reset()
 	b.buf.delta = b.delta
-	it.Next()
+	b.valueType = it.Next()
 }
 
 // ReduceDelta lowers the buffered time delta, for the current SeriesIterator only.
@@ -66,8 +68,9 @@ func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool {
 
 // PeekBack returns the nth previous element of the iterator. If there is none buffered,
 // ok is false.
-func (b *BufferedSeriesIterator) PeekBack(n int) (t int64, v float64, ok bool) {
-	return b.buf.nthLast(n)
+func (b *BufferedSeriesIterator) PeekBack(n int) (t int64, v float64, h *histogram.Histogram, ok bool) {
+	s, ok := b.buf.nthLast(n)
+	return s.t, s.v, s.h, ok
 }
 
 // Buffer returns an iterator over the buffered data. Invalidates previously
@@ -77,63 +80,96 @@ func (b *BufferedSeriesIterator) Buffer() chunkenc.Iterator {
 }
 
 // Seek advances the iterator to the element at time t or greater.
-func (b *BufferedSeriesIterator) Seek(t int64) bool {
+func (b *BufferedSeriesIterator) Seek(t int64) chunkenc.ValueType {
 	t0 := t - b.buf.delta
 
 	// If the delta would cause us to seek backwards, preserve the buffer
 	// and just continue regular advancement while filling the buffer on the way.
-	if b.ok && t0 > b.lastTime {
+	if b.valueType != chunkenc.ValNone && t0 > b.lastTime {
 		b.buf.reset()
 
-		b.ok = b.it.Seek(t0)
-		if !b.ok {
-			return false
+		b.valueType = b.it.Seek(t0)
+		switch b.valueType {
+		case chunkenc.ValNone:
+			return chunkenc.ValNone
+		case chunkenc.ValFloat:
+			b.lastTime, _ = b.At()
+		case chunkenc.ValHistogram:
+			b.lastTime, _ = b.AtHistogram()
+		case chunkenc.ValFloatHistogram:
+			b.lastTime, _ = b.AtFloatHistogram()
+		default:
+			panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType))
 		}
-		b.lastTime, _ = b.At()
 	}
 
 	if b.lastTime >= t {
-		return true
+		return b.valueType
 	}
-	for b.Next() {
-		if b.lastTime >= t {
-			return true
+	for {
+		if b.valueType = b.Next(); b.valueType == chunkenc.ValNone || b.lastTime >= t {
+			return b.valueType
 		}
 	}
-
-	return false
 }
 
 // Next advances the iterator to the next element.
-func (b *BufferedSeriesIterator) Next() bool {
-	if !b.ok {
-		return false
-	}
-
+func (b *BufferedSeriesIterator) Next() chunkenc.ValueType {
 	// Add current element to buffer before advancing.
-	b.buf.add(b.it.At())
-
-	b.ok = b.it.Next()
-	if b.ok {
-		b.lastTime, _ = b.At()
+	switch b.valueType {
+	case chunkenc.ValNone:
+		return chunkenc.ValNone
+	case chunkenc.ValFloat:
+		t, v := b.it.At()
+		b.buf.add(sample{t: t, v: v})
+	case chunkenc.ValHistogram:
+		t, h := b.it.AtHistogram()
+		b.buf.add(sample{t: t, h: h})
+	case chunkenc.ValFloatHistogram:
+		t, fh := b.it.AtFloatHistogram()
+		b.buf.add(sample{t: t, fh: fh})
+	default:
+		panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType))
 	}
 
-	return b.ok
+	b.valueType = b.it.Next()
+	if b.valueType != chunkenc.ValNone {
+		b.lastTime = b.AtT()
+	}
+	return b.valueType
 }
 
-// At returns the current element of the iterator.
+// At returns the current float element of the iterator.
 func (b *BufferedSeriesIterator) At() (int64, float64) {
 	return b.it.At()
 }
 
+// AtHistogram returns the current histogram element of the iterator.
+func (b *BufferedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
+	return b.it.AtHistogram()
+}
+
+// AtFloatHistogram returns the current float-histogram element of the iterator.
+func (b *BufferedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	return b.it.AtFloatHistogram()
+}
+
+// AtT returns the current timestamp of the iterator.
+func (b *BufferedSeriesIterator) AtT() int64 {
+	return b.it.AtT()
+}
+
 // Err returns the last encountered error.
 func (b *BufferedSeriesIterator) Err() error {
 	return b.it.Err()
 }
 
+// TODO(beorn7): Consider having different sample types for different value types.
 type sample struct {
-	t int64
-	v float64
+	t  int64
+	v  float64
+	h  *histogram.Histogram
+	fh *histogram.FloatHistogram
 }
 
 func (s sample) T() int64 {
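Throughout this diff, Next and Seek stop returning a bool and instead report a chunkenc.ValueType, so a caller must dispatch on the kind before choosing between At, AtHistogram, and AtFloatHistogram. A minimal sketch of the resulting consumption pattern, written as if inside package storage (sample is the unexported struct above):

    // drain collects every remaining element of it, whatever its type.
    func drain(it chunkenc.Iterator) ([]sample, error) {
    	var out []sample
    	for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
    		switch vt {
    		case chunkenc.ValFloat:
    			t, v := it.At()
    			out = append(out, sample{t: t, v: v})
    		case chunkenc.ValHistogram:
    			t, h := it.AtHistogram()
    			out = append(out, sample{t: t, h: h})
    		case chunkenc.ValFloatHistogram:
    			t, fh := it.AtFloatHistogram()
    			out = append(out, sample{t: t, fh: fh})
    		}
    	}
    	return out, it.Err()
    }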
@@ -144,6 +180,25 @@ func (s sample) V() float64 {
 	return s.v
 }
 
+func (s sample) H() *histogram.Histogram {
+	return s.h
+}
+
+func (s sample) FH() *histogram.FloatHistogram {
+	return s.fh
+}
+
+func (s sample) Type() chunkenc.ValueType {
+	switch {
+	case s.h != nil:
+		return chunkenc.ValHistogram
+	case s.fh != nil:
+		return chunkenc.ValFloatHistogram
+	default:
+		return chunkenc.ValFloat
+	}
+}
+
 type sampleRing struct {
 	delta int64
 
@@ -180,13 +235,24 @@ type sampleRingIterator struct {
 	i int
 }
 
-func (it *sampleRingIterator) Next() bool {
+func (it *sampleRingIterator) Next() chunkenc.ValueType {
 	it.i++
-	return it.i < it.r.l
+	if it.i >= it.r.l {
+		return chunkenc.ValNone
+	}
+	s := it.r.at(it.i)
+	switch {
+	case s.h != nil:
+		return chunkenc.ValHistogram
+	case s.fh != nil:
+		return chunkenc.ValFloatHistogram
+	default:
+		return chunkenc.ValFloat
+	}
 }
 
-func (it *sampleRingIterator) Seek(int64) bool {
-	return false
+func (it *sampleRingIterator) Seek(int64) chunkenc.ValueType {
+	return chunkenc.ValNone
 }
 
 func (it *sampleRingIterator) Err() error {
@@ -194,18 +260,36 @@ func (it *sampleRingIterator) Err() error {
 }
 
 func (it *sampleRingIterator) At() (int64, float64) {
-	return it.r.at(it.i)
+	s := it.r.at(it.i)
+	return s.t, s.v
 }
 
+func (it *sampleRingIterator) AtHistogram() (int64, *histogram.Histogram) {
+	s := it.r.at(it.i)
+	return s.t, s.h
+}
+
+func (it *sampleRingIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	s := it.r.at(it.i)
+	if s.fh == nil {
+		return s.t, s.h.ToFloat()
+	}
+	return s.t, s.fh
+}
+
+func (it *sampleRingIterator) AtT() int64 {
+	s := it.r.at(it.i)
+	return s.t
+}
+
-func (r *sampleRing) at(i int) (int64, float64) {
+func (r *sampleRing) at(i int) sample {
 	j := (r.f + i) % len(r.buf)
-	s := r.buf[j]
-	return s.t, s.v
+	return r.buf[j]
 }
 
 // add adds a sample to the ring buffer and frees all samples that fall
 // out of the delta range.
-func (r *sampleRing) add(t int64, v float64) {
+func (r *sampleRing) add(s sample) {
 	l := len(r.buf)
 	// Grow the ring buffer if it fits no more elements.
 	if l == r.l {
@@ -224,11 +308,11 @@ func (r *sampleRing) add(t int64, v float64) {
 		}
 	}
 
-	r.buf[r.i] = sample{t: t, v: v}
+	r.buf[r.i] = s
 	r.l++
 
 	// Free head of the buffer of samples that just fell out of the range.
-	tmin := t - r.delta
+	tmin := s.t - r.delta
 	for r.buf[r.f].t < tmin {
 		r.f++
 		if r.f >= l {
@@ -264,12 +348,11 @@ func (r *sampleRing) reduceDelta(delta int64) bool {
 }
 
 // nthLast returns the nth most recent element added to the ring.
-func (r *sampleRing) nthLast(n int) (int64, float64, bool) {
+func (r *sampleRing) nthLast(n int) (sample, bool) {
 	if n > r.l {
-		return 0, 0, false
+		return sample{}, false
 	}
-	t, v := r.at(r.l - n)
-	return t, v, true
+	return r.at(r.l - n), true
 }
 
 func (r *sampleRing) samples() []sample {
@@ -18,6 +18,9 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
 func TestSampleRing(t *testing.T) {
@@ -64,7 +67,7 @@ func TestSampleRing(t *testing.T) {
 		}
 
 		for i, s := range input {
-			r.add(s.t, s.v)
+			r.add(s)
 			buffered := r.samples()
 
 			for _, sold := range input[:i] {
@@ -92,7 +95,7 @@ func TestBufferedSeriesIterator(t *testing.T) {
 	bufferEq := func(exp []sample) {
 		var b []sample
 		bit := it.Buffer()
-		for bit.Next() {
+		for bit.Next() == chunkenc.ValFloat {
 			t, v := bit.At()
 			b = append(b, sample{t: t, v: v})
 		}
@@ -104,7 +107,7 @@ func TestBufferedSeriesIterator(t *testing.T) {
 		require.Equal(t, ev, v, "value mismatch")
 	}
 	prevSampleEq := func(ets int64, ev float64, eok bool) {
-		ts, v, ok := it.PeekBack(1)
+		ts, v, _, ok := it.PeekBack(1)
 		require.Equal(t, eok, ok, "exist mismatch")
 		require.Equal(t, ets, ts, "timestamp mismatch")
 		require.Equal(t, ev, v, "value mismatch")
@@ -121,35 +124,35 @@ func TestBufferedSeriesIterator(t *testing.T) {
 		sample{t: 101, v: 10},
 	}), 2)
 
-	require.True(t, it.Seek(-123), "seek failed")
+	require.Equal(t, chunkenc.ValFloat, it.Seek(-123), "seek failed")
 	sampleEq(1, 2)
 	prevSampleEq(0, 0, false)
 	bufferEq(nil)
 
-	require.True(t, it.Next(), "next failed")
+	require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed")
 	sampleEq(2, 3)
 	prevSampleEq(1, 2, true)
 	bufferEq([]sample{{t: 1, v: 2}})
 
-	require.True(t, it.Next(), "next failed")
-	require.True(t, it.Next(), "next failed")
-	require.True(t, it.Next(), "next failed")
+	require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed")
+	require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed")
+	require.Equal(t, chunkenc.ValFloat, it.Next(), "next failed")
 	sampleEq(5, 6)
 	prevSampleEq(4, 5, true)
 	bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}})
 
-	require.True(t, it.Seek(5), "seek failed")
+	require.Equal(t, chunkenc.ValFloat, it.Seek(5), "seek failed")
 	sampleEq(5, 6)
 	prevSampleEq(4, 5, true)
 	bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}})
 
-	require.True(t, it.Seek(101), "seek failed")
+	require.Equal(t, chunkenc.ValFloat, it.Seek(101), "seek failed")
 	sampleEq(101, 10)
 	prevSampleEq(100, 9, true)
 	bufferEq([]sample{{t: 99, v: 8}, {t: 100, v: 9}})
 
-	require.False(t, it.Next(), "next succeeded unexpectedly")
-	require.False(t, it.Seek(1024), "seek succeeded unexpectedly")
+	require.Equal(t, chunkenc.ValNone, it.Next(), "next succeeded unexpectedly")
+	require.Equal(t, chunkenc.ValNone, it.Seek(1024), "seek succeeded unexpectedly")
 }
 
 // At() should not be called once Next() returns false.
@@ -157,14 +160,19 @@ func TestBufferedSeriesIteratorNoBadAt(t *testing.T) {
 	done := false
 
 	m := &mockSeriesIterator{
-		seek: func(int64) bool { return false },
+		seek: func(int64) chunkenc.ValueType { return chunkenc.ValNone },
 		at: func() (int64, float64) {
 			require.False(t, done, "unexpectedly done")
 			done = true
 			return 0, 0
 		},
-		next: func() bool { return !done },
+		next: func() chunkenc.ValueType {
+			if done {
+				return chunkenc.ValNone
+			}
+			return chunkenc.ValFloat
+		},
 		err: func() error { return nil },
 	}
 
 	it := NewBufferIterator(m, 60)
@@ -180,23 +188,35 @@ func BenchmarkBufferedSeriesIterator(b *testing.B) {
 	b.ReportAllocs()
 	b.ResetTimer()
 
-	for it.Next() {
+	for it.Next() != chunkenc.ValNone {
 		// scan everything
 	}
 	require.NoError(b, it.Err())
 }
 
 type mockSeriesIterator struct {
-	seek func(int64) bool
+	seek func(int64) chunkenc.ValueType
 	at   func() (int64, float64)
-	next func() bool
+	next func() chunkenc.ValueType
 	err  func() error
 }
 
-func (m *mockSeriesIterator) Seek(t int64) bool { return m.seek(t) }
-func (m *mockSeriesIterator) At() (int64, float64) { return m.at() }
-func (m *mockSeriesIterator) Next() bool { return m.next() }
-func (m *mockSeriesIterator) Err() error { return m.err() }
+func (m *mockSeriesIterator) Seek(t int64) chunkenc.ValueType { return m.seek(t) }
+func (m *mockSeriesIterator) At() (int64, float64)            { return m.at() }
+func (m *mockSeriesIterator) Next() chunkenc.ValueType        { return m.next() }
+func (m *mockSeriesIterator) Err() error                      { return m.err() }
+
+func (m *mockSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
+	return 0, nil // Not really mocked.
+}
+
+func (m *mockSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	return 0, nil // Not really mocked.
+}
+
+func (m *mockSeriesIterator) AtT() int64 {
+	return 0 // Not really mocked.
+}
 
 type fakeSeriesIterator struct {
 	nsamples int64
@@ -209,17 +229,35 @@ func newFakeSeriesIterator(nsamples, step int64) *fakeSeriesIterator {
 }
 
 func (it *fakeSeriesIterator) At() (int64, float64) {
-	return it.idx * it.step, 123 // value doesn't matter
+	return it.idx * it.step, 123 // Value doesn't matter.
 }
 
+func (it *fakeSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
+	return it.idx * it.step, &histogram.Histogram{} // Value doesn't matter.
+}
+
+func (it *fakeSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	return it.idx * it.step, &histogram.FloatHistogram{} // Value doesn't matter.
+}
+
+func (it *fakeSeriesIterator) AtT() int64 {
+	return it.idx * it.step
+}
+
-func (it *fakeSeriesIterator) Next() bool {
+func (it *fakeSeriesIterator) Next() chunkenc.ValueType {
 	it.idx++
-	return it.idx < it.nsamples
+	if it.idx >= it.nsamples {
+		return chunkenc.ValNone
+	}
+	return chunkenc.ValFloat
 }
 
-func (it *fakeSeriesIterator) Seek(t int64) bool {
+func (it *fakeSeriesIterator) Seek(t int64) chunkenc.ValueType {
 	it.idx = t / it.step
-	return it.idx < it.nsamples
+	if it.idx >= it.nsamples {
+		return chunkenc.ValNone
+	}
+	return chunkenc.ValFloat
 }
 
 func (it *fakeSeriesIterator) Err() error { return nil }
@@ -21,6 +21,7 @@ import (
 	"github.com/prometheus/common/model"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
 	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
@@ -173,6 +174,20 @@ func (f *fanoutAppender) AppendExemplar(ref SeriesRef, l labels.Labels, e exempl
 	return ref, nil
 }
 
+func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error) {
+	ref, err := f.primary.AppendHistogram(ref, l, t, h)
+	if err != nil {
+		return ref, err
+	}
+
+	for _, appender := range f.secondaries {
+		if _, err := appender.AppendHistogram(ref, l, t, h); err != nil {
+			return 0, err
+		}
+	}
+	return ref, nil
+}
+
 func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) {
 	ref, err := f.primary.UpdateMetadata(ref, l, m)
 	if err != nil {
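AppendHistogram follows the established fanout semantics: the primary is appended first, and the reference it returns is forwarded to every secondary, so a primary failure aborts before any secondary sees the histogram. A hedged end-to-end sketch (assuming two ready storage.Storage instances; the metric name is made up):

    package example

    import (
    	"context"

    	"github.com/go-kit/log"

    	"github.com/prometheus/prometheus/model/histogram"
    	"github.com/prometheus/prometheus/model/labels"
    	"github.com/prometheus/prometheus/storage"
    )

    // fanoutAppend writes one histogram through a fanout of two storages.
    func fanoutAppend(ctx context.Context, primary, secondary storage.Storage, h *histogram.Histogram) error {
    	fan := storage.NewFanout(log.NewNopLogger(), primary, secondary)
    	app := fan.Appender(ctx)
    	lbls := labels.FromStrings("__name__", "fanout_example")
    	if _, err := app.AppendHistogram(0, lbls, 0, h); err != nil {
    		_ = app.Rollback()
    		return err
    	}
    	return app.Commit()
    }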
@@ -23,6 +23,7 @@ import (
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/util/teststorage"
 )
 
@@ -90,7 +91,7 @@ func TestFanout_SelectSorted(t *testing.T) {
 			seriesLabels := series.Labels()
 			labelsResult = seriesLabels
 			iterator := series.Iterator()
-			for iterator.Next() {
+			for iterator.Next() == chunkenc.ValFloat {
 				timestamp, value := iterator.At()
 				result[timestamp] = value
 			}
@@ -116,7 +117,7 @@ func TestFanout_SelectSorted(t *testing.T) {
 			seriesLabels := series.Labels()
 			labelsResult = seriesLabels
 			iterator := series.Iterator()
-			for iterator.Next() {
+			for iterator.Next() == chunkenc.ValFloat {
 				timestamp, value := iterator.At()
 				result[timestamp] = value
 			}
@@ -19,6 +19,7 @@ import (
 	"fmt"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
@@ -35,11 +36,16 @@ var (
 	// ErrTooOldSample is when out of order support is enabled but the sample is outside the time window allowed.
 	ErrTooOldSample = errors.New("too old sample")
 	// ErrDuplicateSampleForTimestamp is when the sample has same timestamp but different value.
 	ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp")
 	ErrOutOfOrderExemplar          = errors.New("out of order exemplar")
 	ErrDuplicateExemplar           = errors.New("duplicate exemplar")
 	ErrExemplarLabelLength         = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength)
 	ErrExemplarsDisabled           = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0")
+	ErrNativeHistogramsDisabled      = fmt.Errorf("native histograms are disabled")
+	ErrHistogramCountNotBigEnough    = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
+	ErrHistogramNegativeBucketCount  = errors.New("histogram has a bucket whose observation count is negative")
+	ErrHistogramSpanNegativeOffset   = errors.New("histogram has a span whose offset is negative")
+	ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided")
 )
 
 // SeriesRef is a generic series reference. In prometheus it is either a
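The four ErrHistogram* values back structural validation of incoming native histograms. The invariant behind ErrHistogramCountNotBigEnough, for instance, can be sketched like this (an illustration, not the actual tsdb validation code; bucket slices hold delta-encoded absolute counts, assumed non-negative per ErrHistogramNegativeBucketCount):

    // countConsistent reports whether h.Count covers the zero bucket plus
    // every regular bucket's observation count.
    func countConsistent(h *histogram.Histogram) bool {
    	sum := h.ZeroCount
    	var cur int64
    	for _, d := range h.PositiveBuckets { // deltas; cur is the absolute count
    		cur += d
    		sum += uint64(cur)
    	}
    	cur = 0
    	for _, d := range h.NegativeBuckets {
    		cur += d
    		sum += uint64(cur)
    	}
    	return h.Count >= sum
    }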
@@ -207,6 +213,9 @@ func (f QueryableFunc) Querier(ctx context.Context, mint, maxt int64) (Querier,
 // It must be completed with a call to Commit or Rollback and must not be reused afterwards.
 //
 // Operations on the Appender interface are not goroutine-safe.
+//
+// The type of samples (float64, histogram, etc) appended for a given series must remain same within an Appender.
+// The behaviour is undefined if samples of different types are appended to the same series in a single Commit().
 type Appender interface {
 	// Append adds a sample pair for the given series.
 	// An optional series reference can be provided to accelerate calls.
@@ -227,7 +236,9 @@ type Appender interface {
 	// Rollback rolls back all modifications made in the appender so far.
 	// Appender has to be discarded after rollback.
 	Rollback() error
+
 	ExemplarAppender
+	HistogramAppender
 	MetadataUpdater
 }
 
@@ -257,6 +268,22 @@ type ExemplarAppender interface {
 	AppendExemplar(ref SeriesRef, l labels.Labels, e exemplar.Exemplar) (SeriesRef, error)
 }
 
+// HistogramAppender provides an interface for appending histograms to the storage.
+type HistogramAppender interface {
+	// AppendHistogram adds a histogram for the given series labels. An
+	// optional reference number can be provided to accelerate calls. A
+	// reference number is returned which can be used to add further
+	// histograms in the same or later transactions. Returned reference
+	// numbers are ephemeral and may be rejected in calls to Append() at any
+	// point. Adding the sample via Append() returns a new reference number.
+	// If the reference is 0, it must not be used for caching.
+	//
+	// For efficiency reasons, the histogram is passed as a
+	// pointer. AppendHistogram won't mutate the histogram, but in turn
+	// depends on the caller to not mutate it either.
+	AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error)
+}
+
 // MetadataUpdater provides an interface for associating metadata to stored series.
 type MetadataUpdater interface {
 	// UpdateMetadata updates a metadata entry for the given series and labels.
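A hedged usage sketch for the new interface: appending one integer native histogram through any storage.Appendable. The histogram literal is illustrative; note that Count covers ZeroCount plus all bucket counts, matching the validation errors introduced above.

    package example

    import (
    	"context"
    	"time"

    	"github.com/prometheus/prometheus/model/histogram"
    	"github.com/prometheus/prometheus/model/labels"
    	"github.com/prometheus/prometheus/storage"
    )

    func appendOneHistogram(ctx context.Context, db storage.Appendable) error {
    	app := db.Appender(ctx)
    	h := &histogram.Histogram{
    		Schema:          0,
    		Count:           9,
    		Sum:             25.3,
    		ZeroThreshold:   0.001,
    		ZeroCount:       2,
    		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
    		PositiveBuckets: []int64{3, 1}, // delta-encoded: absolute counts 3 and 4
    	}
    	lbls := labels.FromStrings("__name__", "request_duration_seconds")
    	if _, err := app.AppendHistogram(0, lbls, time.Now().UnixMilli(), h); err != nil {
    		_ = app.Rollback()
    		return err
    	}
    	return app.Commit()
    }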
@@ -16,6 +16,7 @@ package storage
 import (
 	"math"
 
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
@@ -24,12 +25,18 @@ type MemoizedSeriesIterator struct {
 	it    chunkenc.Iterator
 	delta int64
 
-	lastTime int64
-	ok       bool
+	lastTime  int64
+	valueType chunkenc.ValueType
 
 	// Keep track of the previously returned value.
-	prevTime  int64
-	prevValue float64
+	prevTime           int64
+	prevValue          float64
+	prevHistogram      *histogram.Histogram
+	prevFloatHistogram *histogram.FloatHistogram
+	// TODO(beorn7): MemoizedSeriesIterator is currently only used by the
+	// PromQL engine, which only works with FloatHistograms. For better
+	// performance, we could change MemoizedSeriesIterator to also only
+	// handle FloatHistograms.
 }
 
 // NewMemoizedEmptyIterator is like NewMemoizedIterator but it's initialised with an empty iterator.
@@ -53,70 +60,93 @@ func NewMemoizedIterator(it chunkenc.Iterator, delta int64) *MemoizedSeriesItera
 func (b *MemoizedSeriesIterator) Reset(it chunkenc.Iterator) {
 	b.it = it
 	b.lastTime = math.MinInt64
-	b.ok = true
 	b.prevTime = math.MinInt64
-	it.Next()
+	b.valueType = it.Next()
 }
 
 // PeekPrev returns the previous element of the iterator. If there is none buffered,
 // ok is false.
-func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, ok bool) {
+func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, ok bool) {
 	if b.prevTime == math.MinInt64 {
-		return 0, 0, false
+		return 0, 0, nil, nil, false
 	}
-	return b.prevTime, b.prevValue, true
+	return b.prevTime, b.prevValue, b.prevHistogram, b.prevFloatHistogram, true
 }
 
 // Seek advances the iterator to the element at time t or greater.
-func (b *MemoizedSeriesIterator) Seek(t int64) bool {
+func (b *MemoizedSeriesIterator) Seek(t int64) chunkenc.ValueType {
 	t0 := t - b.delta
 
-	if b.ok && t0 > b.lastTime {
+	if b.valueType != chunkenc.ValNone && t0 > b.lastTime {
 		// Reset the previously stored element because the seek advanced
 		// more than the delta.
 		b.prevTime = math.MinInt64
 
-		b.ok = b.it.Seek(t0)
-		if !b.ok {
-			return false
+		b.valueType = b.it.Seek(t0)
+		if b.valueType == chunkenc.ValNone {
+			return chunkenc.ValNone
 		}
-		b.lastTime, _ = b.it.At()
+		b.lastTime = b.it.AtT()
 	}
 
 	if b.lastTime >= t {
-		return true
+		return b.valueType
 	}
-	for b.Next() {
+	for b.Next() != chunkenc.ValNone {
 		if b.lastTime >= t {
-			return true
+			return b.valueType
 		}
 	}
 
-	return false
+	return chunkenc.ValNone
}
 
 // Next advances the iterator to the next element.
-func (b *MemoizedSeriesIterator) Next() bool {
-	if !b.ok {
-		return false
-	}
-
+func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType {
 	// Keep track of the previous element.
-	b.prevTime, b.prevValue = b.it.At()
-
-	b.ok = b.it.Next()
-	if b.ok {
-		b.lastTime, _ = b.it.At()
+	switch b.valueType {
+	case chunkenc.ValNone:
+		return chunkenc.ValNone
+	case chunkenc.ValFloat:
+		b.prevTime, b.prevValue = b.it.At()
+		b.prevHistogram = nil
+		b.prevFloatHistogram = nil
+	case chunkenc.ValHistogram:
+		b.prevValue = 0
+		b.prevTime, b.prevHistogram = b.it.AtHistogram()
+		_, b.prevFloatHistogram = b.it.AtFloatHistogram()
+	case chunkenc.ValFloatHistogram:
+		b.prevValue = 0
+		b.prevHistogram = nil
+		b.prevTime, b.prevFloatHistogram = b.it.AtFloatHistogram()
 	}
 
-	return b.ok
+	b.valueType = b.it.Next()
+	if b.valueType != chunkenc.ValNone {
+		b.lastTime = b.it.AtT()
+	}
+	return b.valueType
 }
 
-// At returns the current element of the iterator.
+// At returns the current float element of the iterator.
 func (b *MemoizedSeriesIterator) At() (int64, float64) {
 	return b.it.At()
 }
 
+// AtHistogram returns the current histogram element of the iterator.
+func (b *MemoizedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
+	return b.it.AtHistogram()
+}
+
+// AtFloatHistogram returns the current float-histogram element of the iterator.
+func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	return b.it.AtFloatHistogram()
+}
+
+// AtT returns the current timestamp of the iterator.
+func (b *MemoizedSeriesIterator) AtT() int64 {
+	return b.it.AtT()
+}
+
 // Err returns the last encountered error.
 func (b *MemoizedSeriesIterator) Err() error {
 	return b.it.Err()
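PeekPrev now surfaces whichever of the three sample kinds was buffered, with at most one of h and fh non-nil. A small consumer sketch, written as if inside package storage (assumes fmt is imported):

    func describePrev(b *MemoizedSeriesIterator) string {
    	t, v, h, fh, ok := b.PeekPrev()
    	switch {
    	case !ok:
    		return "no previous sample buffered"
    	case h != nil:
    		return fmt.Sprintf("t=%d integer histogram, count=%d", t, h.Count)
    	case fh != nil:
    		return fmt.Sprintf("t=%d float histogram, count=%g", t, fh.Count)
    	default:
    		return fmt.Sprintf("t=%d float %g", t, v)
    	}
    }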
@@ -17,9 +17,12 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
 func TestMemoizedSeriesIterator(t *testing.T) {
+	// TODO(beorn7): Include histograms in testing.
 	var it *MemoizedSeriesIterator
 
 	sampleEq := func(ets int64, ev float64) {
@@ -28,7 +31,7 @@ func TestMemoizedSeriesIterator(t *testing.T) {
 		require.Equal(t, ev, v, "value mismatch")
 	}
 	prevSampleEq := func(ets int64, ev float64, eok bool) {
-		ts, v, ok := it.PeekPrev()
+		ts, v, _, _, ok := it.PeekPrev()
 		require.Equal(t, eok, ok, "exist mismatch")
 		require.Equal(t, ets, ts, "timestamp mismatch")
 		require.Equal(t, ev, v, "value mismatch")
@@ -45,30 +48,30 @@ func TestMemoizedSeriesIterator(t *testing.T) {
 		sample{t: 101, v: 10},
 	}), 2)
 
-	require.True(t, it.Seek(-123), "seek failed")
+	require.Equal(t, it.Seek(-123), chunkenc.ValFloat, "seek failed")
 	sampleEq(1, 2)
 	prevSampleEq(0, 0, false)
 
-	require.True(t, it.Next(), "next failed")
+	require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed")
 	sampleEq(2, 3)
 	prevSampleEq(1, 2, true)
 
-	require.True(t, it.Next(), "next failed")
-	require.True(t, it.Next(), "next failed")
-	require.True(t, it.Next(), "next failed")
+	require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed")
+	require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed")
+	require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed")
 	sampleEq(5, 6)
 	prevSampleEq(4, 5, true)
 
-	require.True(t, it.Seek(5), "seek failed")
+	require.Equal(t, it.Seek(5), chunkenc.ValFloat, "seek failed")
 	sampleEq(5, 6)
 	prevSampleEq(4, 5, true)
 
-	require.True(t, it.Seek(101), "seek failed")
+	require.Equal(t, it.Seek(101), chunkenc.ValFloat, "seek failed")
 	sampleEq(101, 10)
 	prevSampleEq(100, 9, true)
 
-	require.False(t, it.Next(), "next succeeded unexpectedly")
-	require.False(t, it.Seek(1024), "seek succeeded unexpectedly")
+	require.Equal(t, it.Next(), chunkenc.ValNone, "next succeeded unexpectedly")
+	require.Equal(t, it.Seek(1024), chunkenc.ValNone, "seek succeeded unexpectedly")
 }
 
 func BenchmarkMemoizedSeriesIterator(b *testing.B) {
@@ -79,7 +82,7 @@ func BenchmarkMemoizedSeriesIterator(b *testing.B) {
 	b.ReportAllocs()
 	b.ResetTimer()
 
-	for it.Next() {
+	for it.Next() != chunkenc.ValNone {
 		// scan everything
 	}
 	require.NoError(b, it.Err())
@@ -22,6 +22,7 @@ import (
 
 	"golang.org/x/exp/slices"
 
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
@@ -442,7 +443,7 @@ type chainSampleIterator struct {
 	h         samplesIteratorHeap
 
 	curr  chunkenc.Iterator
-	lastt int64
+	lastT int64
 }
 
 // NewChainSampleIterator returns a single iterator that iterates over the samples from the given iterators in a sorted
@@ -452,60 +453,82 @@ func NewChainSampleIterator(iterators []chunkenc.Iterator) chunkenc.Iterator {
 	return &chainSampleIterator{
 		iterators: iterators,
 		h:         nil,
-		lastt:     math.MinInt64,
+		lastT:     math.MinInt64,
 	}
 }
 
-func (c *chainSampleIterator) Seek(t int64) bool {
-	// No-op check
-	if c.curr != nil && c.lastt >= t {
-		return true
+func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType {
+	// No-op check.
+	if c.curr != nil && c.lastT >= t {
+		return c.curr.Seek(c.lastT)
 	}
 
 	c.h = samplesIteratorHeap{}
 	for _, iter := range c.iterators {
-		if iter.Seek(t) {
+		if iter.Seek(t) != chunkenc.ValNone {
 			heap.Push(&c.h, iter)
 		}
 	}
 	if len(c.h) > 0 {
 		c.curr = heap.Pop(&c.h).(chunkenc.Iterator)
-		c.lastt, _ = c.curr.At()
-		return true
+		c.lastT = c.curr.AtT()
+		return c.curr.Seek(c.lastT)
 	}
 	c.curr = nil
-	return false
+	return chunkenc.ValNone
 }
 
 func (c *chainSampleIterator) At() (t int64, v float64) {
 	if c.curr == nil {
-		panic("chainSampleIterator.At() called before first .Next() or after .Next() returned false.")
+		panic("chainSampleIterator.At called before first .Next or after .Next returned false.")
 	}
 	return c.curr.At()
 }
 
-func (c *chainSampleIterator) Next() bool {
+func (c *chainSampleIterator) AtHistogram() (int64, *histogram.Histogram) {
+	if c.curr == nil {
+		panic("chainSampleIterator.AtHistogram called before first .Next or after .Next returned false.")
+	}
+	return c.curr.AtHistogram()
+}
+
+func (c *chainSampleIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	if c.curr == nil {
+		panic("chainSampleIterator.AtFloatHistogram called before first .Next or after .Next returned false.")
+	}
+	return c.curr.AtFloatHistogram()
+}
+
+func (c *chainSampleIterator) AtT() int64 {
+	if c.curr == nil {
+		panic("chainSampleIterator.AtT called before first .Next or after .Next returned false.")
+	}
+	return c.curr.AtT()
+}
+
+func (c *chainSampleIterator) Next() chunkenc.ValueType {
 	if c.h == nil {
 		c.h = samplesIteratorHeap{}
 		// We call c.curr.Next() as the first thing below.
 		// So, we don't call Next() on it here.
 		c.curr = c.iterators[0]
 		for _, iter := range c.iterators[1:] {
-			if iter.Next() {
+			if iter.Next() != chunkenc.ValNone {
 				heap.Push(&c.h, iter)
 			}
 		}
 	}
 
 	if c.curr == nil {
-		return false
+		return chunkenc.ValNone
 	}
 
-	var currt int64
+	var currT int64
+	var currValueType chunkenc.ValueType
 	for {
-		if c.curr.Next() {
-			currt, _ = c.curr.At()
-			if currt == c.lastt {
+		currValueType = c.curr.Next()
+		if currValueType != chunkenc.ValNone {
+			currT = c.curr.AtT()
+			if currT == c.lastT {
 				// Ignoring sample for the same timestamp.
 				continue
 			}
@@ -516,7 +539,8 @@ func (c *chainSampleIterator) Next() bool {
 			}
 
 			// Check current iterator with the top of the heap.
-			if nextt, _ := c.h[0].At(); currt < nextt {
+			nextT := c.h[0].AtT()
+			if currT < nextT {
 				// Current iterator has smaller timestamp than the heap.
 				break
 			}
@@ -525,18 +549,19 @@ func (c *chainSampleIterator) Next() bool {
 		} else if len(c.h) == 0 {
 			// No iterator left to iterate.
 			c.curr = nil
-			return false
+			return chunkenc.ValNone
 		}
 
 		c.curr = heap.Pop(&c.h).(chunkenc.Iterator)
-		currt, _ = c.curr.At()
-		if currt != c.lastt {
+		currT = c.curr.AtT()
+		currValueType = c.curr.Seek(currT)
+		if currT != c.lastT {
 			break
 		}
 	}
 
-	c.lastt = currt
-	return true
+	c.lastT = currT
+	return currValueType
 }
 
 func (c *chainSampleIterator) Err() error {
@@ -553,9 +578,7 @@ func (h samplesIteratorHeap) Len() int { return len(h) }
 func (h samplesIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
 
 func (h samplesIteratorHeap) Less(i, j int) bool {
-	at, _ := h[i].At()
-	bt, _ := h[j].At()
-	return at < bt
+	return h[i].AtT() < h[j].AtT()
}
 
 func (h *samplesIteratorHeap) Push(x interface{}) {
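chainSampleIterator is a k-way merge: each child iterator advances independently and a min-heap ordered by the current timestamp (now via AtT, which avoids decoding the value) picks the next sample, while lastT suppresses duplicates of an already-emitted timestamp. A self-contained sketch of the same technique over plain sorted int slices (illustrative only, not Prometheus code):

    package main

    import (
    	"container/heap"
    	"fmt"
    )

    // cursor walks one sorted slice, mimicking a child iterator.
    type cursor struct {
    	vals []int
    	pos  int
    }

    func (c *cursor) value() int { return c.vals[c.pos] }

    // minHeap orders cursors by their current value, just as
    // samplesIteratorHeap.Less now orders iterators by AtT().
    type minHeap []*cursor

    func (h minHeap) Len() int            { return len(h) }
    func (h minHeap) Less(i, j int) bool  { return h[i].value() < h[j].value() }
    func (h minHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
    func (h *minHeap) Push(x interface{}) { *h = append(*h, x.(*cursor)) }
    func (h *minHeap) Pop() interface{} {
    	old := *h
    	x := old[len(old)-1]
    	*h = old[:len(old)-1]
    	return x
    }

    // merge emits the union of all inputs in order, dropping repeats of the
    // previously emitted value the way chainSampleIterator skips lastT.
    func merge(lists ...[]int) []int {
    	h := &minHeap{}
    	for _, l := range lists {
    		if len(l) > 0 {
    			*h = append(*h, &cursor{vals: l})
    		}
    	}
    	heap.Init(h)
    	var out []int
    	var last int
    	first := true
    	for h.Len() > 0 {
    		c := (*h)[0]
    		if v := c.value(); first || v != last {
    			out = append(out, v)
    			last, first = v, false
    		}
    		if c.pos++; c.pos < len(c.vals) {
    			heap.Fix(h, 0) // cursor advanced; restore heap order
    		} else {
    			heap.Pop(h) // cursor exhausted
    		}
    	}
    	return out
    }

    func main() {
    	fmt.Println(merge([]int{0, 3}, []int{1, 4}, []int{2, 3, 5})) // [0 1 2 3 4 5]
    }
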
@@ -23,6 +23,7 @@ import (
 
 	"github.com/stretchr/testify/require"
 
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/tsdbutil"
@@ -62,116 +63,116 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
 		{
 			name: "one querier, two series",
 			querierSeries: [][]Series{{
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}),
 			}},
 			expected: NewMockSeriesSet(
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}),
 			),
 		},
 		{
 			name: "two queriers, one different series each",
 			querierSeries: [][]Series{{
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}),
 			}, {
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}),
 			}},
 			expected: NewMockSeriesSet(
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}),
 			),
 		},
 		{
 			name: "two time unsorted queriers, two series each",
 			querierSeries: [][]Series{{
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5}, sample{6, 6}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}),
 			}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3}, sample{4, 4}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{4, 4, nil, nil}}),
 			}},
 			expected: NewMockSeriesSet(
 				NewListSeries(
 					labels.FromStrings("bar", "baz"),
-					[]tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}, sample{6, 6}},
+					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}, sample{6, 6, nil, nil}},
 				),
 				NewListSeries(
 					labels.FromStrings("foo", "bar"),
-					[]tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}},
+					[]tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}},
 				),
 			),
 		},
 		{
 			name: "five queriers, only two queriers have two time unsorted series each",
 			querierSeries: [][]Series{{}, {}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5}, sample{6, 6}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}),
 			}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3}, sample{4, 4}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{4, 4, nil, nil}}),
 			}, {}},
 			expected: NewMockSeriesSet(
 				NewListSeries(
 					labels.FromStrings("bar", "baz"),
-					[]tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}, sample{6, 6}},
+					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}, sample{6, 6, nil, nil}},
 				),
 				NewListSeries(
 					labels.FromStrings("foo", "bar"),
-					[]tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}},
+					[]tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}},
 				),
 			),
 		},
 		{
 			name: "two queriers, only two queriers have two time unsorted series each, with 3 noop and one nil querier together",
 			querierSeries: [][]Series{{}, {}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5}, sample{6, 6}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}),
 			}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3}, sample{4, 4}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{4, 4, nil, nil}}),
 			}, {}},
 			extraQueriers: []Querier{NoopQuerier(), NoopQuerier(), nil, NoopQuerier()},
 			expected: NewMockSeriesSet(
 				NewListSeries(
 					labels.FromStrings("bar", "baz"),
-					[]tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}, sample{6, 6}},
+					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}, sample{6, 6, nil, nil}},
 				),
 				NewListSeries(
 					labels.FromStrings("foo", "bar"),
-					[]tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}},
+					[]tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}},
 				),
 			),
 		},
 		{
 			name: "two queriers, with two series, one is overlapping",
 			querierSeries: [][]Series{{}, {}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 21}, sample{3, 31}, sample{5, 5}, sample{6, 6}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 21, nil, nil}, sample{3, 31, nil, nil}, sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}),
 			}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 22}, sample{3, 32}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3}, sample{4, 4}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 22, nil, nil}, sample{3, 32, nil, nil}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{4, 4, nil, nil}}),
 			}, {}},
 			expected: NewMockSeriesSet(
 				NewListSeries(
 					labels.FromStrings("bar", "baz"),
-					[]tsdbutil.Sample{sample{1, 1}, sample{2, 21}, sample{3, 31}, sample{5, 5}, sample{6, 6}},
+					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 21, nil, nil}, sample{3, 31, nil, nil}, sample{5, 5, nil, nil}, sample{6, 6, nil, nil}},
 				),
 				NewListSeries(
 					labels.FromStrings("foo", "bar"),
-					[]tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}},
+					[]tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}},
 				),
 			),
 		},
 		{
 			name: "two queries, one with NaN samples series",
 			querierSeries: [][]Series{{
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN()}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN(), nil, nil}}),
 			}, {
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{1, 1}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{1, 1, nil, nil}}),
 			}},
 			expected: NewMockSeriesSet(
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN()}, sample{1, 1}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN(), nil, nil}, sample{1, 1, nil, nil}}),
 			),
 		},
 	} {
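Every sample literal above gains two trailing nils because the test helper type grows histogram fields alongside the float value. A sketch of its assumed post-change shape, with the field names inferred from sample{t: ts, h: ...} later in this diff and from the widened PeekPrev return values, so treat it as an illustration rather than the authoritative definition:

    package storage // sketch only

    import "github.com/prometheus/prometheus/model/histogram"

    // sample is the test helper; at most one of v, h, or fh is meaningful
    // for a given sample, so plain float samples read {t, v, nil, nil}.
    type sample struct {
    	t  int64
    	v  float64
    	h  *histogram.Histogram
    	fh *histogram.FloatHistogram
    }
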
@@ -245,108 +246,108 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
 		{
 			name: "one querier, two series",
 			chkQuerierSeries: [][]ChunkSeries{{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}}, []tsdbutil.Sample{sample{2, 2}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}),
 			}},
 			expected: NewMockChunkSeriesSet(
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}}, []tsdbutil.Sample{sample{2, 2}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}),
 			),
 		},
 		{
 			name: "two secondaries, one different series each",
 			chkQuerierSeries: [][]ChunkSeries{{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}),
 			}, {
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}}, []tsdbutil.Sample{sample{2, 2}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}),
 			}},
 			expected: NewMockChunkSeriesSet(
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}}, []tsdbutil.Sample{sample{2, 2}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}),
 			),
 		},
 		{
 			name: "two secondaries, two not in time order series each",
 			chkQuerierSeries: [][]ChunkSeries{{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5}}, []tsdbutil.Sample{sample{6, 6}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}}, []tsdbutil.Sample{sample{2, 2}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{6, 6, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}),
 			}, {
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3}}, []tsdbutil.Sample{sample{4, 4}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{4, 4, nil, nil}}),
 			}},
 			expected: NewMockChunkSeriesSet(
 				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-					[]tsdbutil.Sample{sample{1, 1}, sample{2, 2}},
-					[]tsdbutil.Sample{sample{3, 3}},
-					[]tsdbutil.Sample{sample{5, 5}},
-					[]tsdbutil.Sample{sample{6, 6}},
+					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}},
+					[]tsdbutil.Sample{sample{3, 3, nil, nil}},
+					[]tsdbutil.Sample{sample{5, 5, nil, nil}},
+					[]tsdbutil.Sample{sample{6, 6, nil, nil}},
 				),
 				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"),
-					[]tsdbutil.Sample{sample{0, 0}, sample{1, 1}},
-					[]tsdbutil.Sample{sample{2, 2}},
-					[]tsdbutil.Sample{sample{3, 3}},
-					[]tsdbutil.Sample{sample{4, 4}},
+					[]tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}},
+					[]tsdbutil.Sample{sample{2, 2, nil, nil}},
+					[]tsdbutil.Sample{sample{3, 3, nil, nil}},
+					[]tsdbutil.Sample{sample{4, 4, nil, nil}},
 				),
 			),
 		},
 		{
 			name: "five secondaries, only two have two not in time order series each",
 			chkQuerierSeries: [][]ChunkSeries{{}, {}, {
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5}}, []tsdbutil.Sample{sample{6, 6}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}}, []tsdbutil.Sample{sample{2, 2}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{6, 6, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}),
 			}, {
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3}}, []tsdbutil.Sample{sample{4, 4}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{4, 4, nil, nil}}),
 			}, {}},
 			expected: NewMockChunkSeriesSet(
 				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-					[]tsdbutil.Sample{sample{1, 1}, sample{2, 2}},
-					[]tsdbutil.Sample{sample{3, 3}},
-					[]tsdbutil.Sample{sample{5, 5}},
-					[]tsdbutil.Sample{sample{6, 6}},
+					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}},
+					[]tsdbutil.Sample{sample{3, 3, nil, nil}},
+					[]tsdbutil.Sample{sample{5, 5, nil, nil}},
+					[]tsdbutil.Sample{sample{6, 6, nil, nil}},
 				),
 				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"),
-					[]tsdbutil.Sample{sample{0, 0}, sample{1, 1}},
-					[]tsdbutil.Sample{sample{2, 2}},
-					[]tsdbutil.Sample{sample{3, 3}},
-					[]tsdbutil.Sample{sample{4, 4}},
+					[]tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}},
+					[]tsdbutil.Sample{sample{2, 2, nil, nil}},
+					[]tsdbutil.Sample{sample{3, 3, nil, nil}},
+					[]tsdbutil.Sample{sample{4, 4, nil, nil}},
 				),
 			),
 		},
 		{
 			name: "two secondaries, with two not in time order series each, with 3 noop queries and one nil together",
 			chkQuerierSeries: [][]ChunkSeries{{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5}}, []tsdbutil.Sample{sample{6, 6}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}}, []tsdbutil.Sample{sample{2, 2}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{6, 6, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}, []tsdbutil.Sample{sample{2, 2, nil, nil}}),
 			}, {
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3}}, []tsdbutil.Sample{sample{4, 4}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{4, 4, nil, nil}}),
 			}},
 			extraQueriers: []ChunkQuerier{NoopChunkedQuerier(), NoopChunkedQuerier(), nil, NoopChunkedQuerier()},
 			expected: NewMockChunkSeriesSet(
 				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-					[]tsdbutil.Sample{sample{1, 1}, sample{2, 2}},
-					[]tsdbutil.Sample{sample{3, 3}},
-					[]tsdbutil.Sample{sample{5, 5}},
-					[]tsdbutil.Sample{sample{6, 6}},
+					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}},
+					[]tsdbutil.Sample{sample{3, 3, nil, nil}},
+					[]tsdbutil.Sample{sample{5, 5, nil, nil}},
+					[]tsdbutil.Sample{sample{6, 6, nil, nil}},
 				),
 				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"),
-					[]tsdbutil.Sample{sample{0, 0}, sample{1, 1}},
-					[]tsdbutil.Sample{sample{2, 2}},
-					[]tsdbutil.Sample{sample{3, 3}},
-					[]tsdbutil.Sample{sample{4, 4}},
+					[]tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}},
+					[]tsdbutil.Sample{sample{2, 2, nil, nil}},
+					[]tsdbutil.Sample{sample{3, 3, nil, nil}},
+					[]tsdbutil.Sample{sample{4, 4, nil, nil}},
 				),
 			),
 		},
 		{
 			name: "two queries, one with NaN samples series",
 			chkQuerierSeries: [][]ChunkSeries{{
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN()}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN(), nil, nil}}),
 			}, {
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{1, 1}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{1, 1, nil, nil}}),
 			}},
 			expected: NewMockChunkSeriesSet(
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN()}}, []tsdbutil.Sample{sample{1, 1}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN(), nil, nil}}, []tsdbutil.Sample{sample{1, 1, nil, nil}}),
 			),
 		},
 	} {
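Worth noting for the chunk-level expectations above and below: each []tsdbutil.Sample argument passed to NewListChunkSeriesFromSamples becomes its own chunk, so the expected values assert chunk boundaries as well as sample order. A hypothetical call in this test package's context (labels and values arbitrary):

    // Two sample slices produce a two-chunk series.
    s := NewListChunkSeriesFromSamples(
    	labels.FromStrings("foo", "bar"),
    	[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, // first chunk
    	[]tsdbutil.Sample{sample{3, 3, nil, nil}},                        // second chunk
    )
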
@@ -384,6 +385,22 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
 func TestCompactingChunkSeriesMerger(t *testing.T) {
 	m := NewCompactingChunkSeriesMerger(ChainedSeriesMerge)
 
+	// histogramSample returns a histogram that is unique to the ts.
+	histogramSample := func(ts int64) sample {
+		idx := ts + 1
+		return sample{t: ts, h: &histogram.Histogram{
+			Schema:          2,
+			ZeroThreshold:   0.001,
+			ZeroCount:       2 * uint64(idx),
+			Count:           5 * uint64(idx),
+			Sum:             12.34 * float64(idx),
+			PositiveSpans:   []histogram.Span{{Offset: 1, Length: 2}, {Offset: 2, Length: 1}},
+			NegativeSpans:   []histogram.Span{{Offset: 2, Length: 1}, {Offset: 1, Length: 2}},
+			PositiveBuckets: []int64{1 * idx, -1 * idx, 3 * idx},
+			NegativeBuckets: []int64{1 * idx, 2 * idx, 3 * idx},
+		}}
+	}
+
 	for _, tc := range []struct {
 		name  string
 		input []ChunkSeries
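In histogram.Histogram, the PositiveBuckets and NegativeBuckets slices are delta-encoded: the first entry is the first bucket's absolute count and each later entry is the difference from the bucket before it. So for the helper above with ts = 0 (hence idx = 1), PositiveBuckets [1, -1, 3] decodes to absolute counts 1, 0, 3. A small self-contained check of that prefix-sum decoding (the function name is illustrative, not a Prometheus API):

    package main

    import "fmt"

    // absoluteCounts prefix-sums delta-encoded bucket counts.
    func absoluteCounts(deltas []int64) []int64 {
    	out := make([]int64, len(deltas))
    	var cur int64
    	for i, d := range deltas {
    		cur += d
    		out[i] = cur
    	}
    	return out
    }

    func main() {
    	fmt.Println(absoluteCounts([]int64{1, -1, 3})) // [1 0 3]
    }
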
@@ -399,9 +416,9 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
 		{
 			name: "single series",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}),
 		},
 		{
 			name: "two empty series",
@@ -414,55 +431,55 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
 		{
 			name: "two non overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}, sample{5, 5}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{7, 7}, sample{9, 9}}, []tsdbutil.Sample{sample{10, 10}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}, sample{5, 5}}, []tsdbutil.Sample{sample{7, 7}, sample{9, 9}}, []tsdbutil.Sample{sample{10, 10}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}),
 		},
 		{
 			name: "two overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}, sample{8, 8}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{7, 7}, sample{9, 9}}, []tsdbutil.Sample{sample{10, 10}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{8, 8, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}, sample{7, 7}, sample{8, 8}, sample{9, 9}}, []tsdbutil.Sample{sample{10, 10}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{7, 7, nil, nil}, sample{8, 8, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}),
 		},
 		{
 			name: "two duplicated",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2}, sample{3, 3}, sample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}),
 		},
 		{
 			name: "three overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2}, sample{3, 3}, sample{6, 6}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0}, sample{4, 4}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{6, 6, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{4, 4, nil, nil}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5}, sample{6, 6}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}, sample{5, 5, nil, nil}, sample{6, 6, nil, nil}}),
 		},
 		{
 			name: "three in chained overlap",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{4, 4}, sample{6, 66}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{6, 6}, sample{10, 10}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{4, 4, nil, nil}, sample{6, 66, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{6, 6, nil, nil}, sample{10, 10, nil, nil}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5}, sample{6, 66}, sample{10, 10}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}, sample{5, 5, nil, nil}, sample{6, 66, nil, nil}, sample{10, 10, nil, nil}}),
 		},
 		{
 			name: "three in chained overlap complex",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0}, sample{5, 5}}, []tsdbutil.Sample{sample{10, 10}, sample{15, 15}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2}, sample{20, 20}}, []tsdbutil.Sample{sample{25, 25}, sample{30, 30}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{18, 18}, sample{26, 26}}, []tsdbutil.Sample{sample{31, 31}, sample{35, 35}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}, sample{15, 15, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{20, 20, nil, nil}}, []tsdbutil.Sample{sample{25, 25, nil, nil}, sample{30, 30, nil, nil}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{18, 18, nil, nil}, sample{26, 26, nil, nil}}, []tsdbutil.Sample{sample{31, 31, nil, nil}, sample{35, 35, nil, nil}}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{sample{0, 0}, sample{2, 2}, sample{5, 5}, sample{10, 10}, sample{15, 15}, sample{18, 18}, sample{20, 20}, sample{25, 25}, sample{26, 26}, sample{30, 30}},
-				[]tsdbutil.Sample{sample{31, 31}, sample{35, 35}},
+				[]tsdbutil.Sample{sample{0, 0, nil, nil}, sample{2, 2, nil, nil}, sample{5, 5, nil, nil}, sample{10, 10, nil, nil}, sample{15, 15, nil, nil}, sample{18, 18, nil, nil}, sample{20, 20, nil, nil}, sample{25, 25, nil, nil}, sample{26, 26, nil, nil}, sample{30, 30, nil, nil}},
				[]tsdbutil.Sample{sample{31, 31, nil, nil}, sample{35, 35, nil, nil}},
 			),
 		},
 		{
@@ -486,6 +503,32 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
 				tsdbutil.GenerateSamples(120, 30),
 			),
 		},
+		{
+			name: "histogram chunks overlapping",
+			input: []ChunkSeries{
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(0), histogramSample(5)}, []tsdbutil.Sample{histogramSample(10), histogramSample(15)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(2), histogramSample(20)}, []tsdbutil.Sample{histogramSample(25), histogramSample(30)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(18), histogramSample(26)}, []tsdbutil.Sample{histogramSample(31), histogramSample(35)}),
+			},
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
+				[]tsdbutil.Sample{histogramSample(0), histogramSample(2), histogramSample(5), histogramSample(10), histogramSample(15), histogramSample(18), histogramSample(20), histogramSample(25), histogramSample(26), histogramSample(30)},
+				[]tsdbutil.Sample{histogramSample(31), histogramSample(35)},
+			),
+		},
+		{
+			name: "histogram chunks overlapping with float chunks",
+			input: []ChunkSeries{
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(0), histogramSample(5)}, []tsdbutil.Sample{histogramSample(10), histogramSample(15)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{12, 12, nil, nil}}, []tsdbutil.Sample{sample{14, 14, nil, nil}}),
+			},
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
+				[]tsdbutil.Sample{histogramSample(0)},
+				[]tsdbutil.Sample{sample{1, 1, nil, nil}},
+				[]tsdbutil.Sample{histogramSample(5), histogramSample(10)},
+				[]tsdbutil.Sample{sample{12, 12, nil, nil}, sample{14, 14, nil, nil}},
+				[]tsdbutil.Sample{histogramSample(15)},
+			),
+		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			merged := m(tc.input...)
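The "histogram chunks overlapping with float chunks" expectation encodes an important constraint: a chunk holds samples of a single encoding, so when the merged, time-ordered stream alternates between float and histogram samples, the compacting merger has to cut a new chunk at every switch. The merged timeline for that case (H = histogramSample, F = float sample):

    t:     0   1   5   10   12   14   15
    type:  H   F   H   H    F    F    H
    chunk: [0] [1] [   2  ] [   3   ] [4]
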
|
@ -517,9 +560,9 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) {
|
||||||
{
|
{
|
||||||
name: "single series",
|
name: "single series",
|
||||||
input: []ChunkSeries{
|
input: []ChunkSeries{
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}),
|
||||||
},
|
},
|
||||||
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}}),
|
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}}),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "two empty series",
|
name: "two empty series",
|
||||||
|
@ -532,70 +575,70 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) {
|
||||||
{
|
{
|
||||||
name: "two non overlapping",
|
name: "two non overlapping",
|
||||||
input: []ChunkSeries{
|
input: []ChunkSeries{
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}, sample{5, 5}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}),
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{7, 7}, sample{9, 9}}, []tsdbutil.Sample{sample{10, 10}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}),
|
||||||
},
|
},
|
||||||
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}, sample{5, 5}}, []tsdbutil.Sample{sample{7, 7}, sample{9, 9}}, []tsdbutil.Sample{sample{10, 10}}),
|
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "two overlapping",
|
name: "two overlapping",
|
||||||
input: []ChunkSeries{
|
input: []ChunkSeries{
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}, sample{8, 8}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{8, 8, nil, nil}}),
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{7, 7}, sample{9, 9}}, []tsdbutil.Sample{sample{10, 10}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}}),
|
||||||
},
|
},
|
||||||
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
|
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
|
||||||
[]tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}, sample{8, 8}},
|
[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}, []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{8, 8, nil, nil}},
|
||||||
[]tsdbutil.Sample{sample{7, 7}, sample{9, 9}}, []tsdbutil.Sample{sample{10, 10}},
|
[]tsdbutil.Sample{sample{7, 7, nil, nil}, sample{9, 9, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}},
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "two duplicated",
|
name: "two duplicated",
|
||||||
input: []ChunkSeries{
|
input: []ChunkSeries{
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}),
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2}, sample{3, 3}, sample{5, 5}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}),
|
||||||
},
|
},
|
||||||
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
|
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
|
||||||
[]tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}},
|
[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}},
|
||||||
[]tsdbutil.Sample{sample{2, 2}, sample{3, 3}, sample{5, 5}},
|
[]tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}},
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "three overlapping",
|
name: "three overlapping",
|
||||||
input: []ChunkSeries{
|
input: []ChunkSeries{
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}),
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2}, sample{3, 3}, sample{6, 6}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{6, 6, nil, nil}}),
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0}, sample{4, 4}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{4, 4, nil, nil}}),
|
||||||
},
|
},
|
||||||
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
|
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
|
||||||
[]tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}},
|
[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}},
|
||||||
-				[]tsdbutil.Sample{sample{2, 2}, sample{3, 3}, sample{6, 6}},
+				[]tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{6, 6, nil, nil}},
-				[]tsdbutil.Sample{sample{0, 0}, sample{4, 4}},
+				[]tsdbutil.Sample{sample{0, 0, nil, nil}, sample{4, 4, nil, nil}},
 			),
 		},
 		{
 			name: "three in chained overlap",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{4, 4}, sample{6, 66}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{4, 4, nil, nil}, sample{6, 66, nil, nil}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{6, 6}, sample{10, 10}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{6, 6, nil, nil}, sample{10, 10, nil, nil}}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{5, 5}},
+				[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 5, nil, nil}},
-				[]tsdbutil.Sample{sample{4, 4}, sample{6, 66}},
+				[]tsdbutil.Sample{sample{4, 4, nil, nil}, sample{6, 66, nil, nil}},
-				[]tsdbutil.Sample{sample{6, 6}, sample{10, 10}},
+				[]tsdbutil.Sample{sample{6, 6, nil, nil}, sample{10, 10, nil, nil}},
 			),
 		},
 		{
 			name: "three in chained overlap complex",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0}, sample{5, 5}}, []tsdbutil.Sample{sample{10, 10}, sample{15, 15}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}, sample{15, 15, nil, nil}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2}, sample{20, 20}}, []tsdbutil.Sample{sample{25, 25}, sample{30, 30}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{20, 20, nil, nil}}, []tsdbutil.Sample{sample{25, 25, nil, nil}, sample{30, 30, nil, nil}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{18, 18}, sample{26, 26}}, []tsdbutil.Sample{sample{31, 31}, sample{35, 35}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{18, 18, nil, nil}, sample{26, 26, nil, nil}}, []tsdbutil.Sample{sample{31, 31, nil, nil}, sample{35, 35, nil, nil}}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{sample{0, 0}, sample{5, 5}}, []tsdbutil.Sample{sample{10, 10}, sample{15, 15}},
+				[]tsdbutil.Sample{sample{0, 0, nil, nil}, sample{5, 5, nil, nil}}, []tsdbutil.Sample{sample{10, 10, nil, nil}, sample{15, 15, nil, nil}},
-				[]tsdbutil.Sample{sample{2, 2}, sample{20, 20}}, []tsdbutil.Sample{sample{25, 25}, sample{30, 30}},
+				[]tsdbutil.Sample{sample{2, 2, nil, nil}, sample{20, 20, nil, nil}}, []tsdbutil.Sample{sample{25, 25, nil, nil}, sample{30, 30, nil, nil}},
-				[]tsdbutil.Sample{sample{18, 18}, sample{26, 26}}, []tsdbutil.Sample{sample{31, 31}, sample{35, 35}},
+				[]tsdbutil.Sample{sample{18, 18, nil, nil}, sample{26, 26, nil, nil}}, []tsdbutil.Sample{sample{31, 31, nil, nil}, sample{35, 35, nil, nil}},
 			),
 		},
 		{
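The two trailing nil arguments added to every sample literal in these tests fill the histogram slots of the widened test helper. A minimal sketch of the shape these literals imply; the actual helper is defined alongside the tests, and only its four-field layout is visible here:

package storage

import "github.com/prometheus/prometheus/model/histogram"

// sample is a sketch of the widened test helper: the old {t, v} pair gains
// two pointer fields so a single type can carry a float sample, an integer
// histogram, or a float histogram. Passing nil, nil marks a plain float.
type sample struct {
	t  int64
	v  float64
	h  *histogram.Histogram
	fh *histogram.FloatHistogram
}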
@@ -732,38 +775,38 @@ func TestChainSampleIterator(t *testing.T) {
 	}{
 		{
 			input: []chunkenc.Iterator{
-				NewListSeriesIterator(samples{sample{0, 0}, sample{1, 1}}),
+				NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}),
 			},
-			expected: []tsdbutil.Sample{sample{0, 0}, sample{1, 1}},
+			expected: []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}},
 		},
 		{
 			input: []chunkenc.Iterator{
-				NewListSeriesIterator(samples{sample{0, 0}, sample{1, 1}}),
+				NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}),
-				NewListSeriesIterator(samples{sample{2, 2}, sample{3, 3}}),
+				NewListSeriesIterator(samples{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}),
 			},
-			expected: []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}},
+			expected: []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
 		},
 		{
 			input: []chunkenc.Iterator{
-				NewListSeriesIterator(samples{sample{0, 0}, sample{3, 3}}),
+				NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{3, 3, nil, nil}}),
-				NewListSeriesIterator(samples{sample{1, 1}, sample{4, 4}}),
+				NewListSeriesIterator(samples{sample{1, 1, nil, nil}, sample{4, 4, nil, nil}}),
-				NewListSeriesIterator(samples{sample{2, 2}, sample{5, 5}}),
+				NewListSeriesIterator(samples{sample{2, 2, nil, nil}, sample{5, 5, nil, nil}}),
 			},
 			expected: []tsdbutil.Sample{
-				sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5},
+				sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}, sample{5, 5, nil, nil},
 			},
 		},
 		// Overlap.
 		{
 			input: []chunkenc.Iterator{
-				NewListSeriesIterator(samples{sample{0, 0}, sample{1, 1}}),
+				NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}),
-				NewListSeriesIterator(samples{sample{0, 0}, sample{2, 2}}),
+				NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{2, 2, nil, nil}}),
-				NewListSeriesIterator(samples{sample{2, 2}, sample{3, 3}}),
+				NewListSeriesIterator(samples{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}),
 				NewListSeriesIterator(samples{}),
 				NewListSeriesIterator(samples{}),
 				NewListSeriesIterator(samples{}),
 			},
-			expected: []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}},
+			expected: []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
 		},
 	} {
 		merged := NewChainSampleIterator(tc.input)
@@ -781,42 +824,42 @@ func TestChainSampleIteratorSeek(t *testing.T) {
 	}{
 		{
 			input: []chunkenc.Iterator{
-				NewListSeriesIterator(samples{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
+				NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}),
 			},
 			seek:     1,
-			expected: []tsdbutil.Sample{sample{1, 1}, sample{2, 2}},
+			expected: []tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}},
 		},
 		{
 			input: []chunkenc.Iterator{
-				NewListSeriesIterator(samples{sample{0, 0}, sample{1, 1}}),
+				NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}}),
-				NewListSeriesIterator(samples{sample{2, 2}, sample{3, 3}}),
+				NewListSeriesIterator(samples{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}),
 			},
 			seek:     2,
-			expected: []tsdbutil.Sample{sample{2, 2}, sample{3, 3}},
+			expected: []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
 		},
 		{
 			input: []chunkenc.Iterator{
-				NewListSeriesIterator(samples{sample{0, 0}, sample{3, 3}}),
+				NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{3, 3, nil, nil}}),
-				NewListSeriesIterator(samples{sample{1, 1}, sample{4, 4}}),
+				NewListSeriesIterator(samples{sample{1, 1, nil, nil}, sample{4, 4, nil, nil}}),
-				NewListSeriesIterator(samples{sample{2, 2}, sample{5, 5}}),
+				NewListSeriesIterator(samples{sample{2, 2, nil, nil}, sample{5, 5, nil, nil}}),
 			},
 			seek:     2,
-			expected: []tsdbutil.Sample{sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5}},
+			expected: []tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{4, 4, nil, nil}, sample{5, 5, nil, nil}},
 		},
 		{
 			input: []chunkenc.Iterator{
-				NewListSeriesIterator(samples{sample{0, 0}, sample{2, 2}, sample{3, 3}}),
+				NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}),
-				NewListSeriesIterator(samples{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
+				NewListSeriesIterator(samples{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}}),
 			},
 			seek:     0,
-			expected: []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}},
+			expected: []tsdbutil.Sample{sample{0, 0, nil, nil}, sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
 		},
 	} {
 		merged := NewChainSampleIterator(tc.input)
 		actual := []tsdbutil.Sample{}
-		if merged.Seek(tc.seek) {
+		if merged.Seek(tc.seek) == chunkenc.ValFloat {
 			t, v := merged.At()
-			actual = append(actual, sample{t, v})
+			actual = append(actual, sample{t, v, nil, nil})
 		}
 		s, err := ExpandSamples(merged, nil)
 		require.NoError(t, err)
@@ -26,6 +26,7 @@ import (
 	"github.com/prometheus/common/model"

 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/textparse"
 	"github.com/prometheus/prometheus/prompb"
@@ -118,7 +119,8 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
 		iter := series.Iterator()
 		samples := []prompb.Sample{}

-		for iter.Next() {
+		for iter.Next() == chunkenc.ValFloat {
+			// TODO(beorn7): Add Histogram support.
 			numSamples++
 			if sampleLimit > 0 && numSamples > sampleLimit {
 				return nil, ss.Warnings(), HTTPError{
@@ -355,37 +357,65 @@ func newConcreteSeriersIterator(series *concreteSeries) chunkenc.Iterator {
 }

 // Seek implements storage.SeriesIterator.
-func (c *concreteSeriesIterator) Seek(t int64) bool {
+func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
 	if c.cur == -1 {
 		c.cur = 0
 	}
 	if c.cur >= len(c.series.samples) {
-		return false
+		return chunkenc.ValNone
 	}
 	// No-op check.
 	if s := c.series.samples[c.cur]; s.Timestamp >= t {
-		return true
+		return chunkenc.ValFloat
 	}
 	// Do binary search between current position and end.
 	c.cur += sort.Search(len(c.series.samples)-c.cur, func(n int) bool {
 		return c.series.samples[n+c.cur].Timestamp >= t
 	})
-	return c.cur < len(c.series.samples)
+	if c.cur < len(c.series.samples) {
+		return chunkenc.ValFloat
+	}
+	return chunkenc.ValNone
+	// TODO(beorn7): Add histogram support.
 }

-// At implements storage.SeriesIterator.
+// At implements chunkenc.Iterator.
 func (c *concreteSeriesIterator) At() (t int64, v float64) {
 	s := c.series.samples[c.cur]
 	return s.Timestamp, s.Value
 }

-// Next implements storage.SeriesIterator.
-func (c *concreteSeriesIterator) Next() bool {
-	c.cur++
-	return c.cur < len(c.series.samples)
+// AtHistogram always returns (0, nil) because there is no support for histogram
+// values yet.
+// TODO(beorn7): Fix that for histogram support in remote storage.
+func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
+	return 0, nil
 }

-// Err implements storage.SeriesIterator.
+// AtFloatHistogram always returns (0, nil) because there is no support for histogram
+// values yet.
+// TODO(beorn7): Fix that for histogram support in remote storage.
+func (c *concreteSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	return 0, nil
+}
+
+// AtT implements chunkenc.Iterator.
+func (c *concreteSeriesIterator) AtT() int64 {
+	s := c.series.samples[c.cur]
+	return s.Timestamp
+}
+
+// Next implements chunkenc.Iterator.
+func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
+	c.cur++
+	if c.cur < len(c.series.samples) {
+		return chunkenc.ValFloat
+	}
+	return chunkenc.ValNone
+	// TODO(beorn7): Add histogram support.
+}
+
+// Err implements chunkenc.Iterator.
 func (c *concreteSeriesIterator) Err() error {
 	return nil
 }
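Seek and Next now return chunkenc.ValueType instead of bool, so callers dispatch on the kind of value at the iterator position rather than testing a boolean. A consumption sketch under that contract, assuming the full constant set (ValNone, ValFloat, ValHistogram, ValFloatHistogram) introduced with native histograms:

package example

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// drain walks any chunkenc.Iterator to exhaustion, switching on the value
// type reported by Next instead of a bool.
func drain(it chunkenc.Iterator) error {
	for {
		switch it.Next() {
		case chunkenc.ValNone:
			return it.Err() // exhausted, or an error occurred
		case chunkenc.ValFloat:
			t, v := it.At()
			fmt.Println(t, v)
		case chunkenc.ValHistogram:
			t, h := it.AtHistogram()
			fmt.Println(t, h)
		case chunkenc.ValFloatHistogram:
			t, fh := it.AtFloatHistogram()
			fmt.Println(t, fh)
		}
	}
}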
@@ -472,6 +502,56 @@ func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar {
 	}
 }

+// HistogramProtoToHistogram extracts a (normal integer) Histogram from the
+// provided proto message. The caller has to make sure that the proto message
+// represents an integer histogram and not a float histogram.
+func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
+	return &histogram.Histogram{
+		Schema:          hp.Schema,
+		ZeroThreshold:   hp.ZeroThreshold,
+		ZeroCount:       hp.GetZeroCountInt(),
+		Count:           hp.GetCountInt(),
+		Sum:             hp.Sum,
+		PositiveSpans:   spansProtoToSpans(hp.GetPositiveSpans()),
+		PositiveBuckets: hp.GetPositiveDeltas(),
+		NegativeSpans:   spansProtoToSpans(hp.GetNegativeSpans()),
+		NegativeBuckets: hp.GetNegativeDeltas(),
+	}
+}
+
+func spansProtoToSpans(s []*prompb.BucketSpan) []histogram.Span {
+	spans := make([]histogram.Span, len(s))
+	for i := 0; i < len(s); i++ {
+		spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
+	}
+
+	return spans
+}
+
+func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.Histogram {
+	return prompb.Histogram{
+		Count:          &prompb.Histogram_CountInt{CountInt: h.Count},
+		Sum:            h.Sum,
+		Schema:         h.Schema,
+		ZeroThreshold:  h.ZeroThreshold,
+		ZeroCount:      &prompb.Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
+		NegativeSpans:  spansToSpansProto(h.NegativeSpans),
+		NegativeDeltas: h.NegativeBuckets,
+		PositiveSpans:  spansToSpansProto(h.PositiveSpans),
+		PositiveDeltas: h.PositiveBuckets,
+		Timestamp:      timestamp,
+	}
+}
+
+func spansToSpansProto(s []histogram.Span) []*prompb.BucketSpan {
+	spans := make([]*prompb.BucketSpan, len(s))
+	for i := 0; i < len(s); i++ {
+		spans[i] = &prompb.BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
+	}
+
+	return spans
+}
+
 // LabelProtosToMetric unpack a []*prompb.Label to a model.Metric
 func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
 	metric := make(model.Metric, len(labelPairs))
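HistogramToHistogramProto and HistogramProtoToHistogram are intended as inverses for integer histograms: spans and delta-encoded bucket counts are copied field for field, and the timestamp lives only on the proto side. A round-trip sketch within this package; the values are illustrative:

// histogramRoundTrip shows the conversion pair in both directions. Bucket
// slices are delta-encoded: []int64{2, -1} means absolute counts 2 and 1,
// and both conversions pass the deltas through unchanged.
func histogramRoundTrip() {
	h := &histogram.Histogram{
		Schema:          0,
		ZeroThreshold:   1e-128,
		ZeroCount:       2,
		Count:           5,
		Sum:             12.5,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []int64{2, -1},
	}
	hp := HistogramToHistogramProto(42, h) // prompb.Histogram with Timestamp: 42
	back := HistogramProtoToHistogram(hp)  // same schema, spans, and buckets;
	_ = back                               // the timestamp is not part of
	                                       // histogram.Histogram itself
}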
@@ -20,12 +20,26 @@ import (

 	"github.com/stretchr/testify/require"

+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/textparse"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )

+var testHistogram = histogram.Histogram{
+	Schema:          2,
+	ZeroThreshold:   1e-128,
+	ZeroCount:       0,
+	Count:           0,
+	Sum:             20,
+	PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
+	PositiveBuckets: []int64{1},
+	NegativeSpans:   []histogram.Span{{Offset: 0, Length: 1}},
+	NegativeBuckets: []int64{-1},
+}
+
 var writeRequestFixture = &prompb.WriteRequest{
 	Timeseries: []prompb.TimeSeries{
 		{
@@ -36,8 +50,9 @@ var writeRequestFixture = &prompb.WriteRequest{
 				{Name: "d", Value: "e"},
 				{Name: "foo", Value: "bar"},
 			},
 			Samples:    []prompb.Sample{{Value: 1, Timestamp: 0}},
 			Exemplars:  []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
+			Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
 		},
 		{
 			Labels: []prompb.Label{
@@ -47,8 +62,9 @@ var writeRequestFixture = &prompb.WriteRequest{
 				{Name: "d", Value: "e"},
 				{Name: "foo", Value: "bar"},
 			},
 			Samples:    []prompb.Sample{{Value: 2, Timestamp: 1}},
 			Exemplars:  []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
+			Histograms: []prompb.Histogram{HistogramToHistogramProto(1, &testHistogram)},
 		},
 	},
 }
@@ -202,39 +218,39 @@ func TestConcreteSeriesIterator(t *testing.T) {
 	it := series.Iterator()

 	// Seek to the first sample with ts=1.
-	require.True(t, it.Seek(1))
+	require.Equal(t, chunkenc.ValFloat, it.Seek(1))
 	ts, v := it.At()
 	require.Equal(t, int64(1), ts)
 	require.Equal(t, 1., v)

 	// Seek one further, next sample still has ts=1.
-	require.True(t, it.Next())
+	require.Equal(t, chunkenc.ValFloat, it.Next())
 	ts, v = it.At()
 	require.Equal(t, int64(1), ts)
 	require.Equal(t, 1.5, v)

 	// Seek again to 1 and make sure we stay where we are.
-	require.True(t, it.Seek(1))
+	require.Equal(t, chunkenc.ValFloat, it.Seek(1))
 	ts, v = it.At()
 	require.Equal(t, int64(1), ts)
 	require.Equal(t, 1.5, v)

 	// Another seek.
-	require.True(t, it.Seek(3))
+	require.Equal(t, chunkenc.ValFloat, it.Seek(3))
 	ts, v = it.At()
 	require.Equal(t, int64(3), ts)
 	require.Equal(t, 3., v)

 	// And we don't go back.
-	require.True(t, it.Seek(2))
+	require.Equal(t, chunkenc.ValFloat, it.Seek(2))
 	ts, v = it.At()
 	require.Equal(t, int64(3), ts)
 	require.Equal(t, 3., v)

 	// Seek beyond the end.
-	require.False(t, it.Seek(5))
+	require.Equal(t, chunkenc.ValNone, it.Seek(5))
 	// And we don't go back. (This exposes issue #10027.)
-	require.False(t, it.Seek(2))
+	require.Equal(t, chunkenc.ValNone, it.Seek(2))
 }

 func TestFromQueryResultWithDuplicates(t *testing.T) {
@@ -345,3 +361,9 @@ func TestDecodeWriteRequest(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, writeRequestFixture, actual)
 }
+
+func TestNilHistogramProto(t *testing.T) {
+	// This function will panic if it improperly handles nil
+	// values, causing the test to fail.
+	HistogramProtoToHistogram(prompb.Histogram{})
+}
@@ -32,6 +32,7 @@ import (
 	"go.uber.org/atomic"

 	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/prometheus/prometheus/prompb"
@@ -54,30 +55,35 @@ const (
 type queueManagerMetrics struct {
 	reg prometheus.Registerer

 	samplesTotal           prometheus.Counter
 	exemplarsTotal         prometheus.Counter
+	histogramsTotal        prometheus.Counter
 	metadataTotal          prometheus.Counter
 	failedSamplesTotal     prometheus.Counter
 	failedExemplarsTotal   prometheus.Counter
+	failedHistogramsTotal  prometheus.Counter
 	failedMetadataTotal    prometheus.Counter
 	retriedSamplesTotal    prometheus.Counter
 	retriedExemplarsTotal  prometheus.Counter
+	retriedHistogramsTotal prometheus.Counter
 	retriedMetadataTotal   prometheus.Counter
 	droppedSamplesTotal    prometheus.Counter
 	droppedExemplarsTotal  prometheus.Counter
+	droppedHistogramsTotal prometheus.Counter
 	enqueueRetriesTotal    prometheus.Counter
 	sentBatchDuration      prometheus.Histogram
 	highestSentTimestamp   *maxTimestamp
 	pendingSamples         prometheus.Gauge
 	pendingExemplars       prometheus.Gauge
+	pendingHistograms      prometheus.Gauge
 	shardCapacity          prometheus.Gauge
 	numShards              prometheus.Gauge
 	maxNumShards           prometheus.Gauge
 	minNumShards           prometheus.Gauge
 	desiredNumShards       prometheus.Gauge
 	sentBytesTotal         prometheus.Counter
 	metadataBytesTotal     prometheus.Counter
 	maxSamplesPerSend      prometheus.Gauge
 }

 func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManagerMetrics {
@@ -103,6 +109,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManagerMetrics {
 		Help:        "Total number of exemplars sent to remote storage.",
 		ConstLabels: constLabels,
 	})
+	m.histogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace:   namespace,
+		Subsystem:   subsystem,
+		Name:        "histograms_total",
+		Help:        "Total number of histograms sent to remote storage.",
+		ConstLabels: constLabels,
+	})
 	m.metadataTotal = prometheus.NewCounter(prometheus.CounterOpts{
 		Namespace: namespace,
 		Subsystem: subsystem,
@@ -124,6 +137,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManagerMetrics {
 		Help:        "Total number of exemplars which failed on send to remote storage, non-recoverable errors.",
 		ConstLabels: constLabels,
 	})
+	m.failedHistogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace:   namespace,
+		Subsystem:   subsystem,
+		Name:        "histograms_failed_total",
+		Help:        "Total number of histograms which failed on send to remote storage, non-recoverable errors.",
+		ConstLabels: constLabels,
+	})
 	m.failedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{
 		Namespace: namespace,
 		Subsystem: subsystem,
@@ -145,6 +165,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManagerMetrics {
 		Help:        "Total number of exemplars which failed on send to remote storage but were retried because the send error was recoverable.",
 		ConstLabels: constLabels,
 	})
+	m.retriedHistogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace:   namespace,
+		Subsystem:   subsystem,
+		Name:        "histograms_retried_total",
+		Help:        "Total number of histograms which failed on send to remote storage but were retried because the send error was recoverable.",
+		ConstLabels: constLabels,
+	})
 	m.retriedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{
 		Namespace: namespace,
 		Subsystem: subsystem,
@@ -166,6 +193,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManagerMetrics {
 		Help:        "Total number of exemplars which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.",
 		ConstLabels: constLabels,
 	})
+	m.droppedHistogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace:   namespace,
+		Subsystem:   subsystem,
+		Name:        "histograms_dropped_total",
+		Help:        "Total number of histograms which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.",
+		ConstLabels: constLabels,
+	})
 	m.enqueueRetriesTotal = prometheus.NewCounter(prometheus.CounterOpts{
 		Namespace: namespace,
 		Subsystem: subsystem,
@@ -204,6 +238,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManagerMetrics {
 		Help:        "The number of exemplars pending in the queues shards to be sent to the remote storage.",
 		ConstLabels: constLabels,
 	})
+	m.pendingHistograms = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace:   namespace,
+		Subsystem:   subsystem,
+		Name:        "histograms_pending",
+		Help:        "The number of histograms pending in the queues shards to be sent to the remote storage.",
+		ConstLabels: constLabels,
+	})
 	m.shardCapacity = prometheus.NewGauge(prometheus.GaugeOpts{
 		Namespace: namespace,
 		Subsystem: subsystem,
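Each histogram metric mirrors an existing sample metric one for one. Assuming the namespace and subsystem constants already used by the queue-manager metrics in this file ("prometheus" and "remote_storage"), the new series would register as:

// prometheus_remote_storage_histograms_total
// prometheus_remote_storage_histograms_failed_total
// prometheus_remote_storage_histograms_retried_total
// prometheus_remote_storage_histograms_dropped_total
// prometheus_remote_storage_histograms_pending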
@@ -269,20 +310,25 @@ func (m *queueManagerMetrics) register() {
 		m.reg.MustRegister(
 			m.samplesTotal,
 			m.exemplarsTotal,
+			m.histogramsTotal,
 			m.metadataTotal,
 			m.failedSamplesTotal,
 			m.failedExemplarsTotal,
+			m.failedHistogramsTotal,
 			m.failedMetadataTotal,
 			m.retriedSamplesTotal,
 			m.retriedExemplarsTotal,
+			m.retriedHistogramsTotal,
 			m.retriedMetadataTotal,
 			m.droppedSamplesTotal,
 			m.droppedExemplarsTotal,
+			m.droppedHistogramsTotal,
 			m.enqueueRetriesTotal,
 			m.sentBatchDuration,
 			m.highestSentTimestamp,
 			m.pendingSamples,
 			m.pendingExemplars,
+			m.pendingHistograms,
 			m.shardCapacity,
 			m.numShards,
 			m.maxNumShards,
@@ -299,20 +345,25 @@ func (m *queueManagerMetrics) unregister() {
 	if m.reg != nil {
 		m.reg.Unregister(m.samplesTotal)
 		m.reg.Unregister(m.exemplarsTotal)
+		m.reg.Unregister(m.histogramsTotal)
 		m.reg.Unregister(m.metadataTotal)
 		m.reg.Unregister(m.failedSamplesTotal)
 		m.reg.Unregister(m.failedExemplarsTotal)
+		m.reg.Unregister(m.failedHistogramsTotal)
 		m.reg.Unregister(m.failedMetadataTotal)
 		m.reg.Unregister(m.retriedSamplesTotal)
 		m.reg.Unregister(m.retriedExemplarsTotal)
+		m.reg.Unregister(m.retriedHistogramsTotal)
 		m.reg.Unregister(m.retriedMetadataTotal)
 		m.reg.Unregister(m.droppedSamplesTotal)
 		m.reg.Unregister(m.droppedExemplarsTotal)
+		m.reg.Unregister(m.droppedHistogramsTotal)
 		m.reg.Unregister(m.enqueueRetriesTotal)
 		m.reg.Unregister(m.sentBatchDuration)
 		m.reg.Unregister(m.highestSentTimestamp)
 		m.reg.Unregister(m.pendingSamples)
 		m.reg.Unregister(m.pendingExemplars)
+		m.reg.Unregister(m.pendingHistograms)
 		m.reg.Unregister(m.shardCapacity)
 		m.reg.Unregister(m.numShards)
 		m.reg.Unregister(m.maxNumShards)
@ -341,15 +392,16 @@ type WriteClient interface {
|
||||||
type QueueManager struct {
|
type QueueManager struct {
|
||||||
lastSendTimestamp atomic.Int64
|
lastSendTimestamp atomic.Int64
|
||||||
|
|
||||||
logger log.Logger
|
logger log.Logger
|
||||||
flushDeadline time.Duration
|
flushDeadline time.Duration
|
||||||
cfg config.QueueConfig
|
cfg config.QueueConfig
|
||||||
mcfg config.MetadataConfig
|
mcfg config.MetadataConfig
|
||||||
externalLabels labels.Labels
|
externalLabels labels.Labels
|
||||||
relabelConfigs []*relabel.Config
|
relabelConfigs []*relabel.Config
|
||||||
sendExemplars bool
|
sendExemplars bool
|
||||||
watcher *wlog.Watcher
|
sendNativeHistograms bool
|
||||||
metadataWatcher *MetadataWatcher
|
watcher *wlog.Watcher
|
||||||
|
metadataWatcher *MetadataWatcher
|
||||||
|
|
||||||
clientMtx sync.RWMutex
|
clientMtx sync.RWMutex
|
||||||
storeClient WriteClient
|
storeClient WriteClient
|
||||||
|
@ -396,6 +448,7 @@ func NewQueueManager(
|
||||||
highestRecvTimestamp *maxTimestamp,
|
highestRecvTimestamp *maxTimestamp,
|
||||||
sm ReadyScrapeManager,
|
sm ReadyScrapeManager,
|
||||||
enableExemplarRemoteWrite bool,
|
enableExemplarRemoteWrite bool,
|
||||||
|
enableNativeHistogramRemoteWrite bool,
|
||||||
) *QueueManager {
|
) *QueueManager {
|
||||||
if logger == nil {
|
if logger == nil {
|
||||||
logger = log.NewNopLogger()
|
logger = log.NewNopLogger()
|
||||||
|
@ -403,14 +456,15 @@ func NewQueueManager(
|
||||||
|
|
||||||
logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint())
|
logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint())
|
||||||
t := &QueueManager{
|
t := &QueueManager{
|
||||||
logger: logger,
|
logger: logger,
|
||||||
flushDeadline: flushDeadline,
|
flushDeadline: flushDeadline,
|
||||||
cfg: cfg,
|
cfg: cfg,
|
||||||
mcfg: mCfg,
|
mcfg: mCfg,
|
||||||
externalLabels: externalLabels,
|
externalLabels: externalLabels,
|
||||||
relabelConfigs: relabelConfigs,
|
relabelConfigs: relabelConfigs,
|
||||||
storeClient: client,
|
storeClient: client,
|
||||||
sendExemplars: enableExemplarRemoteWrite,
|
sendExemplars: enableExemplarRemoteWrite,
|
||||||
|
sendNativeHistograms: enableNativeHistogramRemoteWrite,
|
||||||
|
|
||||||
seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels),
|
seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels),
|
||||||
seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
|
seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
|
||||||
|
@ -430,7 +484,7 @@ func NewQueueManager(
|
||||||
highestRecvTimestamp: highestRecvTimestamp,
|
highestRecvTimestamp: highestRecvTimestamp,
|
||||||
}
|
}
|
||||||
|
|
||||||
t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite)
|
t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite)
|
||||||
if t.mcfg.Send {
|
if t.mcfg.Send {
|
||||||
t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline)
|
t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline)
|
||||||
}
|
}
|
||||||
@@ -538,11 +592,11 @@ outer:
 			return false
 		default:
 		}
-		if t.shards.enqueue(s.Ref, sampleOrExemplar{
+		if t.shards.enqueue(s.Ref, timeSeries{
 			seriesLabels: lbls,
 			timestamp:    s.T,
 			value:        s.V,
-			isSample:     true,
+			sType:        tSample,
 		}) {
 			continue outer
 		}
@@ -588,11 +642,59 @@ outer:
 			return false
 		default:
 		}
-		if t.shards.enqueue(e.Ref, sampleOrExemplar{
+		if t.shards.enqueue(e.Ref, timeSeries{
 			seriesLabels:   lbls,
 			timestamp:      e.T,
 			value:          e.V,
 			exemplarLabels: e.Labels,
+			sType:          tExemplar,
+		}) {
+			continue outer
+		}
+
+		t.metrics.enqueueRetriesTotal.Inc()
+		time.Sleep(time.Duration(backoff))
+		backoff = backoff * 2
+		if backoff > t.cfg.MaxBackoff {
+			backoff = t.cfg.MaxBackoff
+		}
+	}
+	}
+	return true
+}
+
+func (t *QueueManager) AppendHistograms(histograms []record.RefHistogramSample) bool {
+	if !t.sendNativeHistograms {
+		return true
+	}
+
+outer:
+	for _, h := range histograms {
+		t.seriesMtx.Lock()
+		lbls, ok := t.seriesLabels[h.Ref]
+		if !ok {
+			t.metrics.droppedHistogramsTotal.Inc()
+			t.dataDropped.incr(1)
+			if _, ok := t.droppedSeries[h.Ref]; !ok {
+				level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref)
+			}
+			t.seriesMtx.Unlock()
+			continue
+		}
+		t.seriesMtx.Unlock()
+
+		backoff := model.Duration(5 * time.Millisecond)
+		for {
+			select {
+			case <-t.quit:
+				return false
+			default:
+			}
+			if t.shards.enqueue(h.Ref, timeSeries{
+				seriesLabels: lbls,
+				timestamp:    h.T,
+				histogram:    h.H,
+				sType:        tHistogram,
 		}) {
 			continue outer
 		}
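AppendHistograms mirrors the sample and exemplar paths: resolve labels from seriesLabels under seriesMtx, drop (and count) anything without a known series ref, then retry enqueueing with capped exponential backoff until shutdown. A hedged call-site sketch; the variables and the watcher wiring are assumptions modeled on the sample path:

// Sketch: a WAL consumer feeding decoded native-histogram records into the
// queue manager. qm, seriesRef, and ts are placeholders.
histograms := []record.RefHistogramSample{
	{Ref: seriesRef, T: ts, H: &histogram.Histogram{Schema: 0, Count: 1, Sum: 2.5}},
}
if !qm.AppendHistograms(histograms) {
	// false only means the manager is shutting down (t.quit closed);
	// callers handle it like a failed Append of samples.
	return
}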
@@ -921,8 +1023,9 @@ type shards struct {
 	qm     *QueueManager
 	queues []*queue
 	// So we can accurately track how many of each are lost during shard shutdowns.
 	enqueuedSamples    atomic.Int64
 	enqueuedExemplars  atomic.Int64
+	enqueuedHistograms atomic.Int64

 	// Emulate a wait group with a channel and an atomic int, as you
 	// cannot select on a wait group.
@@ -934,9 +1037,10 @@ type shards struct {
 	// Hard shutdown context is used to terminate outgoing HTTP connections
 	// after giving them a chance to terminate.
 	hardShutdown                    context.CancelFunc
 	samplesDroppedOnHardShutdown    atomic.Uint32
 	exemplarsDroppedOnHardShutdown  atomic.Uint32
+	histogramsDroppedOnHardShutdown atomic.Uint32
 }

 // start the shards; must be called before any call to enqueue.
@@ -961,8 +1065,10 @@ func (s *shards) start(n int) {
 	s.done = make(chan struct{})
 	s.enqueuedSamples.Store(0)
 	s.enqueuedExemplars.Store(0)
+	s.enqueuedHistograms.Store(0)
 	s.samplesDroppedOnHardShutdown.Store(0)
 	s.exemplarsDroppedOnHardShutdown.Store(0)
+	s.histogramsDroppedOnHardShutdown.Store(0)
 	for i := 0; i < n; i++ {
 		go s.runShard(hardShutdownCtx, i, newQueues[i])
 	}
@@ -1008,7 +1114,7 @@ func (s *shards) stop() {
 // retry. A shard is full when its configured capacity has been reached,
 // specifically, when s.queues[shard] has filled its batchQueue channel and the
 // partial batch has also been filled.
-func (s *shards) enqueue(ref chunks.HeadSeriesRef, data sampleOrExemplar) bool {
+func (s *shards) enqueue(ref chunks.HeadSeriesRef, data timeSeries) bool {
 	s.mtx.RLock()
 	defer s.mtx.RUnlock()

@@ -1021,12 +1127,16 @@ func (s *shards) enqueue(ref chunks.HeadSeriesRef, data sampleOrExemplar) bool {
 	if !appended {
 		return false
 	}
-	if data.isSample {
+	switch data.sType {
+	case tSample:
 		s.qm.metrics.pendingSamples.Inc()
 		s.enqueuedSamples.Inc()
-	} else {
+	case tExemplar:
 		s.qm.metrics.pendingExemplars.Inc()
 		s.enqueuedExemplars.Inc()
+	case tHistogram:
+		s.qm.metrics.pendingHistograms.Inc()
+		s.enqueuedHistograms.Inc()
 	}
 	return true
 }
@@ -1035,24 +1145,34 @@ func (s *shards) enqueue(ref chunks.HeadSeriesRef, data sampleOrExemplar) bool {
 type queue struct {
 	// batchMtx covers operations appending to or publishing the partial batch.
 	batchMtx   sync.Mutex
-	batch      []sampleOrExemplar
-	batchQueue chan []sampleOrExemplar
+	batch      []timeSeries
+	batchQueue chan []timeSeries

 	// Since we know there are a limited number of batches out, using a stack
 	// is easy and safe so a sync.Pool is not necessary.
 	// poolMtx covers adding and removing batches from the batchPool.
 	poolMtx   sync.Mutex
-	batchPool [][]sampleOrExemplar
+	batchPool [][]timeSeries
 }

-type sampleOrExemplar struct {
+type timeSeries struct {
 	seriesLabels   labels.Labels
 	value          float64
+	histogram      *histogram.Histogram
 	timestamp      int64
 	exemplarLabels labels.Labels
-	isSample       bool
+	// The type of series: sample, exemplar, or histogram.
+	sType seriesType
 }

+type seriesType int
+
+const (
+	tSample seriesType = iota
+	tExemplar
+	tHistogram
+)
+
 func newQueue(batchSize, capacity int) *queue {
 	batches := capacity / batchSize
 	// Always create an unbuffered channel even if capacity is configured to be
@ -1061,17 +1181,17 @@ func newQueue(batchSize, capacity int) *queue {
|
||||||
batches = 1
|
batches = 1
|
||||||
}
|
}
|
||||||
return &queue{
|
return &queue{
|
||||||
batch: make([]sampleOrExemplar, 0, batchSize),
|
batch: make([]timeSeries, 0, batchSize),
|
||||||
batchQueue: make(chan []sampleOrExemplar, batches),
|
batchQueue: make(chan []timeSeries, batches),
|
||||||
// batchPool should have capacity for everything in the channel + 1 for
|
// batchPool should have capacity for everything in the channel + 1 for
|
||||||
// the batch being processed.
|
// the batch being processed.
|
||||||
batchPool: make([][]sampleOrExemplar, 0, batches+1),
|
batchPool: make([][]timeSeries, 0, batches+1),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Append the sampleOrExemplar to the buffered batch. Returns false if it
|
// Append the timeSeries to the buffered batch. Returns false if it
|
||||||
// cannot be added and must be retried.
|
// cannot be added and must be retried.
|
||||||
func (q *queue) Append(datum sampleOrExemplar) bool {
|
func (q *queue) Append(datum timeSeries) bool {
|
||||||
q.batchMtx.Lock()
|
q.batchMtx.Lock()
|
||||||
defer q.batchMtx.Unlock()
|
defer q.batchMtx.Unlock()
|
||||||
q.batch = append(q.batch, datum)
|
q.batch = append(q.batch, datum)
|
||||||
|
@ -1089,12 +1209,12 @@ func (q *queue) Append(datum sampleOrExemplar) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *queue) Chan() <-chan []sampleOrExemplar {
|
func (q *queue) Chan() <-chan []timeSeries {
|
||||||
return q.batchQueue
|
return q.batchQueue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Batch returns the current batch and allocates a new batch.
|
// Batch returns the current batch and allocates a new batch.
|
||||||
func (q *queue) Batch() []sampleOrExemplar {
|
func (q *queue) Batch() []timeSeries {
|
||||||
q.batchMtx.Lock()
|
q.batchMtx.Lock()
|
||||||
defer q.batchMtx.Unlock()
|
defer q.batchMtx.Unlock()
|
||||||
|
|
||||||
|
@ -1109,7 +1229,7 @@ func (q *queue) Batch() []sampleOrExemplar {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReturnForReuse adds the batch buffer back to the internal pool.
|
// ReturnForReuse adds the batch buffer back to the internal pool.
|
||||||
func (q *queue) ReturnForReuse(batch []sampleOrExemplar) {
|
func (q *queue) ReturnForReuse(batch []timeSeries) {
|
||||||
q.poolMtx.Lock()
|
q.poolMtx.Lock()
|
||||||
defer q.poolMtx.Unlock()
|
defer q.poolMtx.Unlock()
|
||||||
if len(q.batchPool) < cap(q.batchPool) {
|
if len(q.batchPool) < cap(q.batchPool) {
|
||||||
|
@ -1149,7 +1269,7 @@ func (q *queue) tryEnqueueingBatch(done <-chan struct{}) bool {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *queue) newBatch(capacity int) []sampleOrExemplar {
|
func (q *queue) newBatch(capacity int) []timeSeries {
|
||||||
q.poolMtx.Lock()
|
q.poolMtx.Lock()
|
||||||
defer q.poolMtx.Unlock()
|
defer q.poolMtx.Unlock()
|
||||||
batches := len(q.batchPool)
|
batches := len(q.batchPool)
|
||||||
|
@ -1158,7 +1278,7 @@ func (q *queue) newBatch(capacity int) []sampleOrExemplar {
|
||||||
q.batchPool = q.batchPool[:batches-1]
|
q.batchPool = q.batchPool[:batches-1]
|
||||||
return batch
|
return batch
|
||||||
}
|
}
|
||||||
return make([]sampleOrExemplar, 0, capacity)
|
return make([]timeSeries, 0, capacity)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
|
func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
|
||||||
@@ -1209,22 +1329,26 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
 			// Remove them from pending and mark them as failed.
 			droppedSamples := int(s.enqueuedSamples.Load())
 			droppedExemplars := int(s.enqueuedExemplars.Load())
+			droppedHistograms := int(s.enqueuedHistograms.Load())
 			s.qm.metrics.pendingSamples.Sub(float64(droppedSamples))
 			s.qm.metrics.pendingExemplars.Sub(float64(droppedExemplars))
+			s.qm.metrics.pendingHistograms.Sub(float64(droppedHistograms))
 			s.qm.metrics.failedSamplesTotal.Add(float64(droppedSamples))
 			s.qm.metrics.failedExemplarsTotal.Add(float64(droppedExemplars))
+			s.qm.metrics.failedHistogramsTotal.Add(float64(droppedHistograms))
 			s.samplesDroppedOnHardShutdown.Add(uint32(droppedSamples))
 			s.exemplarsDroppedOnHardShutdown.Add(uint32(droppedExemplars))
+			s.histogramsDroppedOnHardShutdown.Add(uint32(droppedHistograms))
 			return

 		case batch, ok := <-batchQueue:
 			if !ok {
 				return
 			}
-			nPendingSamples, nPendingExemplars := s.populateTimeSeries(batch, pendingData)
+			nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
 			queue.ReturnForReuse(batch)
-			n := nPendingSamples + nPendingExemplars
-			s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, pBuf, &buf)
+			n := nPendingSamples + nPendingExemplars + nPendingHistograms
+			s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)

 			stop()
 			timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
@@ -1232,10 +1356,10 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
 		case <-timer.C:
 			batch := queue.Batch()
 			if len(batch) > 0 {
-				nPendingSamples, nPendingExemplars := s.populateTimeSeries(batch, pendingData)
-				n := nPendingSamples + nPendingExemplars
+				nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
+				n := nPendingSamples + nPendingExemplars + nPendingHistograms
 				level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum)
-				s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, pBuf, &buf)
+				s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
 			}
 			queue.ReturnForReuse(batch)
 			timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
@@ -1243,43 +1367,51 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
 	}
 }

-func (s *shards) populateTimeSeries(batch []sampleOrExemplar, pendingData []prompb.TimeSeries) (int, int) {
-	var nPendingSamples, nPendingExemplars int
+func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries) (int, int, int) {
+	var nPendingSamples, nPendingExemplars, nPendingHistograms int
 	for nPending, d := range batch {
 		pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
 		if s.qm.sendExemplars {
 			pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0]
 		}
+		if s.qm.sendNativeHistograms {
+			pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0]
+		}

 		// Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
 		// retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
 		// stop reading from the queue. This makes it safe to reference pendingSamples by index.
-		if d.isSample {
-			pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
+		pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
+		switch d.sType {
+		case tSample:
 			pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{
 				Value:     d.value,
 				Timestamp: d.timestamp,
 			})
 			nPendingSamples++
-		} else {
-			pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
+		case tExemplar:
 			pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, prompb.Exemplar{
 				Labels:    labelsToLabelsProto(d.exemplarLabels, nil),
 				Value:     d.value,
 				Timestamp: d.timestamp,
 			})
 			nPendingExemplars++
+		case tHistogram:
+			pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
+			nPendingHistograms++
 		}
 	}
-	return nPendingSamples, nPendingExemplars
+	return nPendingSamples, nPendingExemplars, nPendingHistograms
 }

-func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) {
+func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
 	begin := time.Now()
-	err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, pBuf, buf)
+	err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, pBuf, buf)
 	if err != nil {
 		level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err)
 		s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount))
 		s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount))
+		s.qm.metrics.failedHistogramsTotal.Add(float64(histogramCount))
 	}

 	// These counters are used to calculate the dynamic sharding, and as such
@@ -1287,16 +1419,18 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
 	s.qm.dataOut.incr(int64(len(samples)))
 	s.qm.dataOutDuration.incr(int64(time.Since(begin)))
 	s.qm.lastSendTimestamp.Store(time.Now().Unix())
-	// Pending samples/exemplars also should be subtracted as an error means
+	// Pending samples/exemplars/histograms also should be subtracted as an error means
 	// they will not be retried.
 	s.qm.metrics.pendingSamples.Sub(float64(sampleCount))
 	s.qm.metrics.pendingExemplars.Sub(float64(exemplarCount))
+	s.qm.metrics.pendingHistograms.Sub(float64(histogramCount))
 	s.enqueuedSamples.Sub(int64(sampleCount))
 	s.enqueuedExemplars.Sub(int64(exemplarCount))
+	s.enqueuedHistograms.Sub(int64(histogramCount))
 }

 // sendSamples to the remote storage with backoff for recoverable errors.
-func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error {
+func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) error {
 	// Build the WriteRequest with no metadata.
 	req, highest, err := buildWriteRequest(samples, nil, pBuf, *buf)
 	if err != nil {
@@ -1326,10 +1460,14 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
 		if exemplarCount > 0 {
 			span.SetAttributes(attribute.Int("exemplars", exemplarCount))
 		}
+		if histogramCount > 0 {
+			span.SetAttributes(attribute.Int("histograms", histogramCount))
+		}

 		begin := time.Now()
 		s.qm.metrics.samplesTotal.Add(float64(sampleCount))
 		s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
+		s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
 		err := s.qm.client().Store(ctx, *buf)
 		s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())

@@ -1344,6 +1482,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
 	onRetry := func() {
 		s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount))
 		s.qm.metrics.retriedExemplarsTotal.Add(float64(exemplarCount))
+		s.qm.metrics.retriedHistogramsTotal.Add(float64(histogramCount))
 	}

 	err = sendWriteRequestWithBackoff(ctx, s.qm.cfg, s.qm.logger, attemptStore, onRetry)
@@ -1420,6 +1559,9 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta
 		if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
 			highest = ts.Exemplars[0].Timestamp
 		}
+		if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
+			highest = ts.Histograms[0].Timestamp
+		}
 	}

 	req := &prompb.WriteRequest{
|
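The buildWriteRequest change above tracks the highest timestamp across samples, exemplars, and now histograms, looking only at the first entry of each slice because entries are assumed to arrive in timestamp order. A minimal standalone sketch of that bookkeeping; the point and series types are illustrative stand-ins for prompb.TimeSeries, not Prometheus code:

package main

import "fmt"

type point struct{ Timestamp int64 }

type series struct {
	Samples    []point
	Exemplars  []point
	Histograms []point
}

// highestTimestamp scans only the first element of each slice, mirroring the
// in-order assumption made by the code in the diff above.
func highestTimestamp(ss []series) int64 {
	var highest int64
	for _, ts := range ss {
		for _, g := range [][]point{ts.Samples, ts.Exemplars, ts.Histograms} {
			if len(g) > 0 && g[0].Timestamp > highest {
				highest = g[0].Timestamp
			}
		}
	}
	return highest
}

func main() {
	fmt.Println(highestTimestamp([]series{{Samples: []point{{5}}, Histograms: []point{{9}}}})) // 9
}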
@@ -36,6 +36,7 @@ import (
 	"go.uber.org/atomic"
 
 	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/textparse"
 	"github.com/prometheus/prometheus/model/timestamp"
@@ -60,13 +61,15 @@ func newHighestTimestampMetric() *maxTimestamp {
 
 func TestSampleDelivery(t *testing.T) {
 	testcases := []struct {
 		name       string
 		samples    bool
 		exemplars  bool
+		histograms bool
 	}{
-		{samples: true, exemplars: false, name: "samples only"},
-		{samples: true, exemplars: true, name: "both samples and exemplars"},
-		{samples: false, exemplars: true, name: "exemplars only"},
+		{samples: true, exemplars: false, histograms: false, name: "samples only"},
+		{samples: true, exemplars: true, histograms: true, name: "samples, exemplars, and histograms"},
+		{samples: false, exemplars: true, histograms: false, name: "exemplars only"},
+		{samples: false, exemplars: false, histograms: true, name: "histograms only"},
 	}
 
 	// Let's create an even number of send batches so we don't run into the
@@ -86,6 +89,7 @@ func TestSampleDelivery(t *testing.T) {
 	writeConfig := baseRemoteWriteConfig("http://test-storage.com")
 	writeConfig.QueueConfig = queueConfig
 	writeConfig.SendExemplars = true
+	writeConfig.SendNativeHistograms = true
 
 	conf := &config.Config{
 		GlobalConfig: config.DefaultGlobalConfig,
@@ -97,9 +101,10 @@ func TestSampleDelivery(t *testing.T) {
 	for _, tc := range testcases {
 		t.Run(tc.name, func(t *testing.T) {
 			var (
 				series     []record.RefSeries
 				samples    []record.RefSample
 				exemplars  []record.RefExemplar
+				histograms []record.RefHistogramSample
 			)
 
 			// Generates same series in both cases.
@@ -109,6 +114,9 @@ func TestSampleDelivery(t *testing.T) {
 			if tc.exemplars {
 				exemplars, series = createExemplars(n, n)
 			}
+			if tc.histograms {
+				histograms, series = createHistograms(n, n)
+			}
 
 			// Apply new config.
 			queueConfig.Capacity = len(samples)
@@ -126,15 +134,19 @@ func TestSampleDelivery(t *testing.T) {
 			// Send first half of data.
 			c.expectSamples(samples[:len(samples)/2], series)
 			c.expectExemplars(exemplars[:len(exemplars)/2], series)
+			c.expectHistograms(histograms[:len(histograms)/2], series)
 			qm.Append(samples[:len(samples)/2])
 			qm.AppendExemplars(exemplars[:len(exemplars)/2])
+			qm.AppendHistograms(histograms[:len(histograms)/2])
 			c.waitForExpectedData(t)
 
 			// Send second half of data.
 			c.expectSamples(samples[len(samples)/2:], series)
 			c.expectExemplars(exemplars[len(exemplars)/2:], series)
+			c.expectHistograms(histograms[len(histograms)/2:], series)
 			qm.Append(samples[len(samples)/2:])
 			qm.AppendExemplars(exemplars[len(exemplars)/2:])
+			qm.AppendHistograms(histograms[len(histograms)/2:])
 			c.waitForExpectedData(t)
 		})
 	}
@@ -149,7 +161,7 @@ func TestMetadataDelivery(t *testing.T) {
 	mcfg := config.DefaultMetadataConfig
 
 	metrics := newQueueManagerMetrics(nil, "", "")
-	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
+	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 	m.Start()
 	defer m.Stop()
 
@@ -188,7 +200,7 @@ func TestSampleDeliveryTimeout(t *testing.T) {
 	dir := t.TempDir()
 
 	metrics := newQueueManagerMetrics(nil, "", "")
-	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
+	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 	m.StoreSeries(series, 0)
 	m.Start()
 	defer m.Stop()
@@ -230,7 +242,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
 	mcfg := config.DefaultMetadataConfig
 
 	metrics := newQueueManagerMetrics(nil, "", "")
-	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
+	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 	m.StoreSeries(series, 0)
 
 	m.Start()
@@ -250,7 +262,7 @@ func TestShutdown(t *testing.T) {
 	mcfg := config.DefaultMetadataConfig
 	metrics := newQueueManagerMetrics(nil, "", "")
 
-	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false)
+	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 	n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend
 	samples, series := createTimeseries(n, n)
 	m.StoreSeries(series, 0)
@@ -288,7 +300,7 @@ func TestSeriesReset(t *testing.T) {
 	cfg := config.DefaultQueueConfig
 	mcfg := config.DefaultMetadataConfig
 	metrics := newQueueManagerMetrics(nil, "", "")
-	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false)
+	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 	for i := 0; i < numSegments; i++ {
 		series := []record.RefSeries{}
 		for j := 0; j < numSeries; j++ {
@@ -317,7 +329,7 @@ func TestReshard(t *testing.T) {
 	dir := t.TempDir()
 
 	metrics := newQueueManagerMetrics(nil, "", "")
-	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
+	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 	m.StoreSeries(series, 0)
 
 	m.Start()
@@ -353,7 +365,7 @@ func TestReshardRaceWithStop(t *testing.T) {
 	go func() {
 		for {
 			metrics := newQueueManagerMetrics(nil, "", "")
-			m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
+			m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 			m.Start()
 			h.Unlock()
 			h.Lock()
@@ -388,7 +400,7 @@ func TestReshardPartialBatch(t *testing.T) {
 	cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
 
 	metrics := newQueueManagerMetrics(nil, "", "")
-	m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
+	m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 	m.StoreSeries(series, 0)
 
 	m.Start()
@@ -433,7 +445,7 @@ func TestQueueFilledDeadlock(t *testing.T) {
 
 	metrics := newQueueManagerMetrics(nil, "", "")
 
-	m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
+	m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 	m.StoreSeries(series, 0)
 	m.Start()
 	defer m.Stop()
@@ -460,7 +472,7 @@ func TestReleaseNoninternedString(t *testing.T) {
 	mcfg := config.DefaultMetadataConfig
 	metrics := newQueueManagerMetrics(nil, "", "")
 	c := NewTestWriteClient()
-	m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
+	m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 	m.Start()
 	defer m.Stop()
 
@@ -507,7 +519,7 @@ func TestShouldReshard(t *testing.T) {
 	for _, c := range cases {
 		metrics := newQueueManagerMetrics(nil, "", "")
 		client := NewTestWriteClient()
-		m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
+		m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 		m.numShards = c.startingShards
 		m.dataIn.incr(c.samplesIn)
 		m.dataOut.incr(c.samplesOut)
@@ -566,21 +578,54 @@ func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []recor
 	return exemplars, series
 }
 
+func createHistograms(numSamples, numSeries int) ([]record.RefHistogramSample, []record.RefSeries) {
+	histograms := make([]record.RefHistogramSample, 0, numSamples)
+	series := make([]record.RefSeries, 0, numSeries)
+	for i := 0; i < numSeries; i++ {
+		name := fmt.Sprintf("test_metric_%d", i)
+		for j := 0; j < numSamples; j++ {
+			h := record.RefHistogramSample{
+				Ref: chunks.HeadSeriesRef(i),
+				T:   int64(j),
+				H: &histogram.Histogram{
+					Schema:          2,
+					ZeroThreshold:   1e-128,
+					ZeroCount:       0,
+					Count:           2,
+					Sum:             0,
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
+					PositiveBuckets: []int64{int64(i) + 1},
+					NegativeSpans:   []histogram.Span{{Offset: 0, Length: 1}},
+					NegativeBuckets: []int64{int64(-i) - 1},
+				},
+			}
+			histograms = append(histograms, h)
+		}
+		series = append(series, record.RefSeries{
+			Ref:    chunks.HeadSeriesRef(i),
+			Labels: labels.Labels{{Name: "__name__", Value: name}},
+		})
+	}
+	return histograms, series
+}
+
 func getSeriesNameFromRef(r record.RefSeries) string {
 	return r.Labels.Get("__name__")
}
 
 type TestWriteClient struct {
 	receivedSamples    map[string][]prompb.Sample
 	expectedSamples    map[string][]prompb.Sample
 	receivedExemplars  map[string][]prompb.Exemplar
 	expectedExemplars  map[string][]prompb.Exemplar
+	receivedHistograms map[string][]prompb.Histogram
+	expectedHistograms map[string][]prompb.Histogram
 	receivedMetadata   map[string][]prompb.MetricMetadata
 	writesReceived     int
 	withWaitGroup      bool
 	wg                 sync.WaitGroup
 	mtx                sync.Mutex
 	buf                []byte
 }
 
 func NewTestWriteClient() *TestWriteClient {
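The createHistograms helper above builds sparse native histograms: each span places a run of contiguous buckets at an offset relative to the end of the previous span, and the bucket slices are delta-encoded, so each entry is the difference from the previous absolute count. A hedged standalone sketch of how such spans and deltas expand into absolute bucket counts; Span here mirrors the shape of histogram.Span but is a local stand-in:

package main

import "fmt"

// Span mirrors histogram.Span: Offset is relative to the end of the previous
// span, Length is the number of consecutive buckets that follow.
type Span struct {
	Offset int32
	Length uint32
}

// expandBuckets turns spans plus delta-encoded bucket counts (the shape of
// PositiveBuckets/NegativeBuckets above) into absolute index -> count.
func expandBuckets(spans []Span, deltas []int64) map[int32]int64 {
	out := make(map[int32]int64)
	var idx int32
	var count int64
	i := 0
	for _, s := range spans {
		idx += s.Offset
		for j := uint32(0); j < s.Length; j++ {
			count += deltas[i] // each entry is a delta on the previous count
			out[idx] = count
			idx++
			i++
		}
	}
	return out
}

func main() {
	// The test helper uses a single span of length 1 with one delta, so the
	// decoded result is a single bucket at index 0.
	fmt.Println(expandBuckets([]Span{{Offset: 0, Length: 1}}, []int64{3})) // map[0:3]
}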
@@ -634,6 +679,23 @@ func (c *TestWriteClient) expectExemplars(ss []record.RefExemplar, series []reco
 	c.wg.Add(len(ss))
 }
 
+func (c *TestWriteClient) expectHistograms(hh []record.RefHistogramSample, series []record.RefSeries) {
+	if !c.withWaitGroup {
+		return
+	}
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
+
+	c.expectedHistograms = map[string][]prompb.Histogram{}
+	c.receivedHistograms = map[string][]prompb.Histogram{}
+
+	for _, h := range hh {
+		seriesName := getSeriesNameFromRef(series[h.Ref])
+		c.expectedHistograms[seriesName] = append(c.expectedHistograms[seriesName], HistogramToHistogramProto(h.T, h.H))
+	}
+	c.wg.Add(len(hh))
+}
+
 func (c *TestWriteClient) waitForExpectedData(tb testing.TB) {
 	if !c.withWaitGroup {
 		return
@@ -647,6 +709,9 @@ func (c *TestWriteClient) waitForExpectedData(tb testing.TB) {
 	for ts, expectedExemplar := range c.expectedExemplars {
 		require.Equal(tb, expectedExemplar, c.receivedExemplars[ts], ts)
 	}
+	for ts, expectedHistogram := range c.expectedHistograms {
+		require.Equal(tb, expectedHistogram, c.receivedHistograms[ts], ts)
+	}
 }
 
 func (c *TestWriteClient) Store(_ context.Context, req []byte) error {
@@ -666,7 +731,6 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte) error {
 	if err := proto.Unmarshal(reqBuf, &reqProto); err != nil {
 		return err
 	}
-
 	count := 0
 	for _, ts := range reqProto.Timeseries {
 		labels := labelProtosToLabels(ts.Labels)
@@ -680,6 +744,11 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte) error {
 			count++
 			c.receivedExemplars[seriesName] = append(c.receivedExemplars[seriesName], ex)
 		}
+
+		for _, histogram := range ts.Histograms {
+			count++
+			c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], histogram)
+		}
 	}
 	if c.withWaitGroup {
 		c.wg.Add(-count)
@@ -776,7 +845,7 @@ func BenchmarkSampleSend(b *testing.B) {
 	dir := b.TempDir()
 
 	metrics := newQueueManagerMetrics(nil, "", "")
-	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
+	m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 	m.StoreSeries(series, 0)
 
 	// These should be received by the client.
@@ -822,7 +891,7 @@ func BenchmarkStartup(b *testing.B) {
 		c := NewTestBlockedWriteClient()
 		m := NewQueueManager(metrics, nil, nil, logger, dir,
 			newEWMARate(ewmaWeight, shardUpdateDuration),
-			cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false)
+			cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false)
 		m.watcher.SetStartTime(timestamp.Time(math.MaxInt64))
 		m.watcher.MaxSegment = segments[len(segments)-2]
 		err := m.watcher.Run()
@@ -898,7 +967,7 @@ func TestCalculateDesiredShards(t *testing.T) {
 
 	metrics := newQueueManagerMetrics(nil, "", "")
 	samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
-	m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
+	m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 
 	// Need to start the queue manager so the proper metrics are initialized.
 	// However we can stop it right away since we don't need to do any actual
@@ -975,7 +1044,7 @@ func TestCalculateDesiredShardsDetail(t *testing.T) {
 
 	metrics := newQueueManagerMetrics(nil, "", "")
 	samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
-	m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
+	m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
 
 	for _, tc := range []struct {
 		name string
@@ -1166,7 +1235,7 @@ func TestQueue_FlushAndShutdownDoesNotDeadlock(t *testing.T) {
 	batchSize := 10
 	queue := newQueue(batchSize, capacity)
 	for i := 0; i < capacity+batchSize; i++ {
-		queue.Append(sampleOrExemplar{})
+		queue.Append(timeSeries{})
 	}
 
 	done := make(chan struct{})
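TestWriteClient coordinates the test and the queue manager through a sync.WaitGroup: each expectSamples/expectExemplars/expectHistograms call adds one unit per expected item, Store subtracts one per item received, and waitForExpectedData simply blocks until they balance. A generic sketch of the same pattern, not the actual test client:

package main

import (
	"fmt"
	"sync"
)

// fakeClient illustrates the expectation/credit pattern used above.
type fakeClient struct {
	wg       sync.WaitGroup
	mtx      sync.Mutex
	received []int
}

func (c *fakeClient) expect(n int) { c.wg.Add(n) }

func (c *fakeClient) store(items []int) {
	c.mtx.Lock()
	c.received = append(c.received, items...)
	c.mtx.Unlock()
	c.wg.Add(-len(items)) // one credit per received item
}

func (c *fakeClient) waitForExpectedData() { c.wg.Wait() }

func main() {
	c := &fakeClient{}
	c.expect(3)
	go c.store([]int{1, 2, 3})
	c.waitForExpectedData()
	fmt.Println(c.received) // [1 2 3]
}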
@@ -26,6 +26,7 @@ import (
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/storage"
@@ -45,6 +46,12 @@ var (
 		Name:      "exemplars_in_total",
 		Help:      "Exemplars in to remote storage, compare to exemplars out for queue managers.",
 	})
+	histogramsIn = promauto.NewCounter(prometheus.CounterOpts{
+		Namespace: namespace,
+		Subsystem: subsystem,
+		Name:      "histograms_in_total",
+		Help:      "HistogramSamples in to remote storage, compare to histograms out for queue managers.",
+	})
 )
 
 // WriteStorage represents all the remote write storage.
@@ -188,6 +195,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
 				rws.highestTimestamp,
 				rws.scraper,
 				rwConf.SendExemplars,
+				rwConf.SendNativeHistograms,
 			)
 			// Keep track of which queues are new so we know which to start.
 			newHashes = append(newHashes, hash)
@@ -251,6 +259,7 @@ type timestampTracker struct {
 	writeStorage         *WriteStorage
 	samples              int64
 	exemplars            int64
+	histograms           int64
 	highestTimestamp     int64
 	highestRecvTimestamp *maxTimestamp
 }
@@ -269,6 +278,14 @@ func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels,
 	return 0, nil
 }
 
+func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, ts int64, h *histogram.Histogram) (storage.SeriesRef, error) {
+	t.histograms++
+	if ts > t.highestTimestamp {
+		t.highestTimestamp = ts
+	}
+	return 0, nil
+}
+
 func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
 	// TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write.
 	// UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now.
@@ -277,10 +294,11 @@ func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels,
 
 // Commit implements storage.Appender.
 func (t *timestampTracker) Commit() error {
-	t.writeStorage.samplesIn.incr(t.samples + t.exemplars)
+	t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms)
 
 	samplesIn.Add(float64(t.samples))
 	exemplarsIn.Add(float64(t.exemplars))
+	histogramsIn.Add(float64(t.histograms))
 	t.highestRecvTimestamp.Set(float64(t.highestTimestamp / 1000))
 	return nil
 }
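The timestampTracker above is an appender that stores nothing: it only counts samples, exemplars, and histograms, and Commit folds all three into the shared samplesIn rate, so histogram traffic also influences resharding decisions. An illustrative standalone reduction of that pattern; the names are hypothetical and this is not the real storage.Appender interface:

package main

import "fmt"

// tracker counts what passes through and remembers the highest timestamp.
type tracker struct {
	samples, exemplars, histograms int64
	highestTimestamp               int64
}

func (t *tracker) observe(kind string, ts int64) {
	switch kind {
	case "sample":
		t.samples++
	case "exemplar":
		t.exemplars++
	case "histogram":
		t.histograms++
	}
	if ts > t.highestTimestamp {
		t.highestTimestamp = ts
	}
}

// commit mirrors Commit above: all three kinds feed one "data in" total.
func (t *tracker) commit() int64 {
	return t.samples + t.exemplars + t.histograms
}

func main() {
	tr := &tracker{}
	tr.observe("sample", 10)
	tr.observe("histogram", 12)
	fmt.Println(tr.commit(), tr.highestTimestamp) // 2 12
}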
@@ -117,6 +117,20 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 				level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
 			}
 		}
+
+		for _, hp := range ts.Histograms {
+			hs := HistogramProtoToHistogram(hp)
+			_, err = app.AppendHistogram(0, labels, hp.Timestamp, hs)
+			if err != nil {
+				unwrappedErr := errors.Unwrap(err)
+				// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
+				// a note indicating its inclusion in the future.
+				if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
+					level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
+				}
+				return err
+			}
+		}
 	}
 
 	if outOfOrderExemplarErrs > 0 {
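The handler unwraps the append error once and then matches it against the storage sentinel errors with errors.Is, so wrapped out-of-order and out-of-bounds failures can be logged and surfaced as client errors. A self-contained sketch with stand-in sentinels (note that errors.Is would also match without the explicit Unwrap, since it walks the wrap chain itself):

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the storage sentinel errors referenced in the handler above.
var (
	errOutOfOrderSample = errors.New("out of order sample")
	errOutOfBounds      = errors.New("out of bounds")
)

// classify mirrors the handler's pattern: unwrap once, then compare with
// errors.Is so wrapped storage errors still match their sentinels.
func classify(err error) string {
	unwrapped := errors.Unwrap(err)
	if errors.Is(unwrapped, errOutOfOrderSample) || errors.Is(unwrapped, errOutOfBounds) {
		return "client error"
	}
	return "server error"
}

func main() {
	err := fmt.Errorf("append failed: %w", errOutOfOrderSample)
	fmt.Println(classify(err)) // client error
}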
@@ -26,6 +26,7 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/prompb"
@@ -50,6 +51,7 @@ func TestRemoteWriteHandler(t *testing.T) {
 
 	i := 0
 	j := 0
+	k := 0
 	for _, ts := range writeRequestFixture.Timeseries {
 		labels := labelProtosToLabels(ts.Labels)
 		for _, s := range ts.Samples {
@@ -62,6 +64,12 @@ func TestRemoteWriteHandler(t *testing.T) {
 			require.Equal(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
 			j++
 		}
+
+		for _, hp := range ts.Histograms {
+			h := HistogramProtoToHistogram(hp)
+			require.Equal(t, mockHistogram{labels, hp.Timestamp, h}, appendable.histograms[k])
+			k++
+		}
 	}
 }
 
@@ -113,6 +121,28 @@ func TestOutOfOrderExemplar(t *testing.T) {
 	require.Equal(t, http.StatusNoContent, resp.StatusCode)
 }
 
+func TestOutOfOrderHistogram(t *testing.T) {
+	buf, _, err := buildWriteRequest([]prompb.TimeSeries{{
+		Labels:     []prompb.Label{{Name: "__name__", Value: "test_metric"}},
+		Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
+	}}, nil, nil, nil)
+	require.NoError(t, err)
+
+	req, err := http.NewRequest("", "", bytes.NewReader(buf))
+	require.NoError(t, err)
+
+	appendable := &mockAppendable{
+		latestHistogram: 100,
+	}
+	handler := NewWriteHandler(log.NewNopLogger(), appendable)
+
+	recorder := httptest.NewRecorder()
+	handler.ServeHTTP(recorder, req)
+
+	resp := recorder.Result()
+	require.Equal(t, http.StatusBadRequest, resp.StatusCode)
+}
+
 func TestCommitErr(t *testing.T) {
 	buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil)
 	require.NoError(t, err)
@@ -136,11 +166,13 @@ func TestCommitErr(t *testing.T) {
 }
 
 type mockAppendable struct {
 	latestSample    int64
 	samples         []mockSample
 	latestExemplar  int64
 	exemplars       []mockExemplar
+	latestHistogram int64
+	histograms      []mockHistogram
 	commitErr       error
 }
 
 type mockSample struct {
@@ -156,6 +188,12 @@ type mockExemplar struct {
 	v float64
 }
 
+type mockHistogram struct {
+	l labels.Labels
+	t int64
+	h *histogram.Histogram
+}
+
 func (m *mockAppendable) Appender(_ context.Context) storage.Appender {
 	return m
 }
@@ -188,6 +226,16 @@ func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e
 	return 0, nil
 }
 
+func (m *mockAppendable) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
+	if t < m.latestHistogram {
+		return 0, storage.ErrOutOfOrderSample
+	}
+
+	m.latestHistogram = t
+	m.histograms = append(m.histograms, mockHistogram{l, t, h})
+	return 0, nil
+}
+
 func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
 	// TODO: Wire metadata in a mockAppendable field when we get around to handling metadata in remote_write.
 	// UpdateMetadata is no-op for remote write (where mockAppendable is being used to test) for now.
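TestOutOfOrderHistogram drives the handler directly with an httptest recorder and asserts on the recorded status; the mock appender's latestHistogram: 100 guarantees the sample at t=0 is rejected as out of order. A generic sketch of that recorder pattern with a placeholder handler, not the real NewWriteHandler:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Pretend every write is out of order, as the mock appender with
		// latestHistogram=100 forces in the test above.
		http.Error(w, "out of order sample", http.StatusBadRequest)
	})

	req := httptest.NewRequest(http.MethodPost, "/api/v1/write", strings.NewReader("payload"))
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)

	fmt.Println(rec.Result().StatusCode) // 400
}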
@@ -14,9 +14,11 @@
 package storage
 
 import (
+	"fmt"
 	"math"
 	"sort"
 
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
@@ -90,21 +92,39 @@ func (it *listSeriesIterator) At() (int64, float64) {
 	return s.T(), s.V()
 }
 
-func (it *listSeriesIterator) Next() bool {
-	it.idx++
-	return it.idx < it.samples.Len()
+func (it *listSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
+	s := it.samples.Get(it.idx)
+	return s.T(), s.H()
 }
 
-func (it *listSeriesIterator) Seek(t int64) bool {
+func (it *listSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	s := it.samples.Get(it.idx)
+	return s.T(), s.FH()
+}
+
+func (it *listSeriesIterator) AtT() int64 {
+	s := it.samples.Get(it.idx)
+	return s.T()
+}
+
+func (it *listSeriesIterator) Next() chunkenc.ValueType {
+	it.idx++
+	if it.idx >= it.samples.Len() {
+		return chunkenc.ValNone
+	}
+	return it.samples.Get(it.idx).Type()
+}
+
+func (it *listSeriesIterator) Seek(t int64) chunkenc.ValueType {
 	if it.idx == -1 {
 		it.idx = 0
 	}
 	if it.idx >= it.samples.Len() {
-		return false
+		return chunkenc.ValNone
 	}
 	// No-op check.
 	if s := it.samples.Get(it.idx); s.T() >= t {
-		return true
+		return s.Type()
 	}
 	// Do binary search between current position and end.
 	it.idx += sort.Search(it.samples.Len()-it.idx, func(i int) bool {
@@ -112,7 +132,10 @@ func (it *listSeriesIterator) Seek(t int64) bool {
 		return s.T() >= t
 	})
 
-	return it.idx < it.samples.Len()
+	if it.idx >= it.samples.Len() {
+		return chunkenc.ValNone
+	}
+	return it.samples.Get(it.idx).Type()
 }
 
 func (it *listSeriesIterator) Err() error { return nil }
@@ -230,27 +253,32 @@ func NewSeriesToChunkEncoder(series Series) ChunkSeries {
 }
 
 func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
-	chk := chunkenc.NewXORChunk()
-	app, err := chk.Appender()
-	if err != nil {
-		return errChunksIterator{err: err}
-	}
+	var (
+		chk chunkenc.Chunk
+		app chunkenc.Appender
+		err error
+	)
 	mint := int64(math.MaxInt64)
 	maxt := int64(math.MinInt64)
 
 	chks := []chunks.Meta{}
 
 	i := 0
 	seriesIter := s.Series.Iterator()
-	for seriesIter.Next() {
-		// Create a new chunk if too many samples in the current one.
-		if i >= seriesToChunkEncoderSplit {
-			chks = append(chks, chunks.Meta{
-				MinTime: mint,
-				MaxTime: maxt,
-				Chunk:   chk,
-			})
-			chk = chunkenc.NewXORChunk()
+	lastType := chunkenc.ValNone
+	for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() {
+		// Create a new chunk if the sample type changed or too many samples in the current one.
+		if typ != lastType || i >= seriesToChunkEncoderSplit {
+			if chk != nil {
+				chks = append(chks, chunks.Meta{
+					MinTime: mint,
+					MaxTime: maxt,
+					Chunk:   chk,
+				})
+			}
+			chk, err = chunkenc.NewEmptyChunk(typ.ChunkEncoding())
+			if err != nil {
+				return errChunksIterator{err: err}
+			}
 			app, err = chk.Appender()
 			if err != nil {
 				return errChunksIterator{err: err}
@@ -259,9 +287,23 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
 			// maxt is immediately overwritten below which is why setting it here won't make a difference.
 			i = 0
 		}
+		lastType = typ
 
-		t, v := seriesIter.At()
-		app.Append(t, v)
+		var (
+			t int64
+			v float64
+			h *histogram.Histogram
+		)
+		switch typ {
+		case chunkenc.ValFloat:
+			t, v = seriesIter.At()
+			app.Append(t, v)
+		case chunkenc.ValHistogram:
+			t, h = seriesIter.AtHistogram()
+			app.AppendHistogram(t, h)
+		default:
+			return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())}
+		}
+
 		maxt = t
 		if mint == math.MaxInt64 {
@@ -273,11 +315,13 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
 		return errChunksIterator{err: err}
 	}
 
-	chks = append(chks, chunks.Meta{
-		MinTime: mint,
-		MaxTime: maxt,
-		Chunk:   chk,
-	})
+	if chk != nil {
+		chks = append(chks, chunks.Meta{
+			MinTime: mint,
+			MaxTime: maxt,
+			Chunk:   chk,
+		})
+	}
 
 	return NewListChunkSeriesIterator(chks...)
 }
@@ -293,21 +337,34 @@ func (e errChunksIterator) Err() error { return e.err }
 // ExpandSamples iterates over all samples in the iterator, buffering all in slice.
 // Optionally it takes samples constructor, useful when you want to compare sample slices with different
 // sample implementations. if nil, sample type from this package will be used.
-func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64) tsdbutil.Sample) ([]tsdbutil.Sample, error) {
+func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) {
 	if newSampleFn == nil {
-		newSampleFn = func(t int64, v float64) tsdbutil.Sample { return sample{t, v} }
+		newSampleFn = func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample {
+			return sample{t, v, h, fh}
+		}
 	}
 
 	var result []tsdbutil.Sample
-	for iter.Next() {
-		t, v := iter.At()
-		// NaNs can't be compared normally, so substitute for another value.
-		if math.IsNaN(v) {
-			v = -42
+	for {
+		switch iter.Next() {
+		case chunkenc.ValNone:
+			return result, iter.Err()
+		case chunkenc.ValFloat:
+			t, v := iter.At()
+			// NaNs can't be compared normally, so substitute for another value.
+			if math.IsNaN(v) {
+				v = -42
+			}
+			result = append(result, newSampleFn(t, v, nil, nil))
+		case chunkenc.ValHistogram:
+			t, h := iter.AtHistogram()
+			result = append(result, newSampleFn(t, 0, h, nil))
+		case chunkenc.ValFloatHistogram:
+			t, fh := iter.AtFloatHistogram()
+			result = append(result, newSampleFn(t, 0, nil, fh))
 		}
-		result = append(result, newSampleFn(t, v))
 	}
-	return result, iter.Err()
 }
 
 // ExpandChunks iterates over all chunks in the iterator, buffering all in slice.
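With these changes the iterator contract is no longer boolean: Next and Seek return a chunkenc.ValueType that tells the caller which accessor (At, AtHistogram, AtFloatHistogram) is valid at the cursor, with ValNone signalling exhaustion. A minimal standalone sketch of a consumer loop over that style of iterator; the types below are local stand-ins, not the real chunkenc package:

package main

import "fmt"

type valueType int

const (
	valNone valueType = iota
	valFloat
	valHistogram
)

type sample struct {
	t   int64
	typ valueType
}

type iter struct {
	samples []sample
	idx     int
}

// next advances the cursor and reports the type of the sample now under it.
func (it *iter) next() valueType {
	it.idx++
	if it.idx >= len(it.samples) {
		return valNone
	}
	return it.samples[it.idx].typ
}

func main() {
	it := &iter{samples: []sample{{1, valFloat}, {2, valHistogram}}, idx: -1}
	for typ := it.next(); typ != valNone; typ = it.next() {
		switch typ {
		case valFloat:
			fmt.Println("float at", it.samples[it.idx].t)
		case valHistogram:
			fmt.Println("histogram at", it.samples[it.idx].t)
		}
	}
}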
@@ -17,43 +17,51 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
 
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
 func TestListSeriesIterator(t *testing.T) {
-	it := NewListSeriesIterator(samples{sample{0, 0}, sample{1, 1}, sample{1, 1.5}, sample{2, 2}, sample{3, 3}})
+	it := NewListSeriesIterator(samples{
+		sample{0, 0, nil, nil},
+		sample{1, 1, nil, nil},
+		sample{1, 1.5, nil, nil},
+		sample{2, 2, nil, nil},
+		sample{3, 3, nil, nil},
+	})
 
 	// Seek to the first sample with ts=1.
-	require.True(t, it.Seek(1))
+	require.Equal(t, chunkenc.ValFloat, it.Seek(1))
 	ts, v := it.At()
 	require.Equal(t, int64(1), ts)
 	require.Equal(t, 1., v)
 
 	// Seek one further, next sample still has ts=1.
-	require.True(t, it.Next())
+	require.Equal(t, chunkenc.ValFloat, it.Next())
 	ts, v = it.At()
 	require.Equal(t, int64(1), ts)
 	require.Equal(t, 1.5, v)
 
 	// Seek again to 1 and make sure we stay where we are.
-	require.True(t, it.Seek(1))
+	require.Equal(t, chunkenc.ValFloat, it.Seek(1))
 	ts, v = it.At()
 	require.Equal(t, int64(1), ts)
 	require.Equal(t, 1.5, v)
 
 	// Another seek.
-	require.True(t, it.Seek(3))
+	require.Equal(t, chunkenc.ValFloat, it.Seek(3))
 	ts, v = it.At()
 	require.Equal(t, int64(3), ts)
 	require.Equal(t, 3., v)
 
 	// And we don't go back.
-	require.True(t, it.Seek(2))
+	require.Equal(t, chunkenc.ValFloat, it.Seek(2))
 	ts, v = it.At()
 	require.Equal(t, int64(3), ts)
 	require.Equal(t, 3., v)
 
 	// Seek beyond the end.
-	require.False(t, it.Seek(5))
+	require.Equal(t, chunkenc.ValNone, it.Seek(5))
 	// And we don't go back. (This exposes issue #10027.)
-	require.False(t, it.Seek(2))
+	require.Equal(t, chunkenc.ValNone, it.Seek(2))
 }
@@ -30,6 +30,7 @@ import (
 	"go.uber.org/atomic"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/model/timestamp"
@@ -815,6 +816,11 @@ func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exem
 	return storage.SeriesRef(s.ref), nil
 }
 
+func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
+	// TODO: Add histogram support.
+	return 0, nil
+}
+
 func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
 	// TODO: Wire metadata in the Agent's appender.
 	return 0, nil
@ -30,8 +30,10 @@ import (
|
||||||
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
|
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/prometheus/prometheus/model/histogram"
|
||||||
"github.com/prometheus/prometheus/model/labels"
|
"github.com/prometheus/prometheus/model/labels"
|
||||||
"github.com/prometheus/prometheus/storage"
|
"github.com/prometheus/prometheus/storage"
|
||||||
|
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||||
"github.com/prometheus/prometheus/tsdb/chunks"
|
"github.com/prometheus/prometheus/tsdb/chunks"
|
||||||
"github.com/prometheus/prometheus/tsdb/fileutil"
|
"github.com/prometheus/prometheus/tsdb/fileutil"
|
||||||
"github.com/prometheus/prometheus/tsdb/tsdbutil"
|
"github.com/prometheus/prometheus/tsdb/tsdbutil"
|
||||||
|
@ -163,7 +165,7 @@ func TestCorruptedChunk(t *testing.T) {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
tmpdir := t.TempDir()
|
tmpdir := t.TempDir()
|
||||||
|
|
||||||
series := storage.NewListSeries(labels.FromStrings("a", "b"), []tsdbutil.Sample{sample{1, 1}})
|
series := storage.NewListSeries(labels.FromStrings("a", "b"), []tsdbutil.Sample{sample{1, 1, nil, nil}})
|
||||||
blockDir := createBlock(t, tmpdir, []storage.Series{series})
|
blockDir := createBlock(t, tmpdir, []storage.Series{series})
|
||||||
files, err := sequenceFiles(chunkDir(blockDir))
|
files, err := sequenceFiles(chunkDir(blockDir))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -192,7 +194,7 @@ func TestCorruptedChunk(t *testing.T) {
|
||||||
// Check chunk errors during iter time.
|
// Check chunk errors during iter time.
|
||||||
require.True(t, set.Next())
|
require.True(t, set.Next())
|
||||||
it := set.At().Iterator()
|
it := set.At().Iterator()
|
||||||
require.Equal(t, false, it.Next())
|
require.Equal(t, chunkenc.ValNone, it.Next())
|
||||||
require.Equal(t, tc.iterErr.Error(), it.Err().Error())
|
require.Equal(t, tc.iterErr.Error(), it.Err().Error())
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -203,10 +205,10 @@ func TestLabelValuesWithMatchers(t *testing.T) {
|
||||||
|
|
||||||
var seriesEntries []storage.Series
|
var seriesEntries []storage.Series
|
||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
|
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.Labels{
|
||||||
"tens", fmt.Sprintf("value%d", i/10),
|
{Name: "tens", Value: fmt.Sprintf("value%d", i/10)},
|
||||||
"unique", fmt.Sprintf("value%d", i),
|
{Name: "unique", Value: fmt.Sprintf("value%d", i)},
|
||||||
), []tsdbutil.Sample{sample{100, 0}}))
|
}, []tsdbutil.Sample{sample{100, 0, nil, nil}}))
|
||||||
}
|
}
|
||||||
|
|
||||||
blockDir := createBlock(t, tmpdir, seriesEntries)
|
blockDir := createBlock(t, tmpdir, seriesEntries)
|
||||||
|
@ -358,11 +360,13 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) {
|
||||||
var seriesEntries []storage.Series
|
var seriesEntries []storage.Series
|
||||||
metricCount := 1000000
|
metricCount := 1000000
|
||||||
for i := 0; i < metricCount; i++ {
|
for i := 0; i < metricCount; i++ {
|
||||||
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
|
// Note these series are not created in sort order: 'value2' sorts after 'value10'.
|
||||||
"a_unique", fmt.Sprintf("value%d", i),
|
// This makes a big difference to the benchmark timing.
|
||||||
"b_tens", fmt.Sprintf("value%d", i/(metricCount/10)),
|
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.Labels{
|
||||||
"c_ninety", fmt.Sprintf("value%d", i/(metricCount/10)/9), // "0" for the first 90%, then "1"
|
{Name: "a_unique", Value: fmt.Sprintf("value%d", i)},
|
||||||
), []tsdbutil.Sample{sample{100, 0}}))
|
{Name: "b_tens", Value: fmt.Sprintf("value%d", i/(metricCount/10))},
|
||||||
|
{Name: "c_ninety", Value: fmt.Sprintf("value%d", i/(metricCount/10)/9)}, // "0" for the first 90%, then "1"
|
||||||
|
}, []tsdbutil.Sample{sample{100, 0, nil, nil}}))
|
||||||
}
|
}
|
||||||
|
|
||||||
blockDir := createBlock(b, tmpdir, seriesEntries)
|
blockDir := createBlock(b, tmpdir, seriesEntries)
|
||||||
|
@@ -396,23 +400,23 @@ func TestLabelNamesWithMatchers(t *testing.T) {
 	var seriesEntries []storage.Series
 	for i := 0; i < 100; i++ {
-		seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
-			"unique", fmt.Sprintf("value%d", i),
-		), []tsdbutil.Sample{sample{100, 0}}))
+		seriesEntries = append(seriesEntries, storage.NewListSeries(labels.Labels{
+			{Name: "unique", Value: fmt.Sprintf("value%d", i)},
+		}, []tsdbutil.Sample{sample{100, 0, nil, nil}}))
 
 		if i%10 == 0 {
-			seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
-				"tens", fmt.Sprintf("value%d", i/10),
-				"unique", fmt.Sprintf("value%d", i),
-			), []tsdbutil.Sample{sample{100, 0}}))
+			seriesEntries = append(seriesEntries, storage.NewListSeries(labels.Labels{
+				{Name: "tens", Value: fmt.Sprintf("value%d", i/10)},
+				{Name: "unique", Value: fmt.Sprintf("value%d", i)},
+			}, []tsdbutil.Sample{sample{100, 0, nil, nil}}))
 		}
 
 		if i%20 == 0 {
-			seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
-				"tens", fmt.Sprintf("value%d", i/10),
-				"twenties", fmt.Sprintf("value%d", i/20),
-				"unique", fmt.Sprintf("value%d", i),
-			), []tsdbutil.Sample{sample{100, 0}}))
+			seriesEntries = append(seriesEntries, storage.NewListSeries(labels.Labels{
+				{Name: "tens", Value: fmt.Sprintf("value%d", i/10)},
+				{Name: "twenties", Value: fmt.Sprintf("value%d", i/20)},
+				{Name: "unique", Value: fmt.Sprintf("value%d", i)},
+			}, []tsdbutil.Sample{sample{100, 0, nil, nil}}))
 		}
 
 	}
@@ -491,15 +495,34 @@ func createHead(tb testing.TB, w *wlog.WL, series []storage.Series, chunkDir str
 	head, err := NewHead(nil, nil, w, nil, opts, nil)
 	require.NoError(tb, err)
 
-	app := head.Appender(context.Background())
+	ctx := context.Background()
+	app := head.Appender(ctx)
 	for _, s := range series {
 		ref := storage.SeriesRef(0)
 		it := s.Iterator()
 		lset := s.Labels()
-		for it.Next() {
-			t, v := it.At()
-			ref, err = app.Append(ref, lset, t, v)
+		typ := it.Next()
+		lastTyp := typ
+		for ; typ != chunkenc.ValNone; typ = it.Next() {
+			if lastTyp != typ {
+				// The behaviour of appender is undefined if samples of different types
+				// are appended to the same series in a single Commit().
+				require.NoError(tb, app.Commit())
+				app = head.Appender(ctx)
+			}
+
+			switch typ {
+			case chunkenc.ValFloat:
+				t, v := it.At()
+				ref, err = app.Append(ref, lset, t, v)
+			case chunkenc.ValHistogram:
+				t, h := it.AtHistogram()
+				ref, err = app.AppendHistogram(ref, lset, t, h)
+			default:
+				err = fmt.Errorf("unknown sample type %s", typ.String())
+			}
 			require.NoError(tb, err)
+			lastTyp = typ
 		}
 		require.NoError(tb, it.Err())
 	}
@@ -525,7 +548,7 @@ func createHeadWithOOOSamples(tb testing.TB, w *wlog.WL, series []storage.Series
 		lset := s.Labels()
 		os := tsdbutil.SampleSlice{}
 		count := 0
-		for it.Next() {
+		for it.Next() == chunkenc.ValFloat {
 			totalSamples++
 			count++
 			t, v := it.At()
@@ -572,8 +595,67 @@ const (
 	defaultLabelValue = "labelValue"
 )
 
-// genSeries generates series with a given number of labels and values.
+// genSeries generates series of float64 samples with a given number of labels and values.
 func genSeries(totalSeries, labelCount int, mint, maxt int64) []storage.Series {
+	return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, 1, func(ts int64) tsdbutil.Sample {
+		return sample{t: ts, v: rand.Float64()}
+	})
+}
+
+// genHistogramSeries generates series of histogram samples with a given number of labels and values.
+func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64) []storage.Series {
+	return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) tsdbutil.Sample {
+		h := &histogram.Histogram{
+			Count:         5 + uint64(ts*4),
+			ZeroCount:     2 + uint64(ts),
+			ZeroThreshold: 0.001,
+			Sum:           18.4 * rand.Float64(),
+			Schema:        1,
+			PositiveSpans: []histogram.Span{
+				{Offset: 0, Length: 2},
+				{Offset: 1, Length: 2},
+			},
+			PositiveBuckets: []int64{int64(ts + 1), 1, -1, 0},
+		}
+		return sample{t: ts, h: h}
+	})
+}
+
+// genHistogramAndFloatSeries generates series of mixed histogram and float64 samples with a given number of labels and values.
+func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step int64) []storage.Series {
+	floatSample := false
+	count := 0
+	return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) tsdbutil.Sample {
+		count++
+		var s sample
+		if floatSample {
+			s = sample{t: ts, v: rand.Float64()}
+		} else {
+			h := &histogram.Histogram{
+				Count:         5 + uint64(ts*4),
+				ZeroCount:     2 + uint64(ts),
+				ZeroThreshold: 0.001,
+				Sum:           18.4 * rand.Float64(),
+				Schema:        1,
+				PositiveSpans: []histogram.Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				PositiveBuckets: []int64{int64(ts + 1), 1, -1, 0},
+			}
+			s = sample{t: ts, h: h}
+		}
+
+		if count%5 == 0 {
+			// Flip the sample type for every 5 samples.
+			floatSample = !floatSample
+		}
+
+		return s
+	})
+}
+
+func genSeriesFromSampleGenerator(totalSeries, labelCount int, mint, maxt, step int64, generator func(ts int64) tsdbutil.Sample) []storage.Series {
 	if totalSeries == 0 || labelCount == 0 {
 		return nil
 	}
@@ -587,8 +669,8 @@ func genSeries(totalSeries, labelCount int, mint, maxt int64) []storage.Series {
 			lbls[defaultLabelName+strconv.Itoa(j)] = defaultLabelValue + strconv.Itoa(j)
 		}
 		samples := make([]tsdbutil.Sample, 0, maxt-mint+1)
-		for t := mint; t < maxt; t++ {
-			samples = append(samples, sample{t: t, v: rand.Float64()})
+		for t := mint; t < maxt; t += step {
+			samples = append(samples, generator(t))
 		}
 		series[i] = storage.NewListSeries(labels.FromMap(lbls), samples)
 	}
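
The test helpers above route all series construction through a single per-timestamp generator closure. Below is a self-contained sketch of that pattern with toy stand-ins (toySample, genFromGenerator) for the package's sample and tsdbutil.Sample types; it illustrates the shape of the helper, not the test code itself.

package main

import "fmt"

type toySample struct {
	t int64
	v float64
}

// genFromGenerator walks [mint, maxt) in increments of step and delegates
// the value of each sample to the provided generator closure, mirroring
// genSeriesFromSampleGenerator above.
func genFromGenerator(mint, maxt, step int64, generator func(ts int64) toySample) []toySample {
	samples := make([]toySample, 0, (maxt-mint)/step+1)
	for t := mint; t < maxt; t += step {
		samples = append(samples, generator(t))
	}
	return samples
}

func main() {
	// A closure decides per timestamp what the sample looks like; a
	// histogram-producing closure plugs in the same way.
	s := genFromGenerator(0, 50, 10, func(ts int64) toySample {
		return toySample{t: ts, v: float64(ts) * 1.5}
	})
	fmt.Println(len(s), s)
}
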
@@ -71,6 +71,7 @@ func (w *BlockWriter) initHead() error {
 	opts := DefaultHeadOptions()
 	opts.ChunkRange = w.blockSize
 	opts.ChunkDirRoot = w.chunkDir
+	opts.EnableNativeHistograms.Store(true)
 	h, err := NewHead(nil, w.logger, nil, nil, opts, NewHeadStats())
 	if err != nil {
 		return errors.Wrap(err, "tsdb.NewHead")
@@ -18,27 +18,32 @@ import (
 	"sync"
 
 	"github.com/pkg/errors"
+
+	"github.com/prometheus/prometheus/model/histogram"
 )
 
 // Encoding is the identifier for a chunk encoding.
 type Encoding uint8
 
+// The different available chunk encodings.
+const (
+	EncNone Encoding = iota
+	EncXOR
+	EncHistogram
+)
+
 func (e Encoding) String() string {
 	switch e {
 	case EncNone:
 		return "none"
 	case EncXOR:
 		return "XOR"
+	case EncHistogram:
+		return "histogram"
 	}
 	return "<unknown>"
 }
 
-// The different available chunk encodings.
-const (
-	EncNone Encoding = iota
-	EncXOR
-)
-
 // Chunk encodings for out-of-order chunks.
 // These encodings must be only used by the Head block for its internal bookkeeping.
 const (
@@ -50,8 +55,9 @@ func IsOutOfOrderChunk(e Encoding) bool {
 	return (e & OutOfOrderMask) != 0
 }
 
+// IsValidEncoding returns true for supported encodings.
 func IsValidEncoding(e Encoding) bool {
-	return e == EncXOR || e == EncOOOXOR
+	return e == EncXOR || e == EncOOOXOR || e == EncHistogram
 }
 
 // Chunk holds a sequence of sample pairs that can be iterated over and appended to.
@@ -84,26 +90,80 @@ type Chunk interface {
 // Appender adds sample pairs to a chunk.
 type Appender interface {
 	Append(int64, float64)
+	AppendHistogram(t int64, h *histogram.Histogram)
 }
 
 // Iterator is a simple iterator that can only get the next value.
 // Iterator iterates over the samples of a time series, in timestamp-increasing order.
 type Iterator interface {
-	// Next advances the iterator by one.
-	Next() bool
-	// Seek advances the iterator forward to the first sample with the timestamp equal or greater than t.
-	// If current sample found by previous `Next` or `Seek` operation already has this property, Seek has no effect.
-	// Seek returns true, if such sample exists, false otherwise.
-	// Iterator is exhausted when the Seek returns false.
-	Seek(t int64) bool
-	// At returns the current timestamp/value pair.
-	// Before the iterator has advanced At behaviour is unspecified.
+	// Next advances the iterator by one and returns the type of the value
+	// at the new position (or ValNone if the iterator is exhausted).
+	Next() ValueType
+	// Seek advances the iterator forward to the first sample with a
+	// timestamp equal or greater than t. If the current sample found by a
+	// previous `Next` or `Seek` operation already has this property, Seek
+	// has no effect. If a sample has been found, Seek returns the type of
+	// its value. Otherwise, it returns ValNone, after which the iterator is
+	// exhausted.
+	Seek(t int64) ValueType
+	// At returns the current timestamp/value pair if the value is a float.
+	// Before the iterator has advanced, the behaviour is unspecified.
 	At() (int64, float64)
-	// Err returns the current error. It should be used only after iterator is
-	// exhausted, that is `Next` or `Seek` returns false.
+	// AtHistogram returns the current timestamp/value pair if the value is
+	// a histogram with integer counts. Before the iterator has advanced,
+	// the behaviour is unspecified.
+	AtHistogram() (int64, *histogram.Histogram)
+	// AtFloatHistogram returns the current timestamp/value pair if the
+	// value is a histogram with floating-point counts. It also works if the
+	// value is a histogram with integer counts, in which case a
+	// FloatHistogram copy of the histogram is returned. Before the iterator
+	// has advanced, the behaviour is unspecified.
+	AtFloatHistogram() (int64, *histogram.FloatHistogram)
+	// AtT returns the current timestamp.
+	// Before the iterator has advanced, the behaviour is unspecified.
+	AtT() int64
+	// Err returns the current error. It should be used only after the
+	// iterator is exhausted, i.e. `Next` or `Seek` have returned ValNone.
 	Err() error
 }
 
+// ValueType defines the type of a value an Iterator points to.
+type ValueType uint8
+
+// Possible values for ValueType.
+const (
+	ValNone           ValueType = iota // No value at the current position.
+	ValFloat                           // A simple float, retrieved with At.
+	ValHistogram                       // A histogram, retrieve with AtHistogram, but AtFloatHistogram works, too.
+	ValFloatHistogram                  // A floating-point histogram, retrieve with AtFloatHistogram.
+)
+
+func (v ValueType) String() string {
+	switch v {
+	case ValNone:
+		return "none"
+	case ValFloat:
+		return "float"
+	case ValHistogram:
+		return "histogram"
+	case ValFloatHistogram:
+		return "floathistogram"
+	default:
+		return "unknown"
+	}
+}
+
+func (v ValueType) ChunkEncoding() Encoding {
+	switch v {
+	case ValFloat:
+		return EncXOR
+	case ValHistogram:
+		return EncHistogram
+	default:
+		return EncNone
+	}
+}
+
 // MockSeriesIterator returns an iterator for a mock series with custom timeStamps and values.
 func MockSeriesIterator(timestamps []int64, values []float64) Iterator {
 	return &mockSeriesIterator{
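
The typed Iterator contract above replaces the old boolean protocol. Below is a minimal, self-contained sketch of the consumption loop it implies; the names toyIter and newToyIter are illustrative stand-ins, not part of the chunkenc package.

package main

import "fmt"

type ValueType uint8

const (
	ValNone ValueType = iota
	ValFloat
	ValHistogram
)

// toyIter iterates over a plain float series to demonstrate the loop shape;
// a real chunkenc iterator would also surface histogram samples.
type toyIter struct {
	ts  []int64
	vs  []float64
	idx int // starts at -1, i.e. before the first sample
}

func newToyIter(ts []int64, vs []float64) *toyIter {
	return &toyIter{ts: ts, vs: vs, idx: -1}
}

func (it *toyIter) Next() ValueType {
	if it.idx+1 < len(it.ts) {
		it.idx++
		return ValFloat
	}
	return ValNone
}

func (it *toyIter) At() (int64, float64) { return it.ts[it.idx], it.vs[it.idx] }

func main() {
	it := newToyIter([]int64{10, 20, 30}, []float64{1.5, 2.5, 3.5})
	// Callers no longer range over a bool: they switch on the value type
	// until Next reports ValNone.
	for typ := it.Next(); typ != ValNone; typ = it.Next() {
		switch typ {
		case ValFloat:
			t, v := it.At()
			fmt.Println(t, v)
		case ValHistogram:
			// A real iterator would call AtHistogram() here.
		}
	}
}
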
@@ -119,18 +179,29 @@ type mockSeriesIterator struct {
 	currIndex int
 }
 
-func (it *mockSeriesIterator) Seek(int64) bool { return false }
+func (it *mockSeriesIterator) Seek(int64) ValueType { return ValNone }
 
 func (it *mockSeriesIterator) At() (int64, float64) {
 	return it.timeStamps[it.currIndex], it.values[it.currIndex]
 }
 
-func (it *mockSeriesIterator) Next() bool {
+func (it *mockSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { return math.MinInt64, nil }
+
+func (it *mockSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	return math.MinInt64, nil
+}
+
+func (it *mockSeriesIterator) AtT() int64 {
+	return it.timeStamps[it.currIndex]
+}
+
+func (it *mockSeriesIterator) Next() ValueType {
 	if it.currIndex < len(it.timeStamps)-1 {
 		it.currIndex++
-		return true
+		return ValFloat
 	}
 
-	return false
+	return ValNone
 }
 func (it *mockSeriesIterator) Err() error { return nil }
@@ -141,10 +212,13 @@ func NewNopIterator() Iterator {
 
 type nopIterator struct{}
 
-func (nopIterator) Seek(int64) bool      { return false }
-func (nopIterator) At() (int64, float64) { return math.MinInt64, 0 }
-func (nopIterator) Next() bool           { return false }
-func (nopIterator) Err() error           { return nil }
+func (nopIterator) Next() ValueType                                      { return ValNone }
+func (nopIterator) Seek(int64) ValueType                                 { return ValNone }
+func (nopIterator) At() (int64, float64)                                 { return math.MinInt64, 0 }
+func (nopIterator) AtHistogram() (int64, *histogram.Histogram)           { return math.MinInt64, nil }
+func (nopIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { return math.MinInt64, nil }
+func (nopIterator) AtT() int64                                           { return math.MinInt64 }
+func (nopIterator) Err() error                                           { return nil }
 
 // Pool is used to create and reuse chunk references to avoid allocations.
 type Pool interface {
@@ -154,7 +228,8 @@ type Pool interface {
 
 // pool is a memory pool of chunk objects.
 type pool struct {
-	xor sync.Pool
+	xor       sync.Pool
+	histogram sync.Pool
 }
 
 // NewPool returns a new pool.
|
||||||
return &XORChunk{b: bstream{}}
|
return &XORChunk{b: bstream{}}
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
histogram: sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
return &HistogramChunk{b: bstream{}}
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -175,6 +255,11 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) {
 		c.b.stream = b
 		c.b.count = 0
 		return c, nil
+	case EncHistogram:
+		c := p.histogram.Get().(*HistogramChunk)
+		c.b.stream = b
+		c.b.count = 0
+		return c, nil
 	}
 	return nil, errors.Errorf("invalid chunk encoding %q", e)
 }
@@ -192,6 +277,17 @@ func (p *pool) Put(c Chunk) error {
 		xc.b.stream = nil
 		xc.b.count = 0
 		p.xor.Put(c)
+	case EncHistogram:
+		sh, ok := c.(*HistogramChunk)
+		// This may happen often with wrapped chunks. Nothing we can really do about
+		// it but returning an error would cause a lot of allocations again. Thus,
+		// we just skip it.
+		if !ok {
+			return nil
+		}
+		sh.b.stream = nil
+		sh.b.count = 0
+		p.histogram.Put(c)
 	default:
 		return errors.Errorf("invalid chunk encoding %q", c.Encoding())
 	}
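
A brief usage sketch of the pooled chunk lifecycle added above, assuming the chunkenc API as it stands in this commit (including NumSamples on the Chunk interface); readNumSamples is an illustrative helper, not a function in the package.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// readNumSamples decodes the sample count from raw chunk data without
// allocating a fresh chunk wrapper per call: the wrapper is borrowed from
// the per-encoding sync.Pool and returned when done.
func readNumSamples(p chunkenc.Pool, data []byte) (int, error) {
	c, err := p.Get(chunkenc.EncHistogram, data)
	if err != nil {
		return 0, err
	}
	// Put detaches the byte slice again and recycles the wrapper.
	defer func() { _ = p.Put(c) }()
	return c.NumSamples(), nil
}

func main() {
	p := chunkenc.NewPool()
	// A freshly cut histogram chunk serializes to its Bytes().
	hc := chunkenc.NewHistogramChunk()
	n, err := readNumSamples(p, hc.Bytes())
	fmt.Println(n, err) // 0 samples in an empty chunk.
}
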
@@ -205,6 +301,19 @@ func FromData(e Encoding, d []byte) (Chunk, error) {
 	switch e {
 	case EncXOR, EncOOOXOR:
 		return &XORChunk{b: bstream{count: 0, stream: d}}, nil
+	case EncHistogram:
+		return &HistogramChunk{b: bstream{count: 0, stream: d}}, nil
+	}
+	return nil, errors.Errorf("invalid chunk encoding %q", e)
+}
+
+// NewEmptyChunk returns an empty chunk for the given encoding.
+func NewEmptyChunk(e Encoding) (Chunk, error) {
+	switch e {
+	case EncXOR:
+		return NewXORChunk(), nil
+	case EncHistogram:
+		return NewHistogramChunk(), nil
 	}
 	return nil, errors.Errorf("invalid chunk encoding %q", e)
 }
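
A round-trip usage sketch of the factory functions above, assuming the package layout in this commit: build an empty histogram chunk, append one histogram sample, and read it back through the typed iterator.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	c, err := chunkenc.NewEmptyChunk(chunkenc.EncHistogram)
	if err != nil {
		panic(err)
	}
	app, err := c.Appender()
	if err != nil {
		panic(err)
	}
	h := &histogram.Histogram{
		Count:         11, // 2 in the zero bucket + 9 across the positive buckets.
		ZeroCount:     2,
		ZeroThreshold: 0.001,
		Sum:           18.4,
		Schema:        1,
		PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
		// Bucket counts are delta-encoded: absolute counts 2, 3, 2, 2.
		PositiveBuckets: []int64{2, 1, -1, 0},
	}
	app.AppendHistogram(1000, h) // Timestamp in milliseconds.

	it := c.Iterator(nil)
	for it.Next() == chunkenc.ValHistogram {
		t, got := it.AtHistogram()
		fmt.Println(t, got.Count, got.Sum)
	}
}
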
@@ -71,7 +71,7 @@ func testChunk(t *testing.T, c Chunk) {
 	// 1. Expand iterator in simple case.
 	it1 := c.Iterator(nil)
 	var res1 []pair
-	for it1.Next() {
+	for it1.Next() == ValFloat {
 		ts, v := it1.At()
 		res1 = append(res1, pair{t: ts, v: v})
 	}
@@ -81,7 +81,7 @@ func testChunk(t *testing.T, c Chunk) {
 	// 2. Expand second iterator while reusing first one.
 	it2 := c.Iterator(it1)
 	var res2 []pair
-	for it2.Next() {
+	for it2.Next() == ValFloat {
 		ts, v := it2.At()
 		res2 = append(res2, pair{t: ts, v: v})
 	}
@@ -93,20 +93,20 @@ func testChunk(t *testing.T, c Chunk) {
 
 	it3 := c.Iterator(nil)
 	var res3 []pair
-	require.Equal(t, true, it3.Seek(exp[mid].t))
+	require.Equal(t, ValFloat, it3.Seek(exp[mid].t))
 	// Below ones should not matter.
-	require.Equal(t, true, it3.Seek(exp[mid].t))
-	require.Equal(t, true, it3.Seek(exp[mid].t))
+	require.Equal(t, ValFloat, it3.Seek(exp[mid].t))
+	require.Equal(t, ValFloat, it3.Seek(exp[mid].t))
 	ts, v = it3.At()
 	res3 = append(res3, pair{t: ts, v: v})
 
-	for it3.Next() {
+	for it3.Next() == ValFloat {
 		ts, v := it3.At()
 		res3 = append(res3, pair{t: ts, v: v})
 	}
 	require.NoError(t, it3.Err())
 	require.Equal(t, exp[mid:], res3)
-	require.Equal(t, false, it3.Seek(exp[len(exp)-1].t+1))
+	require.Equal(t, ValNone, it3.Seek(exp[len(exp)-1].t+1))
 }
 
 func benchmarkIterator(b *testing.B, newChunk func() Chunk) {
@@ -148,7 +148,7 @@ func benchmarkIterator(b *testing.B, newChunk func() Chunk) {
 	for i := 0; i < b.N; {
 		it := chunk.Iterator(it)
 
-		for it.Next() {
+		for it.Next() == ValFloat {
 			_, v := it.At()
 			res = v
 			i++
tsdb/chunkenc/histogram.go (new file, 876 lines)
@@ -0,0 +1,876 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunkenc

import (
	"encoding/binary"
	"math"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/value"
)

// HistogramChunk holds encoded sample data for a sparse, high-resolution
// histogram.
//
// Each sample has multiple "fields", stored in the following way (raw = store
// number directly, delta = store delta to the previous number, dod = store
// delta of the delta to the previous number, xor = what we do for regular
// sample values):
//
//	field →    ts    count    zeroCount    sum    []posbuckets    []negbuckets
//	sample 1   raw   raw      raw          raw    []raw           []raw
//	sample 2   delta delta    delta        xor    []delta         []delta
//	sample >2  dod   dod      dod          xor    []dod           []dod
type HistogramChunk struct {
	b bstream
}

// NewHistogramChunk returns a new chunk with histogram encoding of the given
// size.
func NewHistogramChunk() *HistogramChunk {
	b := make([]byte, 3, 128)
	return &HistogramChunk{b: bstream{stream: b, count: 0}}
}

// Encoding returns the encoding type.
func (c *HistogramChunk) Encoding() Encoding {
	return EncHistogram
}

// Bytes returns the underlying byte slice of the chunk.
func (c *HistogramChunk) Bytes() []byte {
	return c.b.bytes()
}

// NumSamples returns the number of samples in the chunk.
func (c *HistogramChunk) NumSamples() int {
	return int(binary.BigEndian.Uint16(c.Bytes()))
}

// Layout returns the histogram layout. Only call this on chunks that have at
// least one sample.
func (c *HistogramChunk) Layout() (
	schema int32, zeroThreshold float64,
	negativeSpans, positiveSpans []histogram.Span,
	err error,
) {
	if c.NumSamples() == 0 {
		panic("HistogramChunk.Layout() called on an empty chunk")
	}
	b := newBReader(c.Bytes()[2:])
	return readHistogramChunkLayout(&b)
}

// CounterResetHeader defines the first 2 bits of the chunk header.
type CounterResetHeader byte

const (
	// CounterReset means there was definitely a counter reset that resulted in this chunk.
	CounterReset CounterResetHeader = 0b10000000
	// NotCounterReset means there was definitely no counter reset when cutting this chunk.
	NotCounterReset CounterResetHeader = 0b01000000
	// GaugeType means this chunk contains a gauge histogram, where counter resets do not happen.
	GaugeType CounterResetHeader = 0b11000000
	// UnknownCounterReset means we cannot say if this chunk was created due to a counter reset or not.
	// An explicit counter reset detection needs to happen during query time.
	UnknownCounterReset CounterResetHeader = 0b00000000
)

// SetCounterResetHeader sets the counter reset header.
func (c *HistogramChunk) SetCounterResetHeader(h CounterResetHeader) {
	switch h {
	case CounterReset, NotCounterReset, GaugeType, UnknownCounterReset:
		bytes := c.Bytes()
		bytes[2] = (bytes[2] & 0b00111111) | byte(h)
	default:
		panic("invalid CounterResetHeader type")
	}
}

// GetCounterResetHeader returns the info about the first 2 bits of the chunk
// header.
func (c *HistogramChunk) GetCounterResetHeader() CounterResetHeader {
	return CounterResetHeader(c.Bytes()[2] & 0b11000000)
}

// Compact implements the Chunk interface.
func (c *HistogramChunk) Compact() {
	if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
		buf := make([]byte, l)
		copy(buf, c.b.stream)
		c.b.stream = buf
	}
}

// Appender implements the Chunk interface.
func (c *HistogramChunk) Appender() (Appender, error) {
	it := c.iterator(nil)

	// To get an appender, we must know the state it would have if we had
	// appended all existing data from scratch. We iterate through the end
	// and populate via the iterator's state.
	for it.Next() == ValHistogram {
	}
	if err := it.Err(); err != nil {
		return nil, err
	}

	a := &HistogramAppender{
		b: &c.b,

		schema:        it.schema,
		zThreshold:    it.zThreshold,
		pSpans:        it.pSpans,
		nSpans:        it.nSpans,
		t:             it.t,
		cnt:           it.cnt,
		zCnt:          it.zCnt,
		tDelta:        it.tDelta,
		cntDelta:      it.cntDelta,
		zCntDelta:     it.zCntDelta,
		pBuckets:      it.pBuckets,
		nBuckets:      it.nBuckets,
		pBucketsDelta: it.pBucketsDelta,
		nBucketsDelta: it.nBucketsDelta,

		sum:      it.sum,
		leading:  it.leading,
		trailing: it.trailing,
	}
	if it.numTotal == 0 {
		a.leading = 0xff
	}
	return a, nil
}

func countSpans(spans []histogram.Span) int {
	var cnt int
	for _, s := range spans {
		cnt += int(s.Length)
	}
	return cnt
}

func newHistogramIterator(b []byte) *histogramIterator {
	it := &histogramIterator{
		br:       newBReader(b),
		numTotal: binary.BigEndian.Uint16(b),
		t:        math.MinInt64,
	}
	// The first 3 bytes contain chunk headers.
	// We skip that for actual samples.
	_, _ = it.br.readBits(24)
	return it
}

func (c *HistogramChunk) iterator(it Iterator) *histogramIterator {
	// This comment is copied from XORChunk.iterator:
	//   Should iterators guarantee to act on a copy of the data so it doesn't lock append?
	//   When using striped locks to guard access to chunks, probably yes.
	//   Could only copy data if the chunk is not completed yet.
	if histogramIter, ok := it.(*histogramIterator); ok {
		histogramIter.Reset(c.b.bytes())
		return histogramIter
	}
	return newHistogramIterator(c.b.bytes())
}

// Iterator implements the Chunk interface.
func (c *HistogramChunk) Iterator(it Iterator) Iterator {
	return c.iterator(it)
}

// HistogramAppender is an Appender implementation for sparse histograms.
type HistogramAppender struct {
	b *bstream

	// Layout:
	schema         int32
	zThreshold     float64
	pSpans, nSpans []histogram.Span

	// Although we intend to start new chunks on counter resets, we still
	// have to handle negative deltas for gauge histograms. Therefore, even
	// deltas are signed types here (even for tDelta to not treat that one
	// specially).
	t                            int64
	cnt, zCnt                    uint64
	tDelta, cntDelta, zCntDelta  int64
	pBuckets, nBuckets           []int64
	pBucketsDelta, nBucketsDelta []int64

	// The sum is Gorilla xor encoded.
	sum      float64
	leading  uint8
	trailing uint8
}

// Append implements Appender. This implementation panics because normal float
// samples must never be appended to a histogram chunk.
func (a *HistogramAppender) Append(int64, float64) {
	panic("appended a float sample to a histogram chunk")
}

// Appendable returns whether the chunk can be appended to, and if so
// whether any recoding needs to happen using the provided interjections
// (in case of any new buckets, positive or negative range, respectively).
//
// The chunk is not appendable in the following cases:
//
//   • The schema has changed.
//   • The threshold for the zero bucket has changed.
//   • Any buckets have disappeared.
//   • There was a counter reset in the count of observations or in any bucket,
//     including the zero bucket.
//   • The last sample in the chunk was stale while the current sample is not stale.
//
// The method returns an additional boolean set to true if it is not appendable
// because of a counter reset. If the given sample is stale, it is always ok to
// append. If counterReset is true, okToAppend is always false.
func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
	positiveInterjections, negativeInterjections []Interjection,
	okToAppend, counterReset bool,
) {
	if value.IsStaleNaN(h.Sum) {
		// This is a stale sample whose buckets and spans don't matter.
		okToAppend = true
		return
	}
	if value.IsStaleNaN(a.sum) {
		// If the last sample was stale, then we can only accept stale
		// samples in this chunk.
		return
	}

	if h.Count < a.cnt {
		// There has been a counter reset.
		counterReset = true
		return
	}

	if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
		return
	}

	if h.ZeroCount < a.zCnt {
		// There has been a counter reset since ZeroThreshold didn't change.
		counterReset = true
		return
	}

	var ok bool
	positiveInterjections, ok = compareSpans(a.pSpans, h.PositiveSpans)
	if !ok {
		counterReset = true
		return
	}
	negativeInterjections, ok = compareSpans(a.nSpans, h.NegativeSpans)
	if !ok {
		counterReset = true
		return
	}

	if counterResetInAnyBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) ||
		counterResetInAnyBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) {
		counterReset, positiveInterjections, negativeInterjections = true, nil, nil
		return
	}

	okToAppend = true
	return
}

// counterResetInAnyBucket returns true if there was a counter reset for any
// bucket. This should be called only when the bucket layout is the same or new
// buckets were added. It does not handle the case of buckets missing.
func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans []histogram.Span) bool {
	if len(oldSpans) == 0 || len(oldBuckets) == 0 {
		return false
	}

	oldSpanSliceIdx, newSpanSliceIdx := 0, 0                   // Index for the span slices.
	oldInsideSpanIdx, newInsideSpanIdx := uint32(0), uint32(0) // Index inside a span.
	oldIdx, newIdx := oldSpans[0].Offset, newSpans[0].Offset

	oldBucketSliceIdx, newBucketSliceIdx := 0, 0 // Index inside bucket slice.
	oldVal, newVal := oldBuckets[0], newBuckets[0]

	// Since we assume that new spans won't have missing buckets, there will never be a case
	// where the old index will not find a matching new index.
	for {
		if oldIdx == newIdx {
			if newVal < oldVal {
				return true
			}
		}

		if oldIdx <= newIdx {
			// Moving ahead old bucket and span by 1 index.
			if oldInsideSpanIdx == oldSpans[oldSpanSliceIdx].Length-1 {
				// Current span is over.
				oldSpanSliceIdx++
				oldInsideSpanIdx = 0
				if oldSpanSliceIdx >= len(oldSpans) {
					// All old spans are over.
					break
				}
				oldIdx += 1 + oldSpans[oldSpanSliceIdx].Offset
			} else {
				oldInsideSpanIdx++
				oldIdx++
			}
			oldBucketSliceIdx++
			oldVal += oldBuckets[oldBucketSliceIdx]
		}

		if oldIdx > newIdx {
			// Moving ahead new bucket and span by 1 index.
			if newInsideSpanIdx == newSpans[newSpanSliceIdx].Length-1 {
				// Current span is over.
				newSpanSliceIdx++
				newInsideSpanIdx = 0
				if newSpanSliceIdx >= len(newSpans) {
					// All new spans are over.
					// This should not happen, old spans above should catch this first.
					panic("new spans over before old spans in counterReset")
				}
				newIdx += 1 + newSpans[newSpanSliceIdx].Offset
			} else {
				newInsideSpanIdx++
				newIdx++
			}
			newBucketSliceIdx++
			newVal += newBuckets[newBucketSliceIdx]
		}
	}

	return false
}

// AppendHistogram appends a histogram to the chunk. The caller must ensure that
// the histogram is properly structured, e.g. the number of buckets used
// corresponds to the number conveyed by the span structures. First call
// Appendable() and act accordingly!
func (a *HistogramAppender) AppendHistogram(t int64, h *histogram.Histogram) {
	var tDelta, cntDelta, zCntDelta int64
	num := binary.BigEndian.Uint16(a.b.bytes())

	if value.IsStaleNaN(h.Sum) {
		// Emptying out other fields to write no buckets, and an empty
		// layout in case of first histogram in the chunk.
		h = &histogram.Histogram{Sum: h.Sum}
	}

	if num == 0 {
		// The first append gets the privilege to dictate the layout
		// but it's also responsible for encoding it into the chunk!
		writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans)
		a.schema = h.Schema
		a.zThreshold = h.ZeroThreshold

		if len(h.PositiveSpans) > 0 {
			a.pSpans = make([]histogram.Span, len(h.PositiveSpans))
			copy(a.pSpans, h.PositiveSpans)
		} else {
			a.pSpans = nil
		}
		if len(h.NegativeSpans) > 0 {
			a.nSpans = make([]histogram.Span, len(h.NegativeSpans))
			copy(a.nSpans, h.NegativeSpans)
		} else {
			a.nSpans = nil
		}

		numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
		if numPBuckets > 0 {
			a.pBuckets = make([]int64, numPBuckets)
			a.pBucketsDelta = make([]int64, numPBuckets)
		} else {
			a.pBuckets = nil
			a.pBucketsDelta = nil
		}
		if numNBuckets > 0 {
			a.nBuckets = make([]int64, numNBuckets)
			a.nBucketsDelta = make([]int64, numNBuckets)
		} else {
			a.nBuckets = nil
			a.nBucketsDelta = nil
		}

		// Now store the actual data.
		putVarbitInt(a.b, t)
		putVarbitUint(a.b, h.Count)
		putVarbitUint(a.b, h.ZeroCount)
		a.b.writeBits(math.Float64bits(h.Sum), 64)
		for _, b := range h.PositiveBuckets {
			putVarbitInt(a.b, b)
		}
		for _, b := range h.NegativeBuckets {
			putVarbitInt(a.b, b)
		}
	} else {
		// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
		// so we don't need a separate single delta logic for the 2nd sample.

		tDelta = t - a.t
		cntDelta = int64(h.Count) - int64(a.cnt)
		zCntDelta = int64(h.ZeroCount) - int64(a.zCnt)

		tDod := tDelta - a.tDelta
		cntDod := cntDelta - a.cntDelta
		zCntDod := zCntDelta - a.zCntDelta

		if value.IsStaleNaN(h.Sum) {
			cntDod, zCntDod = 0, 0
		}

		putVarbitInt(a.b, tDod)
		putVarbitInt(a.b, cntDod)
		putVarbitInt(a.b, zCntDod)

		a.writeSumDelta(h.Sum)

		for i, b := range h.PositiveBuckets {
			delta := b - a.pBuckets[i]
			dod := delta - a.pBucketsDelta[i]
			putVarbitInt(a.b, dod)
			a.pBucketsDelta[i] = delta
		}
		for i, b := range h.NegativeBuckets {
			delta := b - a.nBuckets[i]
			dod := delta - a.nBucketsDelta[i]
			putVarbitInt(a.b, dod)
			a.nBucketsDelta[i] = delta
		}
	}

	binary.BigEndian.PutUint16(a.b.bytes(), num+1)

	a.t = t
	a.cnt = h.Count
	a.zCnt = h.ZeroCount
	a.tDelta = tDelta
	a.cntDelta = cntDelta
	a.zCntDelta = zCntDelta

	copy(a.pBuckets, h.PositiveBuckets)
	copy(a.nBuckets, h.NegativeBuckets)
	// Note that the bucket deltas were already updated above.
	a.sum = h.Sum
}

// Recode converts the current chunk to accommodate an expansion of the set of
// (positive and/or negative) buckets used, according to the provided
// interjections, resulting in the honoring of the provided new positive and
// negative spans. To continue appending, use the returned Appender rather than
// the receiver of this method.
func (a *HistogramAppender) Recode(
	positiveInterjections, negativeInterjections []Interjection,
	positiveSpans, negativeSpans []histogram.Span,
) (Chunk, Appender) {
	// TODO(beorn7): This currently just decodes everything and then encodes
	// it again with the new span layout. This can probably be done in-place
	// by editing the chunk. But let's first see how expensive it is in the
	// big picture. Also, in-place editing might create concurrency issues.
	byts := a.b.bytes()
	it := newHistogramIterator(byts)
	hc := NewHistogramChunk()
	app, err := hc.Appender()
	if err != nil {
		panic(err)
	}
	numPositiveBuckets, numNegativeBuckets := countSpans(positiveSpans), countSpans(negativeSpans)

	for it.Next() == ValHistogram {
		tOld, hOld := it.AtHistogram()

		// We have to newly allocate slices for the modified buckets
		// here because they are kept by the appender until the next
		// append.
		// TODO(beorn7): We might be able to optimize this.
		var positiveBuckets, negativeBuckets []int64
		if numPositiveBuckets > 0 {
			positiveBuckets = make([]int64, numPositiveBuckets)
		}
		if numNegativeBuckets > 0 {
			negativeBuckets = make([]int64, numNegativeBuckets)
		}

		// Save the modified histogram to the new chunk.
		hOld.PositiveSpans, hOld.NegativeSpans = positiveSpans, negativeSpans
		if len(positiveInterjections) > 0 {
			hOld.PositiveBuckets = interject(hOld.PositiveBuckets, positiveBuckets, positiveInterjections)
		}
		if len(negativeInterjections) > 0 {
			hOld.NegativeBuckets = interject(hOld.NegativeBuckets, negativeBuckets, negativeInterjections)
		}
		app.AppendHistogram(tOld, hOld)
	}

	hc.SetCounterResetHeader(CounterResetHeader(byts[2] & 0b11000000))
	return hc, app
}

func (a *HistogramAppender) writeSumDelta(v float64) {
	xorWrite(a.b, v, a.sum, &a.leading, &a.trailing)
}

type histogramIterator struct {
	br       bstreamReader
	numTotal uint16
	numRead  uint16

	// Layout:
	schema         int32
	zThreshold     float64
	pSpans, nSpans []histogram.Span

	// For the fields that are tracked as deltas and ultimately dod's.
	t                            int64
	cnt, zCnt                    uint64
	tDelta, cntDelta, zCntDelta  int64
	pBuckets, nBuckets           []int64   // Delta between buckets.
	pFloatBuckets, nFloatBuckets []float64 // Absolute counts.
	pBucketsDelta, nBucketsDelta []int64

	// The sum is Gorilla xor encoded.
	sum      float64
	leading  uint8
	trailing uint8

	// Track calls to retrieve methods. Once they have been called, we
	// cannot recycle the bucket slices anymore because we have returned
	// them in the histogram.
	atHistogramCalled, atFloatHistogramCalled bool

	err error
}

func (it *histogramIterator) Seek(t int64) ValueType {
	if it.err != nil {
		return ValNone
	}

	for t > it.t || it.numRead == 0 {
		if it.Next() == ValNone {
			return ValNone
		}
	}
	return ValHistogram
}

func (it *histogramIterator) At() (int64, float64) {
	panic("cannot call histogramIterator.At")
}

func (it *histogramIterator) AtHistogram() (int64, *histogram.Histogram) {
	if value.IsStaleNaN(it.sum) {
		return it.t, &histogram.Histogram{Sum: it.sum}
	}
	it.atHistogramCalled = true
	return it.t, &histogram.Histogram{
		Count:           it.cnt,
		ZeroCount:       it.zCnt,
		Sum:             it.sum,
		ZeroThreshold:   it.zThreshold,
		Schema:          it.schema,
		PositiveSpans:   it.pSpans,
		NegativeSpans:   it.nSpans,
		PositiveBuckets: it.pBuckets,
		NegativeBuckets: it.nBuckets,
	}
}

func (it *histogramIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
	if value.IsStaleNaN(it.sum) {
		return it.t, &histogram.FloatHistogram{Sum: it.sum}
	}
	it.atFloatHistogramCalled = true
	return it.t, &histogram.FloatHistogram{
		Count:           float64(it.cnt),
		ZeroCount:       float64(it.zCnt),
		Sum:             it.sum,
		ZeroThreshold:   it.zThreshold,
		Schema:          it.schema,
		PositiveSpans:   it.pSpans,
		NegativeSpans:   it.nSpans,
		PositiveBuckets: it.pFloatBuckets,
		NegativeBuckets: it.nFloatBuckets,
	}
}

func (it *histogramIterator) AtT() int64 {
	return it.t
}

func (it *histogramIterator) Err() error {
	return it.err
}

func (it *histogramIterator) Reset(b []byte) {
	// The first 2 bytes contain chunk headers.
	// We skip that for actual samples.
	it.br = newBReader(b[2:])
	it.numTotal = binary.BigEndian.Uint16(b)
	it.numRead = 0

	it.t, it.cnt, it.zCnt = 0, 0, 0
	it.tDelta, it.cntDelta, it.zCntDelta = 0, 0, 0

	// Recycle slices that have not been returned yet. Otherwise, start from
	// scratch.
	if it.atHistogramCalled {
		it.atHistogramCalled = false
		it.pBuckets, it.nBuckets = nil, nil
	} else {
		it.pBuckets = it.pBuckets[:0]
		it.nBuckets = it.nBuckets[:0]
	}
	if it.atFloatHistogramCalled {
		it.atFloatHistogramCalled = false
		it.pFloatBuckets, it.nFloatBuckets = nil, nil
	} else {
		it.pFloatBuckets = it.pFloatBuckets[:0]
		it.nFloatBuckets = it.nFloatBuckets[:0]
	}

	it.pBucketsDelta = it.pBucketsDelta[:0]
	it.nBucketsDelta = it.nBucketsDelta[:0]

	it.sum = 0
	it.leading = 0
	it.trailing = 0
	it.err = nil
}

func (it *histogramIterator) Next() ValueType {
	if it.err != nil || it.numRead == it.numTotal {
		return ValNone
	}

	if it.numRead == 0 {
		// The first read is responsible for reading the chunk layout
		// and for initializing fields that depend on it. We give
		// counter reset info at chunk level, hence we discard it here.
		schema, zeroThreshold, posSpans, negSpans, err := readHistogramChunkLayout(&it.br)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.schema = schema
		it.zThreshold = zeroThreshold
		it.pSpans, it.nSpans = posSpans, negSpans
		numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans)
		// Allocate bucket slices as needed, recycling existing slices
		// in case this iterator was reset and already has slices of a
		// sufficient capacity.
		if numPBuckets > 0 {
			if cap(it.pBuckets) < numPBuckets {
				it.pBuckets = make([]int64, numPBuckets)
				// If cap(it.pBuckets) isn't sufficient, neither is the cap of the others.
				it.pBucketsDelta = make([]int64, numPBuckets)
				it.pFloatBuckets = make([]float64, numPBuckets)
			} else {
				for i := 0; i < numPBuckets; i++ {
					it.pBuckets = append(it.pBuckets, 0)
					it.pBucketsDelta = append(it.pBucketsDelta, 0)
					it.pFloatBuckets = append(it.pFloatBuckets, 0)
				}
			}
		}
		if numNBuckets > 0 {
			if cap(it.nBuckets) < numNBuckets {
				it.nBuckets = make([]int64, numNBuckets)
				// If cap(it.nBuckets) isn't sufficient, neither is the cap of the others.
				it.nBucketsDelta = make([]int64, numNBuckets)
				it.nFloatBuckets = make([]float64, numNBuckets)
			} else {
				for i := 0; i < numNBuckets; i++ {
					it.nBuckets = append(it.nBuckets, 0)
					it.nBucketsDelta = append(it.nBucketsDelta, 0)
					it.nFloatBuckets = append(it.nFloatBuckets, 0)
				}
			}
		}

		// Now read the actual data.
		t, err := readVarbitInt(&it.br)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.t = t

		cnt, err := readVarbitUint(&it.br)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.cnt = cnt

		zcnt, err := readVarbitUint(&it.br)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.zCnt = zcnt

		sum, err := it.br.readBits(64)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.sum = math.Float64frombits(sum)

		var current int64
		for i := range it.pBuckets {
			v, err := readVarbitInt(&it.br)
			if err != nil {
				it.err = err
				return ValNone
			}
			it.pBuckets[i] = v
			current += it.pBuckets[i]
			it.pFloatBuckets[i] = float64(current)
		}
		current = 0
		for i := range it.nBuckets {
			v, err := readVarbitInt(&it.br)
			if err != nil {
				it.err = err
				return ValNone
			}
			it.nBuckets[i] = v
			current += it.nBuckets[i]
			it.nFloatBuckets[i] = float64(current)
		}

		it.numRead++
		return ValHistogram
	}

	// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
	// so we don't need a separate single delta logic for the 2nd sample.

	// Recycle bucket slices that have not been returned yet. Otherwise,
	// copy them.
	if it.atHistogramCalled {
		it.atHistogramCalled = false
		if len(it.pBuckets) > 0 {
			newBuckets := make([]int64, len(it.pBuckets))
			copy(newBuckets, it.pBuckets)
			it.pBuckets = newBuckets
		} else {
			it.pBuckets = nil
		}
		if len(it.nBuckets) > 0 {
			newBuckets := make([]int64, len(it.nBuckets))
			copy(newBuckets, it.nBuckets)
			it.nBuckets = newBuckets
		} else {
			it.nBuckets = nil
		}
	}
	// FloatBuckets are set from scratch, so simply create empty ones.
	if it.atFloatHistogramCalled {
		it.atFloatHistogramCalled = false
		if len(it.pFloatBuckets) > 0 {
			it.pFloatBuckets = make([]float64, len(it.pFloatBuckets))
		} else {
			it.pFloatBuckets = nil
		}
		if len(it.nFloatBuckets) > 0 {
			it.nFloatBuckets = make([]float64, len(it.nFloatBuckets))
		} else {
			it.nFloatBuckets = nil
		}
	}

	tDod, err := readVarbitInt(&it.br)
	if err != nil {
		it.err = err
		return ValNone
	}
	it.tDelta = it.tDelta + tDod
	it.t += it.tDelta

	cntDod, err := readVarbitInt(&it.br)
	if err != nil {
		it.err = err
		return ValNone
	}
	it.cntDelta = it.cntDelta + cntDod
	it.cnt = uint64(int64(it.cnt) + it.cntDelta)

	zcntDod, err := readVarbitInt(&it.br)
	if err != nil {
		it.err = err
		return ValNone
	}
	it.zCntDelta = it.zCntDelta + zcntDod
	it.zCnt = uint64(int64(it.zCnt) + it.zCntDelta)

	ok := it.readSum()
	if !ok {
		return ValNone
	}

	if value.IsStaleNaN(it.sum) {
		it.numRead++
		return ValHistogram
	}

	var current int64
	for i := range it.pBuckets {
		dod, err := readVarbitInt(&it.br)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.pBucketsDelta[i] += dod
		it.pBuckets[i] += it.pBucketsDelta[i]
		current += it.pBuckets[i]
		it.pFloatBuckets[i] = float64(current)
	}

	current = 0
	for i := range it.nBuckets {
		dod, err := readVarbitInt(&it.br)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.nBucketsDelta[i] += dod
		it.nBuckets[i] += it.nBucketsDelta[i]
		current += it.nBuckets[i]
		it.nFloatBuckets[i] = float64(current)
	}

	it.numRead++
	return ValHistogram
}

func (it *histogramIterator) readSum() bool {
	err := xorRead(&it.br, &it.sum, &it.leading, &it.trailing)
	if err != nil {
		it.err = err
		return false
	}
	return true
}
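
Appendable and Recode together define the append-time protocol for histogram chunks. The following is a condensed sketch of the decision a caller makes per incoming sample, assuming the HistogramAppender API above; appendOrCut is an illustrative name and simplification, not the head appender code merged elsewhere in this PR.

package sketch

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// appendOrCut appends h at timestamp t, recoding the chunk when new buckets
// appeared and cutting a fresh chunk when the sample is not appendable.
func appendOrCut(c chunkenc.Chunk, app *chunkenc.HistogramAppender, t int64, h *histogram.Histogram) (chunkenc.Chunk, chunkenc.Appender) {
	posInterjections, negInterjections, okToAppend, counterReset := app.Appendable(h)
	switch {
	case !okToAppend:
		// Not appendable: start a new chunk. If a counter reset caused
		// it, record that in the new chunk's header; otherwise the
		// header stays at its UnknownCounterReset zero value.
		nc := chunkenc.NewHistogramChunk()
		if counterReset {
			nc.SetCounterResetHeader(chunkenc.CounterReset)
		}
		na, err := nc.Appender()
		if err != nil {
			panic(err) // Cannot happen on an empty chunk.
		}
		na.AppendHistogram(t, h)
		return nc, na
	case len(posInterjections) > 0 || len(negInterjections) > 0:
		// Appendable, but only after recoding the existing chunk so
		// that all previous samples gain the newly appeared buckets.
		nc, na := app.Recode(posInterjections, negInterjections, h.PositiveSpans, h.NegativeSpans)
		na.AppendHistogram(t, h)
		return nc, na
	default:
		app.AppendHistogram(t, h)
		return c, app
	}
}
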
tsdb/chunkenc/histogram_meta.go (new file, 334 lines)
@@ -0,0 +1,334 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunkenc

import (
	"math"

	"github.com/prometheus/prometheus/model/histogram"
)

func writeHistogramChunkLayout(b *bstream, schema int32, zeroThreshold float64, positiveSpans, negativeSpans []histogram.Span) {
	putZeroThreshold(b, zeroThreshold)
	putVarbitInt(b, int64(schema))
	putHistogramChunkLayoutSpans(b, positiveSpans)
	putHistogramChunkLayoutSpans(b, negativeSpans)
}

func readHistogramChunkLayout(b *bstreamReader) (
	schema int32, zeroThreshold float64,
	positiveSpans, negativeSpans []histogram.Span,
	err error,
) {
	zeroThreshold, err = readZeroThreshold(b)
	if err != nil {
		return
	}

	v, err := readVarbitInt(b)
	if err != nil {
		return
	}
	schema = int32(v)

	positiveSpans, err = readHistogramChunkLayoutSpans(b)
	if err != nil {
		return
	}

	negativeSpans, err = readHistogramChunkLayoutSpans(b)
	if err != nil {
		return
	}

	return
}

func putHistogramChunkLayoutSpans(b *bstream, spans []histogram.Span) {
	putVarbitUint(b, uint64(len(spans)))
	for _, s := range spans {
		putVarbitUint(b, uint64(s.Length))
		putVarbitInt(b, int64(s.Offset))
	}
}

func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) {
	var spans []histogram.Span
	num, err := readVarbitUint(b)
	if err != nil {
		return nil, err
	}
	for i := 0; i < int(num); i++ {

		length, err := readVarbitUint(b)
		if err != nil {
			return nil, err
		}

		offset, err := readVarbitInt(b)
		if err != nil {
			return nil, err
		}

		spans = append(spans, histogram.Span{
			Length: uint32(length),
			Offset: int32(offset),
		})
	}
	return spans, nil
}

// putZeroThreshold writes the zero threshold to the bstream. It stores typical
// values in just one byte, but needs 9 bytes for other values. In detail:
//
//   - If the threshold is 0, store a single zero byte.
//
//   - If the threshold is a power of 2 between (and including) 2^-243 and 2^10,
//     take the exponent from the IEEE 754 representation of the threshold, which
//     covers a range between (and including) -242 and 11. (2^-243 is 0.5*2^-242
//     in IEEE 754 representation, and 2^10 is 0.5*2^11.) Add 243 to the exponent
//     and store the result (which will be between 1 and 254) as a single
//     byte. Note that small powers of two are preferred values for the zero
//     threshold. The default value for the zero threshold is 2^-128 (or
//     0.5*2^-127 in IEEE 754 representation) and will therefore be encoded as a
//     single byte (with value 116).
//
//   - In all other cases, store 255 as a single byte, followed by the 8 bytes of
//     the threshold as a float64, i.e. taking 9 bytes in total.
func putZeroThreshold(b *bstream, threshold float64) {
	if threshold == 0 {
		b.writeByte(0)
		return
	}
	frac, exp := math.Frexp(threshold)
	if frac != 0.5 || exp < -242 || exp > 11 {
		b.writeByte(255)
		b.writeBits(math.Float64bits(threshold), 64)
		return
	}
	b.writeByte(byte(exp + 243))
}

// readZeroThreshold reads the zero threshold written with putZeroThreshold.
func readZeroThreshold(br *bstreamReader) (float64, error) {
	b, err := br.ReadByte()
	if err != nil {
		return 0, err
	}
	switch b {
	case 0:
|
return 0, nil
|
||||||
|
case 255:
|
||||||
|
v, err := br.readBits(64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return math.Float64frombits(v), nil
|
||||||
|
default:
|
||||||
|
return math.Ldexp(0.5, int(b)-243), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
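
// A minimal illustration of the encoding above (a sketch using the
// package-internal bstream; not part of the chunk format itself): the
// default zero threshold 2^-128 round-trips through a single byte with
// value 116.
func exampleZeroThresholdRoundTrip() (float64, error) {
	bs := bstream{}
	putZeroThreshold(&bs, math.Ldexp(1, -128)) // Frexp yields (0.5, -127), stored as byte 116.
	br := newBReader(bs.bytes())
	return readZeroThreshold(&br) // Returns 2^-128, nil.
}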

type bucketIterator struct {
	spans  []histogram.Span
	span   int // Span position of last yielded bucket.
	bucket int // Bucket position within span of last yielded bucket.
	idx    int // Bucket index (globally across all spans) of last yielded bucket.
}

func newBucketIterator(spans []histogram.Span) *bucketIterator {
	b := bucketIterator{
		spans:  spans,
		span:   0,
		bucket: -1,
		idx:    -1,
	}
	if len(spans) > 0 {
		b.idx += int(spans[0].Offset)
	}
	return &b
}

func (b *bucketIterator) Next() (int, bool) {
	// We're already out of bounds.
	if b.span >= len(b.spans) {
		return 0, false
	}
try:
	if b.bucket < int(b.spans[b.span].Length-1) { // Try to move within same span.
		b.bucket++
		b.idx++
		return b.idx, true
	} else if b.span < len(b.spans)-1 { // Try to move from one span to the next.
		b.span++
		b.idx += int(b.spans[b.span].Offset + 1)
		b.bucket = 0
		if b.spans[b.span].Length == 0 {
			// Pathological case that should never happen. We can't use this span, let's try again.
			goto try
		}
		return b.idx, true
	}
	// We're out of options.
	return 0, false
}
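
// A minimal illustration (a sketch, not part of the original file):
// bucketIterator yields the global bucket indices of a span layout. Two
// spans of lengths 2 and 1, separated by a gap of 1, yield the indices
// 0, 1, 3.
func exampleBucketIterator() []int {
	it := newBucketIterator([]histogram.Span{
		{Offset: 0, Length: 2},
		{Offset: 1, Length: 1},
	})
	var idxs []int
	for idx, ok := it.Next(); ok; idx, ok = it.Next() {
		idxs = append(idxs, idx)
	}
	return idxs // [0, 1, 3]
}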

// An Interjection describes how many new buckets have to be introduced before
// processing the pos'th delta from the original slice.
type Interjection struct {
	pos int
	num int
}

// compareSpans returns the interjections to convert a slice of deltas to a new
// slice representing an expanded set of buckets, or false if incompatible
// (e.g. if buckets were removed).
//
// Example:
//
// Let's say the old buckets look like this:
//
//	span syntax: [offset, length]
//	spans      : [ 0 , 2 ]               [2,1]                [ 3 , 2 ]                  [3,1]       [1,1]
//	bucket idx : [0]   [1]    2     3    [4]    5     6     7    [8]    [9]   10    11    12   [13]   14    [15]
//	raw values    6     3                 3                       2      4                       5           1
//	deltas        6    -3                 0                      -1      2                       1          -4
//
// But now we introduce a new bucket layout. (Carefully chosen example where we
// have a span appended, one unchanged[*], one prepended, and two merged - in
// that order.)
//
// [*] unchanged in terms of which bucket indices they represent, but to achieve
// that, their offset needs to change if "disrupted" by spans changing ahead of
// them.
//
//	                                     \/ this one is "unchanged"
//	spans      : [  0  ,  3    ]       [1,1]       [    1    ,   4     ]          [  3  ,   3    ]
//	bucket idx : [0]   [1]   [2]   3   [4]   5    [6]   [7]   [8]   [9]   10    11    12   [13]  [14]  [15]
//	raw values    6     3     0         3          0     0     2     4                  5     0     1
//	deltas        6    -3    -3         3         -3     0     2     2                  1    -5     1
//	delta mods:               / \                 / \                                        / \
//
// Note that whenever any new buckets are introduced, the subsequent "old"
// bucket needs to readjust its delta to the new base of 0. Thus, for the caller
// who wants to transform the set of original deltas to a new set of deltas to
// match a new span layout that adds buckets, we simply need to generate a list
// of interjections.
//
// Note: Within compareSpans we don't have to worry about the changes to the
// spans themselves; thanks to the iterators, we get to work with the more
// useful bucket indices (which of course directly correspond to the buckets we
// have to adjust).
func compareSpans(a, b []histogram.Span) ([]Interjection, bool) {
	ai := newBucketIterator(a)
	bi := newBucketIterator(b)

	var interjections []Interjection

	// When inter.num becomes > 0, this becomes a valid interjection that
	// should be yielded when we finish a streak of new buckets.
	var inter Interjection

	av, aOK := ai.Next()
	bv, bOK := bi.Next()
loop:
	for {
		switch {
		case aOK && bOK:
			switch {
			case av == bv: // Both have an identical value. Move on!
				// Finish WIP interjection and reset.
				if inter.num > 0 {
					interjections = append(interjections, inter)
				}
				inter.num = 0
				av, aOK = ai.Next()
				bv, bOK = bi.Next()
				inter.pos++
			case av < bv: // b misses a value that is in a.
				return interjections, false
			case av > bv: // a misses a value that is in b. Forward b and recompare.
				inter.num++
				bv, bOK = bi.Next()
			}
		case aOK && !bOK: // b misses a value that is in a.
			return interjections, false
		case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
			inter.num++
			bv, bOK = bi.Next()
		default: // Both iterators ran out. We're done.
			if inter.num > 0 {
				interjections = append(interjections, inter)
			}
			break loop
		}
	}

	return interjections, true
}

// interject merges 'in' with the provided interjections and writes them into
// 'out', which must already have the appropriate length.
func interject(in, out []int64, interjections []Interjection) []int64 {
	var (
		j      int   // Position in out.
		v      int64 // The last value seen.
		interj int   // The next interjection to process.
	)
	for i, d := range in {
		if interj < len(interjections) && i == interjections[interj].pos {

			// We have an interjection!
			// Add interjection.num new delta values such that their
			// bucket values equate 0.
			out[j] = int64(-v)
			j++
			for x := 1; x < interjections[interj].num; x++ {
				out[j] = 0
				j++
			}
			interj++

			// Now save the value from the input. The delta value we
			// should save is the original delta value + the last
			// value of the point before the interjection (to undo
			// the delta that was introduced by the interjection).
			out[j] = d + v
			j++
			v = d + v
			continue
		}

		// If there was no interjection, the original delta is still
		// valid.
		out[j] = d
		j++
		v += d
	}
	switch interj {
	case len(interjections):
		// All interjections processed. Nothing more to do.
	case len(interjections) - 1:
		// One more interjection to process at the end.
		out[j] = int64(-v)
		j++
		for x := 1; x < interjections[interj].num; x++ {
			out[j] = 0
			j++
		}
	default:
		panic("unprocessed interjections left")
	}
	return out
}
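
// A minimal illustration (a sketch, not part of the original file):
// compareSpans and interject work as a pair. With one bucket prepended
// before the old layout, the old deltas {6, -3, 0} become {0, 6, -3, 0}.
func exampleInterject() []int64 {
	a := []histogram.Span{{Offset: -10, Length: 3}}
	b := []histogram.Span{{Offset: -11, Length: 4}}
	inter, ok := compareSpans(a, b) // ok == true, inter == [{pos: 0, num: 1}]
	if !ok {
		return nil
	}
	out := make([]int64, 4)
	return interject([]int64{6, -3, 0}, out, inter) // [0, 6, -3, 0]
}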
tsdb/chunkenc/histogram_meta_test.go (new file, 371 lines)
@@ -0,0 +1,371 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// The code in this file was largely written by Damian Gryski as part of
// https://github.com/dgryski/go-tsz and published under the license below.
// It was modified to accommodate reading from byte slices without modifying
// the underlying bytes, which would panic when reading from mmap'd
// read-only byte slices.
package chunkenc

import (
	"math"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/histogram"
)

// Example of a span layout and resulting bucket indices (_idx_ is used in this
// histogram, others are shown just for context):
//
//	spans      : [offset: 0, length: 2]  [offset 1, length 1]
//	bucket idx : _0_ _1_ 2 [3] 4 ...

func TestBucketIterator(t *testing.T) {
	type test struct {
		spans []histogram.Span
		idxs  []int
	}
	tests := []test{
		{
			spans: []histogram.Span{
				{
					Offset: 0,
					Length: 1,
				},
			},
			idxs: []int{0},
		},
		{
			spans: []histogram.Span{
				{
					Offset: 0,
					Length: 2,
				},
				{
					Offset: 1,
					Length: 1,
				},
			},
			idxs: []int{0, 1, 3},
		},
		{
			spans: []histogram.Span{
				{
					Offset: 100,
					Length: 4,
				},
				{
					Offset: 8,
					Length: 7,
				},
				{
					Offset: 0,
					Length: 1,
				},
			},
			idxs: []int{100, 101, 102, 103, 112, 113, 114, 115, 116, 117, 118, 119},
		},
		// The below 2 sets are the ones described in compareSpans's comments.
		{
			spans: []histogram.Span{
				{Offset: 0, Length: 2},
				{Offset: 2, Length: 1},
				{Offset: 3, Length: 2},
				{Offset: 3, Length: 1},
				{Offset: 1, Length: 1},
			},
			idxs: []int{0, 1, 4, 8, 9, 13, 15},
		},
		{
			spans: []histogram.Span{
				{Offset: 0, Length: 3},
				{Offset: 1, Length: 1},
				{Offset: 1, Length: 4},
				{Offset: 3, Length: 3},
			},
			idxs: []int{0, 1, 2, 4, 6, 7, 8, 9, 13, 14, 15},
		},
	}
	for _, test := range tests {
		b := newBucketIterator(test.spans)
		var got []int
		v, ok := b.Next()
		for ok {
			got = append(got, v)
			v, ok = b.Next()
		}
		require.Equal(t, test.idxs, got)
	}
}

func TestInterjection(t *testing.T) {
	scenarios := []struct {
		description           string
		spansA, spansB        []histogram.Span
		valid                 bool
		interjections         []Interjection
		bucketsIn, bucketsOut []int64
	}{
		{
			description: "single prepend at the beginning",
			spansA: []histogram.Span{
				{Offset: -10, Length: 3},
			},
			spansB: []histogram.Span{
				{Offset: -11, Length: 4},
			},
			valid: true,
			interjections: []Interjection{
				{
					pos: 0,
					num: 1,
				},
			},
			bucketsIn:  []int64{6, -3, 0},
			bucketsOut: []int64{0, 6, -3, 0},
		},
		{
			description: "single append at the end",
			spansA: []histogram.Span{
				{Offset: -10, Length: 3},
			},
			spansB: []histogram.Span{
				{Offset: -10, Length: 4},
			},
			valid: true,
			interjections: []Interjection{
				{
					pos: 3,
					num: 1,
				},
			},
			bucketsIn:  []int64{6, -3, 0},
			bucketsOut: []int64{6, -3, 0, -3},
		},
		{
			description: "double prepend at the beginning",
			spansA: []histogram.Span{
				{Offset: -10, Length: 3},
			},
			spansB: []histogram.Span{
				{Offset: -12, Length: 5},
			},
			valid: true,
			interjections: []Interjection{
				{
					pos: 0,
					num: 2,
				},
			},
			bucketsIn:  []int64{6, -3, 0},
			bucketsOut: []int64{0, 0, 6, -3, 0},
		},
		{
			description: "double append at the end",
			spansA: []histogram.Span{
				{Offset: -10, Length: 3},
			},
			spansB: []histogram.Span{
				{Offset: -10, Length: 5},
			},
			valid: true,
			interjections: []Interjection{
				{
					pos: 3,
					num: 2,
				},
			},
			bucketsIn:  []int64{6, -3, 0},
			bucketsOut: []int64{6, -3, 0, -3, 0},
		},
		{
			description: "double prepend at the beginning and double append at the end",
			spansA: []histogram.Span{
				{Offset: -10, Length: 3},
			},
			spansB: []histogram.Span{
				{Offset: -12, Length: 7},
			},
			valid: true,
			interjections: []Interjection{
				{
					pos: 0,
					num: 2,
				},
				{
					pos: 3,
					num: 2,
				},
			},
			bucketsIn:  []int64{6, -3, 0},
			bucketsOut: []int64{0, 0, 6, -3, 0, -3, 0},
		},
		{
			description: "single removal of bucket at the start",
			spansA: []histogram.Span{
				{Offset: -10, Length: 4},
			},
			spansB: []histogram.Span{
				{Offset: -9, Length: 3},
			},
			valid: false,
		},
		{
			description: "single removal of bucket in the middle",
			spansA: []histogram.Span{
				{Offset: -10, Length: 4},
			},
			spansB: []histogram.Span{
				{Offset: -10, Length: 2},
				{Offset: 1, Length: 1},
			},
			valid: false,
		},
		{
			description: "single removal of bucket at the end",
			spansA: []histogram.Span{
				{Offset: -10, Length: 4},
			},
			spansB: []histogram.Span{
				{Offset: -10, Length: 3},
			},
			valid: false,
		},
		{
			description: "as described in doc comment",
			spansA: []histogram.Span{
				{Offset: 0, Length: 2},
				{Offset: 2, Length: 1},
				{Offset: 3, Length: 2},
				{Offset: 3, Length: 1},
				{Offset: 1, Length: 1},
			},
			spansB: []histogram.Span{
				{Offset: 0, Length: 3},
				{Offset: 1, Length: 1},
				{Offset: 1, Length: 4},
				{Offset: 3, Length: 3},
			},
			valid: true,
			interjections: []Interjection{
				{
					pos: 2,
					num: 1,
				},
				{
					pos: 3,
					num: 2,
				},
				{
					pos: 6,
					num: 1,
				},
			},
			bucketsIn:  []int64{6, -3, 0, -1, 2, 1, -4},
			bucketsOut: []int64{6, -3, -3, 3, -3, 0, 2, 2, 1, -5, 1},
		},
	}

	for _, s := range scenarios {
		t.Run(s.description, func(t *testing.T) {
			interjections, valid := compareSpans(s.spansA, s.spansB)
			if !s.valid {
				require.False(t, valid, "compareSpans unexpectedly returned true")
				return
			}
			require.True(t, valid, "compareSpans unexpectedly returned false")
			require.Equal(t, s.interjections, interjections)

			gotBuckets := make([]int64, len(s.bucketsOut))
			interject(s.bucketsIn, gotBuckets, interjections)
			require.Equal(t, s.bucketsOut, gotBuckets)
		})
	}
}

func TestWriteReadHistogramChunkLayout(t *testing.T) {
	layouts := []struct {
		schema                       int32
		zeroThreshold                float64
		positiveSpans, negativeSpans []histogram.Span
	}{
		{
			schema:        3,
			zeroThreshold: 0,
			positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
			negativeSpans: nil,
		},
		{
			schema:        -2,
			zeroThreshold: 2.938735877055719e-39, // Default value in client_golang.
			positiveSpans: nil,
			negativeSpans: []histogram.Span{{Offset: 2, Length: 5}, {Offset: 1, Length: 34}},
		},
		{
			schema:        6,
			zeroThreshold: 1024, // The largest power of two we can encode in one byte.
			positiveSpans: nil,
			negativeSpans: nil,
		},
		{
			schema:        6,
			zeroThreshold: 1025,
			positiveSpans: []histogram.Span{{Offset: 2, Length: 5}, {Offset: 1, Length: 34}, {Offset: 0, Length: 0}}, // Weird span.
			negativeSpans: []histogram.Span{{Offset: -345, Length: 4545}, {Offset: 53645665, Length: 345}, {Offset: 945995, Length: 85848}},
		},
		{
			schema:        6,
			zeroThreshold: 2048,
			positiveSpans: nil,
			negativeSpans: nil,
		},
		{
			schema:        0,
			zeroThreshold: math.Ldexp(0.5, -242), // The smallest power of two we can encode in one byte.
			positiveSpans: []histogram.Span{{Offset: -4, Length: 3}},
			negativeSpans: []histogram.Span{{Offset: 2, Length: 5}, {Offset: 1, Length: 34}},
		},
		{
			schema:        0,
			zeroThreshold: math.Ldexp(0.5, -243),
			positiveSpans: []histogram.Span{{Offset: -4, Length: 3}},
			negativeSpans: []histogram.Span{{Offset: 2, Length: 5}, {Offset: 1, Length: 34}},
		},
		{
			schema:        4,
			zeroThreshold: 42, // Not a power of two.
			positiveSpans: nil,
			negativeSpans: nil,
		},
	}

	bs := bstream{}

	for _, l := range layouts {
		writeHistogramChunkLayout(&bs, l.schema, l.zeroThreshold, l.positiveSpans, l.negativeSpans)
	}

	bsr := newBReader(bs.bytes())

	for _, want := range layouts {
		gotSchema, gotZeroThreshold, gotPositiveSpans, gotNegativeSpans, err := readHistogramChunkLayout(&bsr)
		require.NoError(t, err)
		require.Equal(t, want.schema, gotSchema)
		require.Equal(t, want.zeroThreshold, gotZeroThreshold)
		require.Equal(t, want.positiveSpans, gotPositiveSpans)
		require.Equal(t, want.negativeSpans, gotNegativeSpans)
	}
}
tsdb/chunkenc/histogram_test.go (new file, 469 lines)
@@ -0,0 +1,469 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunkenc

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/histogram"
)

func TestHistogramChunkSameBuckets(t *testing.T) {
	c := NewHistogramChunk()
	var exp []res

	// Create fresh appender and add the first histogram.
	app, err := c.Appender()
	require.NoError(t, err)
	require.Equal(t, 0, c.NumSamples())

	ts := int64(1234567890)
	h := &histogram.Histogram{
		Count:         5,
		ZeroCount:     2,
		Sum:           18.4,
		ZeroThreshold: 1e-100,
		Schema:        1,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 1, Length: 2},
		},
		PositiveBuckets: []int64{1, 1, -1, 0}, // counts: 1, 2, 1, 1 (total 5)
	}
	app.AppendHistogram(ts, h)
	exp = append(exp, res{t: ts, h: h})
	require.Equal(t, 1, c.NumSamples())

	// Add an updated histogram.
	ts += 16
	h = h.Copy()
	h.Count += 9
	h.ZeroCount++
	h.Sum = 24.4
	h.PositiveBuckets = []int64{5, -2, 1, -2} // counts: 5, 3, 4, 2 (total 14)
	app.AppendHistogram(ts, h)
	exp = append(exp, res{t: ts, h: h})
	require.Equal(t, 2, c.NumSamples())

	// Add update with new appender.
	app, err = c.Appender()
	require.NoError(t, err)

	ts += 14
	h = h.Copy()
	h.Count += 13
	h.ZeroCount += 2
	h.Sum = 24.4
	h.PositiveBuckets = []int64{6, 1, -3, 6} // counts: 6, 7, 4, 10 (total 27)
	app.AppendHistogram(ts, h)
	exp = append(exp, res{t: ts, h: h})
	require.Equal(t, 3, c.NumSamples())

	// 1. Expand iterator in simple case.
	it := c.iterator(nil)
	require.NoError(t, it.Err())
	var act []res
	for it.Next() == ValHistogram {
		ts, h := it.AtHistogram()
		act = append(act, res{t: ts, h: h})
	}
	require.NoError(t, it.Err())
	require.Equal(t, exp, act)

	// 2. Expand second iterator while reusing first one.
	// it2 := c.Iterator(it1)
	// var res2 []pair
	// for it2.Next() {
	// 	ts, v := it2.At()
	// 	res2 = append(res2, pair{t: ts, v: v})
	// }
	// require.NoError(t, it2.Err())
	// require.Equal(t, exp, res2)

	// 3. Test iterator Seek.
	// mid := len(exp) / 2

	// it3 := c.Iterator(nil)
	// var res3 []pair
	// require.Equal(t, true, it3.Seek(exp[mid].t))
	// Below ones should not matter.
	// require.Equal(t, true, it3.Seek(exp[mid].t))
	// require.Equal(t, true, it3.Seek(exp[mid].t))
	// ts, v = it3.At()
	// res3 = append(res3, pair{t: ts, v: v})

	// for it3.Next() {
	// 	ts, v := it3.At()
	// 	res3 = append(res3, pair{t: ts, v: v})
	// }
	// require.NoError(t, it3.Err())
	// require.Equal(t, exp[mid:], res3)
	// require.Equal(t, false, it3.Seek(exp[len(exp)-1].t+1))
}

type res struct {
	t int64
	h *histogram.Histogram
}

// Mimics the scenario described for compareSpans().
func TestHistogramChunkBucketChanges(t *testing.T) {
	c := Chunk(NewHistogramChunk())

	// Create fresh appender and add the first histogram.
	app, err := c.Appender()
	require.NoError(t, err)
	require.Equal(t, 0, c.NumSamples())

	ts1 := int64(1234567890)
	h1 := &histogram.Histogram{
		Count:         5,
		ZeroCount:     2,
		Sum:           18.4,
		ZeroThreshold: 1e-125,
		Schema:        1,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 2, Length: 1},
			{Offset: 3, Length: 2},
			{Offset: 3, Length: 1},
			{Offset: 1, Length: 1},
		},
		PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
	}

	app.AppendHistogram(ts1, h1)
	require.Equal(t, 1, c.NumSamples())

	// Add a new histogram that has expanded buckets.
	ts2 := ts1 + 16
	h2 := h1.Copy()
	h2.PositiveSpans = []histogram.Span{
		{Offset: 0, Length: 3},
		{Offset: 1, Length: 1},
		{Offset: 1, Length: 4},
		{Offset: 3, Length: 3},
	}
	h2.Count += 9
	h2.ZeroCount++
	h2.Sum = 30
	// Existing histogram should get values converted from the above to:
	//   6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
	// so the new histogram should have new counts >= these per-bucket counts, e.g.:
	h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)

	// This is how span changes will be handled.
	hApp, _ := app.(*HistogramAppender)
	posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
	require.Greater(t, len(posInterjections), 0)
	require.Equal(t, 0, len(negInterjections))
	require.True(t, ok) // Only new buckets came in.
	require.False(t, cr)
	c, app = hApp.Recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
	app.AppendHistogram(ts2, h2)

	require.Equal(t, 2, c.NumSamples())

	// Because the 2nd histogram has expanded buckets, we should expect all
	// histograms (in particular the first) to come back using the new spans
	// metadata as well as the expanded buckets.
	h1.PositiveSpans = h2.PositiveSpans
	h1.PositiveBuckets = []int64{6, -3, -3, 3, -3, 0, 2, 2, 1, -5, 1}
	exp := []res{
		{t: ts1, h: h1},
		{t: ts2, h: h2},
	}
	it := c.Iterator(nil)
	var act []res
	for it.Next() == ValHistogram {
		ts, h := it.AtHistogram()
		act = append(act, res{t: ts, h: h})
	}
	require.NoError(t, it.Err())
	require.Equal(t, exp, act)
}

func TestHistoChunkAppendable(t *testing.T) {
	c := Chunk(NewHistogramChunk())

	// Create fresh appender and add the first histogram.
	app, err := c.Appender()
	require.NoError(t, err)
	require.Equal(t, 0, c.NumSamples())

	ts := int64(1234567890)
	h1 := &histogram.Histogram{
		Count:         5,
		ZeroCount:     2,
		Sum:           18.4,
		ZeroThreshold: 1e-125,
		Schema:        1,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 2, Length: 1},
			{Offset: 3, Length: 2},
			{Offset: 3, Length: 1},
			{Offset: 1, Length: 1},
		},
		PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
	}

	app.AppendHistogram(ts, h1)
	require.Equal(t, 1, c.NumSamples())

	{ // New histogram that has more buckets.
		h2 := h1
		h2.PositiveSpans = []histogram.Span{
			{Offset: 0, Length: 3},
			{Offset: 1, Length: 1},
			{Offset: 1, Length: 4},
			{Offset: 3, Length: 3},
		}
		h2.Count += 9
		h2.ZeroCount++
		h2.Sum = 30
		// Existing histogram should get values converted from the above to:
		//   6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
		// so the new histogram should have new counts >= these per-bucket counts, e.g.:
		h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)

		hApp, _ := app.(*HistogramAppender)
		posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
		require.Greater(t, len(posInterjections), 0)
		require.Equal(t, 0, len(negInterjections))
		require.True(t, ok) // Only new buckets came in.
		require.False(t, cr)
	}

	{ // New histogram that has a bucket missing.
		h2 := h1
		h2.PositiveSpans = []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 5, Length: 2},
			{Offset: 3, Length: 1},
			{Offset: 1, Length: 1},
		}
		h2.Sum = 21
		h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // counts: 6, 3, 2, 4, 5, 1 (total 21)

		hApp, _ := app.(*HistogramAppender)
		posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
		require.Equal(t, 0, len(posInterjections))
		require.Equal(t, 0, len(negInterjections))
		require.False(t, ok) // Need to cut a new chunk.
		require.True(t, cr)
	}

	{ // New histogram that has a counter reset while buckets are same.
		h2 := h1
		h2.Sum = 23
		h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23)

		hApp, _ := app.(*HistogramAppender)
		posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
		require.Equal(t, 0, len(posInterjections))
		require.Equal(t, 0, len(negInterjections))
		require.False(t, ok) // Need to cut a new chunk.
		require.True(t, cr)
	}

	{ // New histogram that has a counter reset while new buckets were added.
		h2 := h1
		h2.PositiveSpans = []histogram.Span{
			{Offset: 0, Length: 3},
			{Offset: 1, Length: 1},
			{Offset: 1, Length: 4},
			{Offset: 3, Length: 3},
		}
		h2.Sum = 29
		// Existing histogram should get values converted from the above to:
		//   6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
		// so the new histogram should have new counts >= these per-bucket counts, e.g.:
		h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // 7 5 1 3 1 0 2 5 5 0 0 (total 29)

		hApp, _ := app.(*HistogramAppender)
		posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
		require.Equal(t, 0, len(posInterjections))
		require.Equal(t, 0, len(negInterjections))
		require.False(t, ok) // Need to cut a new chunk.
		require.True(t, cr)
	}

	{
		// New histogram that has a counter reset while new buckets were
		// added before the first bucket and reset on first bucket. (to
		// catch the edge case where the new bucket should be forwarded
		// ahead until first old bucket at start)
		h2 := h1
		h2.PositiveSpans = []histogram.Span{
			{Offset: -3, Length: 2},
			{Offset: 1, Length: 2},
			{Offset: 2, Length: 1},
			{Offset: 3, Length: 2},
			{Offset: 3, Length: 1},
			{Offset: 1, Length: 1},
		}
		h2.Sum = 26
		// Existing histogram should get values converted from the above to:
		//   0, 0, 6, 3, 3, 2, 4, 5, 1
		// so the new histogram should have new counts >= these per-bucket counts, e.g.:
		h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // counts: 1, 2, 5, 3, 3, 2, 4, 5, 1 (total 26)

		hApp, _ := app.(*HistogramAppender)
		posInterjections, negInterjections, ok, cr := hApp.Appendable(h2)
		require.Equal(t, 0, len(posInterjections))
		require.Equal(t, 0, len(negInterjections))
		require.False(t, ok) // Need to cut a new chunk.
		require.True(t, cr)
	}
}

func TestAtFloatHistogram(t *testing.T) {
	input := []histogram.Histogram{
		{
			Schema:        0,
			Count:         21,
			Sum:           1234.5,
			ZeroThreshold: 0.001,
			ZeroCount:     4,
			PositiveSpans: []histogram.Span{
				{Offset: 0, Length: 4},
				{Offset: 0, Length: 0},
				{Offset: 0, Length: 3},
			},
			PositiveBuckets: []int64{1, 1, -1, 0, 0, 0, 0},
			NegativeSpans: []histogram.Span{
				{Offset: 1, Length: 4},
				{Offset: 2, Length: 0},
				{Offset: 2, Length: 3},
			},
			NegativeBuckets: []int64{1, 1, -1, 1, 0, 0, 0},
		},
		{
			Schema:        0,
			Count:         36,
			Sum:           2345.6,
			ZeroThreshold: 0.001,
			ZeroCount:     5,
			PositiveSpans: []histogram.Span{
				{Offset: 0, Length: 4},
				{Offset: 0, Length: 0},
				{Offset: 0, Length: 3},
			},
			PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
			NegativeSpans: []histogram.Span{
				{Offset: 1, Length: 4},
				{Offset: 2, Length: 0},
				{Offset: 2, Length: 3},
			},
			NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
		},
		{
			Schema:        0,
			Count:         36,
			Sum:           1111.1,
			ZeroThreshold: 0.001,
			ZeroCount:     5,
			PositiveSpans: []histogram.Span{
				{Offset: 0, Length: 4},
				{Offset: 0, Length: 0},
				{Offset: 0, Length: 3},
			},
			PositiveBuckets: []int64{1, 2, -2, 2, -1, 0, 0},
			NegativeSpans: []histogram.Span{
				{Offset: 1, Length: 4},
				{Offset: 2, Length: 0},
				{Offset: 2, Length: 3},
			},
			NegativeBuckets: []int64{1, 3, -2, 5, -1, 0, -3},
		},
	}

	expOutput := []*histogram.FloatHistogram{
		{
			Schema:        0,
			Count:         21,
			Sum:           1234.5,
			ZeroThreshold: 0.001,
			ZeroCount:     4,
			PositiveSpans: []histogram.Span{
				{Offset: 0, Length: 4},
				{Offset: 0, Length: 0},
				{Offset: 0, Length: 3},
			},
			PositiveBuckets: []float64{1, 2, 1, 1, 1, 1, 1},
			NegativeSpans: []histogram.Span{
				{Offset: 1, Length: 4},
				{Offset: 2, Length: 0},
				{Offset: 2, Length: 3},
			},
			NegativeBuckets: []float64{1, 2, 1, 2, 2, 2, 2},
		},
		{
			Schema:        0,
			Count:         36,
			Sum:           2345.6,
			ZeroThreshold: 0.001,
			ZeroCount:     5,
			PositiveSpans: []histogram.Span{
				{Offset: 0, Length: 4},
				{Offset: 0, Length: 0},
				{Offset: 0, Length: 3},
			},
			PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
			NegativeSpans: []histogram.Span{
				{Offset: 1, Length: 4},
				{Offset: 2, Length: 0},
				{Offset: 2, Length: 3},
			},
			NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2},
		},
		{
			Schema:        0,
			Count:         36,
			Sum:           1111.1,
			ZeroThreshold: 0.001,
			ZeroCount:     5,
			PositiveSpans: []histogram.Span{
				{Offset: 0, Length: 4},
				{Offset: 0, Length: 0},
				{Offset: 0, Length: 3},
			},
			PositiveBuckets: []float64{1, 3, 1, 3, 2, 2, 2},
			NegativeSpans: []histogram.Span{
				{Offset: 1, Length: 4},
				{Offset: 2, Length: 0},
				{Offset: 2, Length: 3},
			},
			NegativeBuckets: []float64{1, 4, 2, 7, 6, 6, 3},
		},
	}

	chk := NewHistogramChunk()
	app, err := chk.Appender()
	require.NoError(t, err)
	for i := range input {
		app.AppendHistogram(int64(i), &input[i])
	}
	it := chk.Iterator(nil)
	i := int64(0)
	for it.Next() != ValNone {
		ts, h := it.AtFloatHistogram()
		require.Equal(t, i, ts)
		require.Equal(t, expOutput[i], h, "histogram %d unequal", i)
		i++
	}
}
tsdb/chunkenc/varbit.go (new file, 232 lines)
@@ -0,0 +1,232 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunkenc

import (
	"math/bits"

	"github.com/pkg/errors"
)

// putVarbitInt writes an int64 using varbit encoding with a bit bucketing
// optimized for the dod's observed in histogram buckets, plus a few additional
// buckets for large numbers.
//
// For optimal space utilization, each branch wouldn't need to support any of
// the values already covered by the prior branches; we could then expand the
// range of each branch and do more with fewer bits. That would come at the
// price of more expensive encoding and decoding (cutting out and later adding
// back that center piece we skip). With the distributions of values we see in
// practice, we would reduce the size by around 1%. A more detailed study would
// be needed for precise values, but it appears quite certain that we would end
// up far below 10%, which would maybe convince us to invest the increased
// coding/decoding cost.
func putVarbitInt(b *bstream, val int64) {
	switch {
	case val == 0: // Precisely 0, needs 1 bit.
		b.writeBit(zero)
	case bitRange(val, 3): // -3 <= val <= 4, needs 5 bits.
		b.writeBits(0b10, 2)
		b.writeBits(uint64(val), 3)
	case bitRange(val, 6): // -31 <= val <= 32, 9 bits.
		b.writeBits(0b110, 3)
		b.writeBits(uint64(val), 6)
	case bitRange(val, 9): // -255 <= val <= 256, 13 bits.
		b.writeBits(0b1110, 4)
		b.writeBits(uint64(val), 9)
	case bitRange(val, 12): // -2047 <= val <= 2048, 17 bits.
		b.writeBits(0b11110, 5)
		b.writeBits(uint64(val), 12)
	case bitRange(val, 18): // -131071 <= val <= 131072, 3 bytes.
		b.writeBits(0b111110, 6)
		b.writeBits(uint64(val), 18)
	case bitRange(val, 25): // -16777215 <= val <= 16777216, 4 bytes.
		b.writeBits(0b1111110, 7)
		b.writeBits(uint64(val), 25)
	case bitRange(val, 56): // -36028797018963967 <= val <= 36028797018963968, 8 bytes.
		b.writeBits(0b11111110, 8)
		b.writeBits(uint64(val), 56)
	default:
		b.writeBits(0b11111111, 8) // Worst case, needs 9 bytes.
		b.writeBits(uint64(val), 64)
	}
}

// readVarbitInt reads an int64 encoded with putVarbitInt.
func readVarbitInt(b *bstreamReader) (int64, error) {
	var d byte
	for i := 0; i < 8; i++ {
		d <<= 1
		bit, err := b.readBitFast()
		if err != nil {
			bit, err = b.readBit()
		}
		if err != nil {
			return 0, err
		}
		if bit == zero {
			break
		}
		d |= 1
	}

	var val int64
	var sz uint8

	switch d {
	case 0b0:
		// val == 0
	case 0b10:
		sz = 3
	case 0b110:
		sz = 6
	case 0b1110:
		sz = 9
	case 0b11110:
		sz = 12
	case 0b111110:
		sz = 18
	case 0b1111110:
		sz = 25
	case 0b11111110:
		sz = 56
	case 0b11111111:
		// Do not use fast because it's very unlikely it will succeed.
		bits, err := b.readBits(64)
		if err != nil {
			return 0, err
		}

		val = int64(bits)
	default:
		return 0, errors.Errorf("invalid bit pattern %b", d)
	}

	if sz != 0 {
		bits, err := b.readBitsFast(sz)
		if err != nil {
			bits, err = b.readBits(sz)
		}
		if err != nil {
			return 0, err
		}
		if bits > (1 << (sz - 1)) {
			// Negative value: the encoder wrote the low sz bits of a
			// negative int64, so undo the wraparound.
			bits = bits - (1 << sz)
		}
		val = int64(bits)
	}

	return val, nil
}
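
// A minimal round-trip illustration of the bucketing above (a sketch using
// the package-internal bstream, not part of the original file): each value
// lands in the smallest branch whose range covers it.
func exampleVarbitIntRoundTrip() []int64 {
	bs := bstream{}
	putVarbitInt(&bs, 0)    // 1 bit:   just the 0 prefix.
	putVarbitInt(&bs, 4)    // 5 bits:  prefix 0b10 plus a 3-bit value.
	putVarbitInt(&bs, -31)  // 9 bits:  prefix 0b110 plus a 6-bit value.
	putVarbitInt(&bs, 2048) // 17 bits: prefix 0b11110 plus a 12-bit value.
	br := newBReader(bs.bytes())
	got := make([]int64, 0, 4)
	for i := 0; i < 4; i++ {
		v, err := readVarbitInt(&br)
		if err != nil {
			return nil
		}
		got = append(got, v)
	}
	return got // [0, 4, -31, 2048]
}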

func bitRangeUint(x uint64, nbits int) bool {
	return bits.LeadingZeros64(x) >= 64-nbits
}

// putVarbitUint writes a uint64 using varbit encoding. It uses the same bit
// buckets as putVarbitInt.
func putVarbitUint(b *bstream, val uint64) {
	switch {
	case val == 0: // Precisely 0, needs 1 bit.
		b.writeBit(zero)
	case bitRangeUint(val, 3): // val <= 7, needs 5 bits.
		b.writeBits(0b10, 2)
		b.writeBits(val, 3)
	case bitRangeUint(val, 6): // val <= 63, 9 bits.
		b.writeBits(0b110, 3)
		b.writeBits(val, 6)
	case bitRangeUint(val, 9): // val <= 511, 13 bits.
		b.writeBits(0b1110, 4)
		b.writeBits(val, 9)
	case bitRangeUint(val, 12): // val <= 4095, 17 bits.
		b.writeBits(0b11110, 5)
		b.writeBits(val, 12)
	case bitRangeUint(val, 18): // val <= 262143, 3 bytes.
		b.writeBits(0b111110, 6)
		b.writeBits(val, 18)
	case bitRangeUint(val, 25): // val <= 33554431, 4 bytes.
		b.writeBits(0b1111110, 7)
		b.writeBits(val, 25)
	case bitRangeUint(val, 56): // val <= 72057594037927935, 8 bytes.
		b.writeBits(0b11111110, 8)
		b.writeBits(val, 56)
	default:
		b.writeBits(0b11111111, 8) // Worst case, needs 9 bytes.
		b.writeBits(val, 64)
	}
}

// readVarbitUint reads a uint64 encoded with putVarbitUint.
func readVarbitUint(b *bstreamReader) (uint64, error) {
	var d byte
	for i := 0; i < 8; i++ {
		d <<= 1
		bit, err := b.readBitFast()
		if err != nil {
			bit, err = b.readBit()
		}
		if err != nil {
			return 0, err
		}
		if bit == zero {
			break
		}
		d |= 1
	}

	var (
		bits uint64
		sz   uint8
		err  error
	)

	switch d {
	case 0b0:
		// val == 0
	case 0b10:
		sz = 3
	case 0b110:
		sz = 6
	case 0b1110:
		sz = 9
	case 0b11110:
		sz = 12
	case 0b111110:
		sz = 18
	case 0b1111110:
		sz = 25
	case 0b11111110:
		sz = 56
	case 0b11111111:
		// Do not use fast because it's very unlikely it will succeed.
		bits, err = b.readBits(64)
		if err != nil {
			return 0, err
		}
	default:
		return 0, errors.Errorf("invalid bit pattern %b", d)
	}

	if sz != 0 {
		bits, err = b.readBitsFast(sz)
		if err != nil {
			bits, err = b.readBits(sz)
		}
		if err != nil {
			return 0, err
		}
	}

	return bits, nil
}
tsdb/chunkenc/varbit_test.go (new file, 85 lines)
@@ -0,0 +1,85 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunkenc

import (
	"math"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestVarbitInt(t *testing.T) {
	numbers := []int64{
		math.MinInt64,
		-36028797018963968, -36028797018963967,
		-16777216, -16777215,
		-131072, -131071,
		-2048, -2047,
		-256, -255,
		-32, -31,
		-4, -3,
		-1, 0, 1,
		4, 5,
		32, 33,
		256, 257,
		2048, 2049,
		131072, 131073,
		16777216, 16777217,
		36028797018963968, 36028797018963969,
		math.MaxInt64,
	}

	bs := bstream{}

	for _, n := range numbers {
		putVarbitInt(&bs, n)
	}

	bsr := newBReader(bs.bytes())

	for _, want := range numbers {
		got, err := readVarbitInt(&bsr)
		require.NoError(t, err)
		require.Equal(t, want, got)
	}
}

func TestVarbitUint(t *testing.T) {
	numbers := []uint64{
		0, 1,
		7, 8,
		63, 64,
		511, 512,
		4095, 4096,
		262143, 262144,
		33554431, 33554432,
		72057594037927935, 72057594037927936,
		math.MaxUint64,
	}

	bs := bstream{}

	for _, n := range numbers {
		putVarbitUint(&bs, n)
	}

	bsr := newBReader(bs.bytes())

	for _, want := range numbers {
		got, err := readVarbitUint(&bsr)
		require.NoError(t, err)
		require.Equal(t, want, got)
	}
}
@@ -47,6 +47,8 @@ import (
 	"encoding/binary"
 	"math"
 	"math/bits"
+
+	"github.com/prometheus/prometheus/model/histogram"
 )

 const (
@@ -79,6 +81,7 @@ func (c *XORChunk) NumSamples() int {
 	return int(binary.BigEndian.Uint16(c.Bytes()))
 }

+// Compact implements the Chunk interface.
 func (c *XORChunk) Compact() {
 	if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
 		buf := make([]byte, l)
@@ -96,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) {
 	// To get an appender we must know the state it would have if we had
 	// appended all existing data from scratch.
 	// We iterate through the end and populate via the iterator's state.
-	for it.Next() {
+	for it.Next() != ValNone {
 	}
 	if err := it.Err(); err != nil {
 		return nil, err
@@ -110,7 +113,7 @@ func (c *XORChunk) Appender() (Appender, error) {
 		leading:  it.leading,
 		trailing: it.trailing,
 	}
-	if binary.BigEndian.Uint16(a.b.bytes()) == 0 {
+	if it.numTotal == 0 {
 		a.leading = 0xff
 	}
 	return a, nil
@@ -149,6 +152,10 @@ type xorAppender struct {
 	trailing uint8
 }

+func (a *xorAppender) AppendHistogram(t int64, h *histogram.Histogram) {
+	panic("appended a histogram to an xor chunk")
+}
+
 func (a *xorAppender) Append(t int64, v float64) {
 	var tDelta uint64
 	num := binary.BigEndian.Uint16(a.b.bytes())
@@ -176,6 +183,12 @@ func (a *xorAppender) Append(t int64, v float64) {

 	// Gorilla has a max resolution of seconds, Prometheus milliseconds.
 	// Thus we use higher value range steps with larger bit size.
+	//
+	// TODO(beorn7): This seems to needlessly jump to large bit
+	// sizes even for very small deviations from zero. Timestamp
+	// compression can probably benefit from some smaller bit
+	// buckets. See also what was done for histogram encoding in
+	// varbit.go.
 	switch {
 	case dod == 0:
 		a.b.writeBit(zero)
@@ -209,38 +222,7 @@ func bitRange(x int64, nbits uint8) bool {
 }

 func (a *xorAppender) writeVDelta(v float64) {
-	vDelta := math.Float64bits(v) ^ math.Float64bits(a.v)
-	if vDelta == 0 {
-		a.b.writeBit(zero)
-		return
-	}
-	a.b.writeBit(one)
-
-	leading := uint8(bits.LeadingZeros64(vDelta))
-	trailing := uint8(bits.TrailingZeros64(vDelta))
-
-	// Clamp number of leading zeros to avoid overflow when encoding.
-	if leading >= 32 {
-		leading = 31
-	}
-
-	if a.leading != 0xff && leading >= a.leading && trailing >= a.trailing {
-		a.b.writeBit(zero)
-		a.b.writeBits(vDelta>>a.trailing, 64-int(a.leading)-int(a.trailing))
-	} else {
-		a.leading, a.trailing = leading, trailing
-
-		a.b.writeBit(one)
-		a.b.writeBits(uint64(leading), 5)
-
-		// Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have.
-		// Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0).
-		// So instead we write out a 0 and adjust it back to 64 on unpacking.
-		sigbits := 64 - leading - trailing
-		a.b.writeBits(uint64(sigbits), 6)
-		a.b.writeBits(vDelta>>trailing, int(sigbits))
-	}
+	xorWrite(a.b, v, a.v, &a.leading, &a.trailing)
 }

 type xorIterator struct {
@@ -258,23 +240,35 @@ type xorIterator struct {
 	err error
 }

-func (it *xorIterator) Seek(t int64) bool {
+func (it *xorIterator) Seek(t int64) ValueType {
 	if it.err != nil {
-		return false
+		return ValNone
 	}

 	for t > it.t || it.numRead == 0 {
-		if !it.Next() {
-			return false
+		if it.Next() == ValNone {
+			return ValNone
 		}
 	}
-	return true
+	return ValFloat
 }

 func (it *xorIterator) At() (int64, float64) {
 	return it.t, it.val
 }

+func (it *xorIterator) AtHistogram() (int64, *histogram.Histogram) {
+	panic("cannot call xorIterator.AtHistogram")
+}
+
+func (it *xorIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	panic("cannot call xorIterator.AtFloatHistogram")
+}
+
+func (it *xorIterator) AtT() int64 {
+	return it.t
+}
+
 func (it *xorIterator) Err() error {
 	return it.err
 }
@@ -294,33 +288,33 @@ func (it *xorIterator) Reset(b []byte) {
 	it.err = nil
 }

-func (it *xorIterator) Next() bool {
+func (it *xorIterator) Next() ValueType {
 	if it.err != nil || it.numRead == it.numTotal {
-		return false
+		return ValNone
 	}

 	if it.numRead == 0 {
 		t, err := binary.ReadVarint(&it.br)
 		if err != nil {
 			it.err = err
-			return false
+			return ValNone
 		}
 		v, err := it.br.readBits(64)
 		if err != nil {
 			it.err = err
-			return false
+			return ValNone
 		}
 		it.t = t
 		it.val = math.Float64frombits(v)

 		it.numRead++
-		return true
+		return ValFloat
 	}
 	if it.numRead == 1 {
 		tDelta, err := binary.ReadUvarint(&it.br)
 		if err != nil {
 			it.err = err
-			return false
+			return ValNone
 		}
 		it.tDelta = tDelta
 		it.t = it.t + int64(it.tDelta)
@@ -338,7 +332,7 @@ func (it *xorIterator) Next() bool {
 	}
 	if err != nil {
 		it.err = err
-		return false
+		return ValNone
 	}
 	if bit == zero {
 		break
@@ -361,7 +355,7 @@ func (it *xorIterator) Next() bool {
 	bits, err := it.br.readBits(64)
 	if err != nil {
 		it.err = err
-		return false
+		return ValNone
 	}

 	dod = int64(bits)
@@ -374,7 +368,7 @@ func (it *xorIterator) Next() bool {
 	}
 	if err != nil {
 		it.err = err
-		return false
+		return ValNone
 	}

 	// Account for negative numbers, which come back as high unsigned numbers.
@@ -391,73 +385,122 @@ func (it *xorIterator) Next() bool {
 	return it.readValue()
|
return it.readValue()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (it *xorIterator) readValue() bool {
|
func (it *xorIterator) readValue() ValueType {
|
||||||
bit, err := it.br.readBitFast()
|
err := xorRead(&it.br, &it.val, &it.leading, &it.trailing)
|
||||||
if err != nil {
|
|
||||||
bit, err = it.br.readBit()
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
it.err = err
|
it.err = err
|
||||||
return false
|
return ValNone
|
||||||
}
|
}
|
||||||
|
it.numRead++
|
||||||
|
return ValFloat
|
||||||
|
}
|
||||||
|
|
||||||
|
func xorWrite(b *bstream, newValue, currentValue float64, leading, trailing *uint8) {
|
||||||
|
delta := math.Float64bits(newValue) ^ math.Float64bits(currentValue)
|
||||||
|
|
||||||
|
if delta == 0 {
|
||||||
|
b.writeBit(zero)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b.writeBit(one)
|
||||||
|
|
||||||
|
newLeading := uint8(bits.LeadingZeros64(delta))
|
||||||
|
newTrailing := uint8(bits.TrailingZeros64(delta))
|
||||||
|
|
||||||
|
// Clamp number of leading zeros to avoid overflow when encoding.
|
||||||
|
if newLeading >= 32 {
|
||||||
|
newLeading = 31
|
||||||
|
}
|
||||||
|
|
||||||
|
if *leading != 0xff && newLeading >= *leading && newTrailing >= *trailing {
|
||||||
|
// In this case, we stick with the current leading/trailing.
|
||||||
|
b.writeBit(zero)
|
||||||
|
b.writeBits(delta>>*trailing, 64-int(*leading)-int(*trailing))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update leading/trailing for the caller.
|
||||||
|
*leading, *trailing = newLeading, newTrailing
|
||||||
|
|
||||||
|
b.writeBit(one)
|
||||||
|
b.writeBits(uint64(newLeading), 5)
|
||||||
|
|
||||||
|
// Note that if newLeading == newTrailing == 0, then sigbits == 64. But
|
||||||
|
// that value doesn't actually fit into the 6 bits we have. Luckily, we
|
||||||
|
// never need to encode 0 significant bits, since that would put us in
|
||||||
|
// the other case (vdelta == 0). So instead we write out a 0 and adjust
|
||||||
|
// it back to 64 on unpacking.
|
||||||
|
sigbits := 64 - newLeading - newTrailing
|
||||||
|
b.writeBits(uint64(sigbits), 6)
|
||||||
|
b.writeBits(delta>>newTrailing, int(sigbits))
|
||||||
|
}
|
||||||
|
|
||||||
|
func xorRead(br *bstreamReader, value *float64, leading, trailing *uint8) error {
|
||||||
|
bit, err := br.readBitFast()
|
||||||
|
if err != nil {
|
||||||
|
bit, err = br.readBit()
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if bit == zero {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
bit, err = br.readBitFast()
|
||||||
|
if err != nil {
|
||||||
|
bit, err = br.readBit()
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
bits uint64
|
||||||
|
newLeading, newTrailing, mbits uint8
|
||||||
|
)
|
||||||
|
|
||||||
if bit == zero {
|
if bit == zero {
|
||||||
// it.val = it.val
|
// Reuse leading/trailing zero bits.
|
||||||
|
newLeading, newTrailing = *leading, *trailing
|
||||||
|
mbits = 64 - newLeading - newTrailing
|
||||||
} else {
|
} else {
|
||||||
bit, err := it.br.readBitFast()
|
bits, err = br.readBitsFast(5)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
bit, err = it.br.readBit()
|
bits, err = br.readBits(5)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
it.err = err
|
return err
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
if bit == zero {
|
newLeading = uint8(bits)
|
||||||
// reuse leading/trailing zero bits
|
|
||||||
// it.leading, it.trailing = it.leading, it.trailing
|
|
||||||
} else {
|
|
||||||
bits, err := it.br.readBitsFast(5)
|
|
||||||
if err != nil {
|
|
||||||
bits, err = it.br.readBits(5)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
it.err = err
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
it.leading = uint8(bits)
|
|
||||||
|
|
||||||
bits, err = it.br.readBitsFast(6)
|
bits, err = br.readBitsFast(6)
|
||||||
if err != nil {
|
|
||||||
bits, err = it.br.readBits(6)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
it.err = err
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
mbits := uint8(bits)
|
|
||||||
// 0 significant bits here means we overflowed and we actually need 64; see comment in encoder
|
|
||||||
if mbits == 0 {
|
|
||||||
mbits = 64
|
|
||||||
}
|
|
||||||
it.trailing = 64 - it.leading - mbits
|
|
||||||
}
|
|
||||||
|
|
||||||
mbits := 64 - it.leading - it.trailing
|
|
||||||
bits, err := it.br.readBitsFast(mbits)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
bits, err = it.br.readBits(mbits)
|
bits, err = br.readBits(6)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
it.err = err
|
return err
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
vbits := math.Float64bits(it.val)
|
mbits = uint8(bits)
|
||||||
vbits ^= bits << it.trailing
|
// 0 significant bits here means we overflowed and we actually
|
||||||
it.val = math.Float64frombits(vbits)
|
// need 64; see comment in xrWrite.
|
||||||
|
if mbits == 0 {
|
||||||
|
mbits = 64
|
||||||
|
}
|
||||||
|
newTrailing = 64 - newLeading - mbits
|
||||||
|
// Update leading/trailing zero bits for the caller.
|
||||||
|
*leading, *trailing = newLeading, newTrailing
|
||||||
}
|
}
|
||||||
|
bits, err = br.readBitsFast(mbits)
|
||||||
it.numRead++
|
if err != nil {
|
||||||
return true
|
bits, err = br.readBits(mbits)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
vbits := math.Float64bits(*value)
|
||||||
|
vbits ^= bits << newTrailing
|
||||||
|
*value = math.Float64frombits(vbits)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// OOOXORChunk holds a XORChunk and overrides the Encoding() method.
|
// OOOXORChunk holds a XORChunk and overrides the Encoding() method.
|
||||||
|
|
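The float encoding above is classic Gorilla XOR compression: a repeated value costs one bit, and a changed value costs only its significant bits once a leading/trailing-zero window has been established. As a minimal, self-contained sketch (not part of the commit; costBits and the sample values are illustrative), the following mirrors xorWrite's control flow to estimate the per-sample bit cost:

package main

import (
	"fmt"
	"math"
	"math/bits"
)

// costBits mirrors xorWrite's branches: 1 bit if the value repeats,
// 2 bits plus the window's significant bits if the XOR delta fits the
// current window, and 2+5+6+sigbits when a new window (5-bit leading
// count, 6-bit significant-bit count) has to be written.
func costBits(prev, cur float64, leading, trailing *uint8) int {
	delta := math.Float64bits(cur) ^ math.Float64bits(prev)
	if delta == 0 {
		return 1
	}
	newLeading := uint8(bits.LeadingZeros64(delta))
	newTrailing := uint8(bits.TrailingZeros64(delta))
	if newLeading >= 32 { // Same clamp as the encoder above.
		newLeading = 31
	}
	if *leading != 0xff && newLeading >= *leading && newTrailing >= *trailing {
		return 2 + int(64-*leading-*trailing)
	}
	*leading, *trailing = newLeading, newTrailing
	return 2 + 5 + 6 + int(64-newLeading-newTrailing)
}

func main() {
	samples := []float64{12.0, 12.0, 12.5, 13.0, 13.0}
	leading, trailing := uint8(0xff), uint8(0) // 0xff means "no window yet".
	for i := 1; i < len(samples); i++ {
		n := costBits(samples[i-1], samples[i], &leading, &trailing)
		fmt.Printf("%g -> %g: %d bits\n", samples[i-1], samples[i], n)
	}
}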
43  tsdb/chunkenc/xor_test.go  (new file)
@@ -0,0 +1,43 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chunkenc
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func BenchmarkXorRead(b *testing.B) {
+	c := NewXORChunk()
+	app, err := c.Appender()
+	require.NoError(b, err)
+	for i := int64(0); i < 120*1000; i += 1000 {
+		app.Append(i, float64(i)+float64(i)/10+float64(i)/100+float64(i)/1000)
+	}
+
+	b.ReportAllocs()
+	b.ResetTimer()
+
+	var it Iterator
+	for i := 0; i < b.N; i++ {
+		var ts int64
+		var v float64
+		it = c.Iterator(it)
+		for it.Next() != ValNone {
+			ts, v = it.At()
+		}
+		_, _ = ts, v
+	}
+}
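For consumers outside the package, the benchmark above doubles as a usage example: Next() and Seek() now return a ValueType instead of a bool, and the caller picks the accessor matching the returned type. A small sketch of that contract (the sample values are illustrative; an XOR chunk only ever yields ValFloat):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	c := chunkenc.NewXORChunk()
	app, err := c.Appender()
	if err != nil {
		log.Fatal(err)
	}
	app.Append(1000, 42.0) // (timestamp in ms, value)
	app.Append(2000, 42.5)

	it := c.Iterator(nil)
	for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
		// A float chunk only returns ValFloat; histogram chunks
		// return ValHistogram and require AtHistogram() instead.
		if vt == chunkenc.ValFloat {
			ts, v := it.At()
			fmt.Println(ts, v)
		}
	}
	if err := it.Err(); err != nil {
		log.Fatal(err)
	}
}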
@@ -768,7 +768,8 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
 		chksIter := s.Iterator()
 		chks = chks[:0]
 		for chksIter.Next() {
-			// We are not iterating in streaming way over chunk as it's more efficient to do bulk write for index and
+			// We are not iterating in streaming way over chunk as
+			// it's more efficient to do bulk write for index and
 			// chunk file purposes.
 			chks = append(chks, chksIter.At())
 		}

tsdb/compact_test.go
@@ -17,9 +17,11 @@ import (
 	"context"
 	"fmt"
 	"math"
+	"math/rand"
 	"os"
 	"path"
 	"path/filepath"
+	"sync"
 	"testing"
 	"time"

@@ -29,11 +31,14 @@ import (
 	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
 	"github.com/stretchr/testify/require"
 
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
 	"github.com/prometheus/prometheus/tsdb/fileutil"
 	"github.com/prometheus/prometheus/tsdb/tombstones"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 )
 
 func TestSplitByRange(t *testing.T) {

@@ -968,7 +973,7 @@ func TestCompaction_populateBlock(t *testing.T) {
 					firstTs int64 = math.MaxInt64
 					s       sample
 				)
-				for iter.Next() {
+				for iter.Next() == chunkenc.ValFloat {
 					s.t, s.v = iter.At()
 					if firstTs == math.MaxInt64 {
 						firstTs = s.t
@@ -1292,6 +1297,418 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
 	}
 }
 
+func TestHeadCompactionWithHistograms(t *testing.T) {
+	head, _ := newTestHead(t, DefaultBlockDuration, false, false)
+	require.NoError(t, head.Init(0))
+	t.Cleanup(func() {
+		require.NoError(t, head.Close())
+	})
+
+	minute := func(m int) int64 { return int64(m) * time.Minute.Milliseconds() }
+	ctx := context.Background()
+	appendHistogram := func(lbls labels.Labels, from, to int, h *histogram.Histogram, exp *[]tsdbutil.Sample) {
+		t.Helper()
+		app := head.Appender(ctx)
+		for tsMinute := from; tsMinute <= to; tsMinute++ {
+			_, err := app.AppendHistogram(0, lbls, minute(tsMinute), h)
+			require.NoError(t, err)
+			*exp = append(*exp, sample{t: minute(tsMinute), h: h.Copy()})
+		}
+
+		require.NoError(t, app.Commit())
+	}
+	appendFloat := func(lbls labels.Labels, from, to int, exp *[]tsdbutil.Sample) {
+		t.Helper()
+		app := head.Appender(ctx)
+		for tsMinute := from; tsMinute <= to; tsMinute++ {
+			_, err := app.Append(0, lbls, minute(tsMinute), float64(tsMinute))
+			require.NoError(t, err)
+			*exp = append(*exp, sample{t: minute(tsMinute), v: float64(tsMinute)})
+		}
+		require.NoError(t, app.Commit())
+	}
+
+	var (
+		series1                = labels.FromStrings("foo", "bar1")
+		series2                = labels.FromStrings("foo", "bar2")
+		series3                = labels.FromStrings("foo", "bar3")
+		series4                = labels.FromStrings("foo", "bar4")
+		exp1, exp2, exp3, exp4 []tsdbutil.Sample
+	)
+	h := &histogram.Histogram{
+		Count:         11,
+		ZeroCount:     4,
+		ZeroThreshold: 0.001,
+		Sum:           35.5,
+		Schema:        1,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 2, Length: 2},
+		},
+		PositiveBuckets: []int64{1, 1, -1, 0},
+		NegativeSpans: []histogram.Span{
+			{Offset: 0, Length: 1},
+			{Offset: 1, Length: 2},
+		},
+		NegativeBuckets: []int64{1, 2, -1},
+	}
+
+	// Series with only histograms.
+	appendHistogram(series1, 100, 105, h, &exp1)
+
+	// Series starting with float and then getting histograms.
+	appendFloat(series2, 100, 102, &exp2)
+	appendHistogram(series2, 103, 105, h.Copy(), &exp2)
+	appendFloat(series2, 106, 107, &exp2)
+	appendHistogram(series2, 108, 109, h.Copy(), &exp2)
+
+	// Series starting with histogram and then getting float.
+	appendHistogram(series3, 101, 103, h.Copy(), &exp3)
+	appendFloat(series3, 104, 106, &exp3)
+	appendHistogram(series3, 107, 108, h.Copy(), &exp3)
+	appendFloat(series3, 109, 110, &exp3)
+
+	// A float only series.
+	appendFloat(series4, 100, 102, &exp4)
+
+	// Compaction.
+	mint := head.MinTime()
+	maxt := head.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime).
+	compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil)
+	require.NoError(t, err)
+	id, err := compactor.Write(head.opts.ChunkDirRoot, head, mint, maxt, nil)
+	require.NoError(t, err)
+	require.NotEqual(t, ulid.ULID{}, id)
+
+	// Open the block and query it and check the histograms.
+	block, err := OpenBlock(nil, path.Join(head.opts.ChunkDirRoot, id.String()), nil)
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		require.NoError(t, block.Close())
+	})
+
+	q, err := NewBlockQuerier(block, block.MinTime(), block.MaxTime())
+	require.NoError(t, err)
+
+	actHists := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+	require.Equal(t, map[string][]tsdbutil.Sample{
+		series1.String(): exp1,
+		series2.String(): exp2,
+		series3.String(): exp3,
+		series4.String(): exp4,
+	}, actHists)
+}
+
+// Depending on numSeriesPerSchema, it can take few gigs of memory;
+// the test adds all samples to appender before committing instead of
+// buffering the writes to make it run faster.
+func TestSparseHistogramSpaceSavings(t *testing.T) {
+	t.Skip()
+
+	cases := []struct {
+		numSeriesPerSchema int
+		numBuckets         int
+		numSpans           int
+		gapBetweenSpans    int
+	}{
+		{1, 15, 1, 0},
+		{1, 50, 1, 0},
+		{1, 100, 1, 0},
+		{1, 15, 3, 5},
+		{1, 50, 3, 3},
+		{1, 100, 3, 2},
+		{100, 15, 1, 0},
+		{100, 50, 1, 0},
+		{100, 100, 1, 0},
+		{100, 15, 3, 5},
+		{100, 50, 3, 3},
+		{100, 100, 3, 2},
+		//{1000, 15, 1, 0},
+		//{1000, 50, 1, 0},
+		//{1000, 100, 1, 0},
+		//{1000, 15, 3, 5},
+		//{1000, 50, 3, 3},
+		//{1000, 100, 3, 2},
+	}
+
+	type testSummary struct {
+		oldBlockTotalSeries int
+		oldBlockIndexSize   int64
+		oldBlockChunksSize  int64
+		oldBlockTotalSize   int64
+
+		sparseBlockTotalSeries int
+		sparseBlockIndexSize   int64
+		sparseBlockChunksSize  int64
+		sparseBlockTotalSize   int64
+
+		numBuckets      int
+		numSpans        int
+		gapBetweenSpans int
+	}
+
+	var summaries []testSummary
+
+	allSchemas := []int{-4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}
+	schemaDescription := []string{"minus_4", "minus_3", "minus_2", "minus_1", "0", "1", "2", "3", "4", "5", "6", "7", "8"}
+	numHistograms := 120 * 4 // 15s scrape interval.
+	timeStep := DefaultBlockDuration / int64(numHistograms)
+	for _, c := range cases {
+		t.Run(
+			fmt.Sprintf("series=%d,span=%d,gap=%d,buckets=%d",
+				len(allSchemas)*c.numSeriesPerSchema,
+				c.numSpans,
+				c.gapBetweenSpans,
+				c.numBuckets,
+			),
+			func(t *testing.T) {
+				oldHead, _ := newTestHead(t, DefaultBlockDuration, false, false)
+				t.Cleanup(func() {
+					require.NoError(t, oldHead.Close())
+				})
+				sparseHead, _ := newTestHead(t, DefaultBlockDuration, false, false)
+				t.Cleanup(func() {
+					require.NoError(t, sparseHead.Close())
+				})
+
+				var allSparseSeries []struct {
+					baseLabels labels.Labels
+					hists      []*histogram.Histogram
+				}
+
+				for sid, schema := range allSchemas {
+					for i := 0; i < c.numSeriesPerSchema; i++ {
+						lbls := labels.Labels{
+							{Name: "__name__", Value: fmt.Sprintf("rpc_durations_%d_histogram_seconds", i)},
+							{Name: "instance", Value: "localhost:8080"},
+							{Name: "job", Value: fmt.Sprintf("sparse_histogram_schema_%s", schemaDescription[sid])},
+						}
+						allSparseSeries = append(allSparseSeries, struct {
+							baseLabels labels.Labels
+							hists      []*histogram.Histogram
+						}{baseLabels: lbls, hists: generateCustomHistograms(numHistograms, c.numBuckets, c.numSpans, c.gapBetweenSpans, schema)})
+					}
+				}
+
+				oldApp := oldHead.Appender(context.Background())
+				sparseApp := sparseHead.Appender(context.Background())
+				numOldSeriesPerHistogram := 0
+
+				var oldULID ulid.ULID
+				var sparseULID ulid.ULID
+
+				var wg sync.WaitGroup
+
+				wg.Add(1)
+				go func() {
+					defer wg.Done()
+
+					// Ingest sparse histograms.
+					for _, ah := range allSparseSeries {
+						var (
+							ref storage.SeriesRef
+							err error
+						)
+						for i := 0; i < numHistograms; i++ {
+							ts := int64(i) * timeStep
+							ref, err = sparseApp.AppendHistogram(ref, ah.baseLabels, ts, ah.hists[i])
+							require.NoError(t, err)
+						}
+					}
+					require.NoError(t, sparseApp.Commit())
+
+					// Sparse head compaction.
+					mint := sparseHead.MinTime()
+					maxt := sparseHead.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime).
+					compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil)
+					require.NoError(t, err)
+					sparseULID, err = compactor.Write(sparseHead.opts.ChunkDirRoot, sparseHead, mint, maxt, nil)
+					require.NoError(t, err)
+					require.NotEqual(t, ulid.ULID{}, sparseULID)
+				}()
+
+				wg.Add(1)
+				go func() {
+					defer wg.Done()
+
+					// Ingest histograms the old way.
+					for _, ah := range allSparseSeries {
+						refs := make([]storage.SeriesRef, c.numBuckets+((c.numSpans-1)*c.gapBetweenSpans))
+						for i := 0; i < numHistograms; i++ {
+							ts := int64(i) * timeStep
+
+							h := ah.hists[i]
+
+							numOldSeriesPerHistogram = 0
+							it := h.CumulativeBucketIterator()
+							itIdx := 0
+							var err error
+							for it.Next() {
+								numOldSeriesPerHistogram++
+								b := it.At()
+								lbls := append(ah.baseLabels, labels.Label{Name: "le", Value: fmt.Sprintf("%.16f", b.Upper)})
+								refs[itIdx], err = oldApp.Append(refs[itIdx], lbls, ts, float64(b.Count))
+								require.NoError(t, err)
+								itIdx++
+							}
+							// _count metric.
+							countLbls := ah.baseLabels.Copy()
+							countLbls[0].Value = countLbls[0].Value + "_count"
+							_, err = oldApp.Append(0, countLbls, ts, float64(h.Count))
+							require.NoError(t, err)
+							numOldSeriesPerHistogram++
+
+							// _sum metric.
+							sumLbls := ah.baseLabels.Copy()
+							sumLbls[0].Value = sumLbls[0].Value + "_sum"
+							_, err = oldApp.Append(0, sumLbls, ts, h.Sum)
+							require.NoError(t, err)
+							numOldSeriesPerHistogram++
+						}
+					}
+
+					require.NoError(t, oldApp.Commit())
+
+					// Old head compaction.
+					mint := oldHead.MinTime()
+					maxt := oldHead.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime).
+					compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil)
+					require.NoError(t, err)
+					oldULID, err = compactor.Write(oldHead.opts.ChunkDirRoot, oldHead, mint, maxt, nil)
+					require.NoError(t, err)
+					require.NotEqual(t, ulid.ULID{}, oldULID)
+				}()
+
+				wg.Wait()
+
+				oldBlockDir := filepath.Join(oldHead.opts.ChunkDirRoot, oldULID.String())
+				sparseBlockDir := filepath.Join(sparseHead.opts.ChunkDirRoot, sparseULID.String())
+
+				oldSize, err := fileutil.DirSize(oldBlockDir)
+				require.NoError(t, err)
+				oldIndexSize, err := fileutil.DirSize(filepath.Join(oldBlockDir, "index"))
+				require.NoError(t, err)
+				oldChunksSize, err := fileutil.DirSize(filepath.Join(oldBlockDir, "chunks"))
+				require.NoError(t, err)
+
+				sparseSize, err := fileutil.DirSize(sparseBlockDir)
+				require.NoError(t, err)
+				sparseIndexSize, err := fileutil.DirSize(filepath.Join(sparseBlockDir, "index"))
+				require.NoError(t, err)
+				sparseChunksSize, err := fileutil.DirSize(filepath.Join(sparseBlockDir, "chunks"))
+				require.NoError(t, err)
+
+				summaries = append(summaries, testSummary{
+					oldBlockTotalSeries:    len(allSchemas) * c.numSeriesPerSchema * numOldSeriesPerHistogram,
+					oldBlockIndexSize:      oldIndexSize,
+					oldBlockChunksSize:     oldChunksSize,
+					oldBlockTotalSize:      oldSize,
+					sparseBlockTotalSeries: len(allSchemas) * c.numSeriesPerSchema,
+					sparseBlockIndexSize:   sparseIndexSize,
+					sparseBlockChunksSize:  sparseChunksSize,
+					sparseBlockTotalSize:   sparseSize,
+					numBuckets:             c.numBuckets,
+					numSpans:               c.numSpans,
+					gapBetweenSpans:        c.gapBetweenSpans,
+				})
+			})
+	}
+
+	for _, s := range summaries {
+		fmt.Printf(`
+Meta: NumBuckets=%d, NumSpans=%d, GapBetweenSpans=%d
+Old Block: NumSeries=%d, IndexSize=%d, ChunksSize=%d, TotalSize=%d
+Sparse Block: NumSeries=%d, IndexSize=%d, ChunksSize=%d, TotalSize=%d
+Savings: Index=%.2f%%, Chunks=%.2f%%, Total=%.2f%%
+`,
+			s.numBuckets, s.numSpans, s.gapBetweenSpans,
+			s.oldBlockTotalSeries, s.oldBlockIndexSize, s.oldBlockChunksSize, s.oldBlockTotalSize,
+			s.sparseBlockTotalSeries, s.sparseBlockIndexSize, s.sparseBlockChunksSize, s.sparseBlockTotalSize,
+			100*(1-float64(s.sparseBlockIndexSize)/float64(s.oldBlockIndexSize)),
+			100*(1-float64(s.sparseBlockChunksSize)/float64(s.oldBlockChunksSize)),
+			100*(1-float64(s.sparseBlockTotalSize)/float64(s.oldBlockTotalSize)),
+		)
+	}
+}
+
+func generateCustomHistograms(numHists, numBuckets, numSpans, gapBetweenSpans, schema int) (r []*histogram.Histogram) {
+	// First histogram with all the settings.
+	h := &histogram.Histogram{
+		Sum:    1000 * rand.Float64(),
+		Schema: int32(schema),
+	}
+
+	// Generate spans.
+	h.PositiveSpans = []histogram.Span{
+		{Offset: int32(rand.Intn(10)), Length: uint32(numBuckets)},
+	}
+	if numSpans > 1 {
+		spanWidth := numBuckets / numSpans
+		// First span gets those additional buckets.
+		h.PositiveSpans[0].Length = uint32(spanWidth + (numBuckets - spanWidth*numSpans))
+		for i := 0; i < numSpans-1; i++ {
+			h.PositiveSpans = append(h.PositiveSpans, histogram.Span{Offset: int32(rand.Intn(gapBetweenSpans) + 1), Length: uint32(spanWidth)})
+		}
+	}
+
+	// Generate buckets.
+	v := int64(rand.Intn(30) + 1)
+	h.PositiveBuckets = []int64{v}
+	count := v
+	firstHistValues := []int64{v}
+	for i := 0; i < numBuckets-1; i++ {
+		delta := int64(rand.Intn(20))
+		if rand.Int()%2 == 0 && firstHistValues[len(firstHistValues)-1] > delta {
+			// Randomly making delta negative such that curr value will be >0.
+			delta = -delta
+		}
+
+		currVal := firstHistValues[len(firstHistValues)-1] + delta
+		count += currVal
+		firstHistValues = append(firstHistValues, currVal)
+
+		h.PositiveBuckets = append(h.PositiveBuckets, delta)
+	}
+
+	h.Count = uint64(count)
+
+	r = append(r, h)
+
+	// Remaining histograms with same spans but changed bucket values.
+	for j := 0; j < numHists-1; j++ {
+		newH := h.Copy()
+		newH.Sum = float64(j+1) * 1000 * rand.Float64()
+
+		// Generate buckets.
+		count := int64(0)
+		currVal := int64(0)
+		for i := range newH.PositiveBuckets {
+			delta := int64(rand.Intn(10))
+			if i == 0 {
+				newH.PositiveBuckets[i] += delta
+				currVal = newH.PositiveBuckets[i]
+				continue
+			}
+			currVal += newH.PositiveBuckets[i]
+			if rand.Int()%2 == 0 && (currVal-delta) > firstHistValues[i] {
+				// Randomly making delta negative such that curr value will be >0
+				// and above the previous count since we are not doing resets here.
+				delta = -delta
+			}
+			newH.PositiveBuckets[i] += delta
+			currVal += delta
+			count += currVal
+		}
+
+		newH.Count = uint64(count)
+
+		r = append(r, newH)
+		h = newH
+	}
+
+	return r
+}
+
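The PositiveBuckets/NegativeBuckets slices used throughout these tests are delta-encoded: the first entry is an absolute bucket count and each later entry is the difference from the previous bucket's absolute count, which is why generateCustomHistograms keeps a running currVal. A hedged decoding sketch (the span-offset convention — first offset relative to index 0, later offsets counted from the end of the previous span — is my reading of the format, not quoted from this commit):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// decodeDeltaBuckets expands delta-encoded bucket counts and walks the
// spans to recover each bucket's index alongside its absolute count.
func decodeDeltaBuckets(spans []histogram.Span, deltas []int64) map[int32]int64 {
	out := make(map[int32]int64)
	var idx int32
	var count int64
	d := 0
	for _, s := range spans {
		idx += s.Offset // Skip the gap encoded by the span offset.
		for i := uint32(0); i < s.Length; i++ {
			count += deltas[d] // Running sum turns deltas into absolute counts.
			out[idx] = count
			idx++
			d++
		}
	}
	return out
}

func main() {
	// The positive side of the histogram used in TestHeadCompactionWithHistograms above.
	spans := []histogram.Span{{Offset: 0, Length: 2}, {Offset: 2, Length: 2}}
	deltas := []int64{1, 1, -1, 0}
	fmt.Println(decodeDeltaBuckets(spans, deltas))
	// Expected: index 0 -> 1, index 1 -> 2, index 4 -> 1, index 5 -> 1.
}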
 func TestCompactBlockMetas(t *testing.T) {
 	parent1 := ulid.MustNew(100, nil)
 	parent2 := ulid.MustNew(200, nil)

15  tsdb/db.go
@@ -81,6 +81,7 @@ func DefaultOptions() *Options {
 		StripeSize:                DefaultStripeSize,
 		HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize,
 		IsolationDisabled:         defaultIsolationDisabled,
+		HeadChunksWriteQueueSize:  chunks.DefaultWriteQueueSize,
 		OutOfOrderCapMax:          DefaultOutOfOrderCapMax,
 	}
 }

@@ -166,6 +167,9 @@ type Options struct {
 	// Disables isolation between reads and in-flight appends.
 	IsolationDisabled bool
 
+	// EnableNativeHistograms enables the ingestion of native histograms.
+	EnableNativeHistograms bool
+
 	// OutOfOrderTimeWindow specifies how much out of order is allowed, if any.
 	// This can change during run-time, so this value from here should only be used
 	// while initialising.

@@ -775,6 +779,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
 	headOpts.EnableExemplarStorage = opts.EnableExemplarStorage
 	headOpts.MaxExemplars.Store(opts.MaxExemplars)
 	headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown
+	headOpts.EnableNativeHistograms.Store(opts.EnableNativeHistograms)
 	headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow)
 	headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax)
 	if opts.IsolationDisabled {

@@ -974,6 +979,16 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
 	return nil
 }
 
+// EnableNativeHistograms enables the native histogram feature.
+func (db *DB) EnableNativeHistograms() {
+	db.head.EnableNativeHistograms()
+}
+
+// DisableNativeHistograms disables the native histogram feature.
+func (db *DB) DisableNativeHistograms() {
+	db.head.DisableNativeHistograms()
+}
+
 // dbAppender wraps the DB's head appender and triggers compactions on commit
 // if necessary.
 type dbAppender struct {
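Putting the new Options field and DB methods together, an embedding application could enable the feature like this (a minimal sketch based only on the API added above; the ./data directory is illustrative):

package main

import (
	"log"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	opts := tsdb.DefaultOptions()
	opts.EnableNativeHistograms = true // Accept AppendHistogram calls from the start.

	db, err := tsdb.Open("./data", nil, nil, opts, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The feature can also be toggled on a running DB, mirroring how
	// Prometheus itself reacts to configuration changes.
	db.DisableNativeHistograms()
	db.EnableNativeHistograms()
}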
507  tsdb/db_test.go
@@ -41,6 +41,7 @@ import (
 	"go.uber.org/goleak"
 
 	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/storage"

@@ -68,6 +69,11 @@ func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
 	tmpdir := t.TempDir()
 	var err error
 
+	if opts == nil {
+		opts = DefaultOptions()
+	}
+	opts.EnableNativeHistograms = true
+
 	if len(rngs) == 0 {
 		db, err = Open(tmpdir, nil, nil, opts, nil)
 	} else {
@@ -93,9 +99,17 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[str
 
 		samples := []tsdbutil.Sample{}
 		it := series.Iterator()
-		for it.Next() {
-			t, v := it.At()
-			samples = append(samples, sample{t: t, v: v})
+		for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
+			switch typ {
+			case chunkenc.ValFloat:
+				ts, v := it.At()
+				samples = append(samples, sample{t: ts, v: v})
+			case chunkenc.ValHistogram:
+				ts, h := it.AtHistogram()
+				samples = append(samples, sample{t: ts, h: h})
+			default:
+				t.Fatalf("unknown sample type in query %s", typ.String())
+			}
 		}
 		require.NoError(t, it.Err())
@@ -420,7 +434,7 @@ Outer:
 
 		expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
 		for _, ts := range c.remaint {
-			expSamples = append(expSamples, sample{ts, smpls[ts]})
+			expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
 		}
 
 		expss := newMockSeriesSet([]storage.Series{
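The two trailing nils that appear in sample literals from here on exist because the test's sample type gained histogram fields alongside t and v. Its exact definition is outside this excerpt; inferred from the positional literals and the t:/v:/h: field names used elsewhere in the diff, it is something like:

// Inferred sketch, not quoted from the commit.
type sample struct {
	t  int64
	v  float64
	h  *histogram.Histogram      // nil for plain float samples
	fh *histogram.FloatHistogram // nil unless a float histogram was stored
}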
@@ -462,7 +476,35 @@ func TestAmendDatapointCausesError(t *testing.T) {
 	require.NoError(t, app.Commit())
 
 	app = db.Appender(ctx)
-	_, err = app.Append(0, labels.FromStrings("a", "b"), 0, 1)
+	_, err = app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 0, 0)
+	require.NoError(t, err)
+	_, err = app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, 0, 1)
+	require.Equal(t, storage.ErrDuplicateSampleForTimestamp, err)
+	require.NoError(t, app.Rollback())
+
+	h := histogram.Histogram{
+		Schema:        3,
+		Count:         61,
+		Sum:           2.7,
+		ZeroThreshold: 0.1,
+		ZeroCount:     42,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 4},
+			{Offset: 10, Length: 3},
+		},
+		PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
+	}
+
+	app = db.Appender(ctx)
+	_, err = app.AppendHistogram(0, labels.Labels{{Name: "a", Value: "c"}}, 0, h.Copy())
+	require.NoError(t, err)
+	require.NoError(t, app.Commit())
+
+	app = db.Appender(ctx)
+	_, err = app.AppendHistogram(0, labels.Labels{{Name: "a", Value: "c"}}, 0, h.Copy())
+	require.NoError(t, err)
+	h.Schema = 2
+	_, err = app.AppendHistogram(0, labels.Labels{{Name: "a", Value: "c"}}, 0, h.Copy())
 	require.Equal(t, storage.ErrDuplicateSampleForTimestamp, err)
 	require.NoError(t, app.Rollback())
 }
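The Schema field the test flips from 3 to 2 controls bucket resolution: at schema s, consecutive bucket boundaries grow by a factor of 2^(2^-s), so positive bucket i has upper bound (2^(2^-s))^i. A small sketch of that arithmetic (my summary of the native histograms design doc linked in the commit message, not code from this commit):

package main

import (
	"fmt"
	"math"
)

// upperBound returns the upper boundary of positive bucket `index`
// at the given schema, assuming the 2^(2^-schema) growth factor.
func upperBound(schema, index int) float64 {
	base := math.Pow(2, math.Pow(2, float64(-schema)))
	return math.Pow(base, float64(index))
}

func main() {
	for _, schema := range []int{2, 3} {
		fmt.Printf("schema %d: bucket 1 upper bound %.6f\n", schema, upperBound(schema, 1))
	}
	// schema 3 -> 2^(1/8) ≈ 1.090508; schema 2 -> 2^(1/4) ≈ 1.189207.
	// Coarser schema means wider buckets, which is why a schema change
	// cannot share the encoding of the preceding samples.
}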
@@ -536,7 +578,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
 	ssMap := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
 
 	require.Equal(t, map[string][]tsdbutil.Sample{
-		labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1}},
+		labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}},
 	}, ssMap)
 
 	// Append Out of Order Value.

@@ -553,7 +595,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
 	ssMap = query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
 
 	require.Equal(t, map[string][]tsdbutil.Sample{
-		labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1}, sample{10, 3}},
+		labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}, sample{10, 3, nil, nil}},
 	}, ssMap)
 }
@@ -589,7 +631,7 @@ func TestDB_Snapshot(t *testing.T) {
 	sum := 0.0
 	for seriesSet.Next() {
 		series := seriesSet.At().Iterator()
-		for series.Next() {
+		for series.Next() == chunkenc.ValFloat {
 			_, v := series.At()
 			sum += v
 		}

@@ -637,7 +679,7 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
 	sum := 0.0
 	for seriesSet.Next() {
 		series := seriesSet.At().Iterator()
-		for series.Next() {
+		for series.Next() == chunkenc.ValFloat {
 			_, v := series.At()
 			sum += v
 		}

@@ -703,7 +745,7 @@ Outer:
 
 		expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
 		for _, ts := range c.remaint {
-			expSamples = append(expSamples, sample{ts, smpls[ts]})
+			expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
 		}
 
 		expss := newMockSeriesSet([]storage.Series{

@@ -808,7 +850,7 @@ func TestDB_e2e(t *testing.T) {
 			for i := 0; i < numDatapoints; i++ {
 				v := rand.Float64()
 
-				series = append(series, sample{ts, v})
+				series = append(series, sample{ts, v, nil, nil})
 
 				_, err := app.Append(0, lset, ts, v)
 				require.NoError(t, err)

@@ -1139,7 +1181,7 @@ func TestTombstoneClean(t *testing.T) {
 
 			expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
 			for _, ts := range c.remaint {
-				expSamples = append(expSamples, sample{ts, smpls[ts]})
+				expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
 			}
 
 			expss := newMockSeriesSet([]storage.Series{

@@ -1442,7 +1484,7 @@ func TestSizeRetention(t *testing.T) {
 	for _, s := range series {
 		aSeries = s.Labels()
 		it := s.Iterator()
-		for it.Next() {
+		for it.Next() == chunkenc.ValFloat {
 			tim, v := it.At()
 			_, err := headApp.Append(0, s.Labels(), tim, v)
 			require.NoError(t, err)

@@ -1653,7 +1695,7 @@ func expandSeriesSet(ss storage.SeriesSet) ([]labels.Labels, map[string][]sample
 		series := ss.At()
 		samples := []sample{}
 		it := series.Iterator()
-		for it.Next() {
+		for it.Next() == chunkenc.ValFloat {
 			t, v := it.At()
 			samples = append(samples, sample{t: t, v: v})
 		}

@@ -2462,7 +2504,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
 	sum := 0.0
 	for seriesSet.Next() {
 		series := seriesSet.At().Iterator()
-		for series.Next() {
+		for series.Next() == chunkenc.ValFloat {
 			_, v := series.At()
 			sum += v
 		}
@@ -2614,11 +2656,11 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
 // TestChunkWriter_ReadAfterWrite ensures that chunk segment are cut at the set segment size and
 // that the resulted segments includes the expected chunks data.
 func TestChunkWriter_ReadAfterWrite(t *testing.T) {
-	chk1 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 1}})
-	chk2 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 2}})
-	chk3 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 3}})
-	chk4 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 4}})
-	chk5 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 5}})
+	chk1 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 1, nil, nil}})
+	chk2 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 2, nil, nil}})
+	chk3 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 3, nil, nil}})
+	chk4 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 4, nil, nil}})
+	chk5 := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 5, nil, nil}})
 	chunkSize := len(chk1.Chunk.Bytes()) + chunks.MaxChunkLengthFieldSize + chunks.ChunkEncodingSize + crc32.Size
 
 	tests := []struct {

@@ -2818,11 +2860,11 @@ func TestRangeForTimestamp(t *testing.T) {
 // Regression test for https://github.com/prometheus/prometheus/pull/6514.
 func TestChunkReader_ConcurrentReads(t *testing.T) {
 	chks := []chunks.Meta{
-		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 1}}),
-		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 2}}),
-		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 3}}),
-		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 4}}),
-		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 5}}),
+		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 1, nil, nil}}),
+		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 2, nil, nil}}),
+		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 3, nil, nil}}),
+		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 4, nil, nil}}),
+		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 5, nil, nil}}),
 	}
 
 	tempDir := t.TempDir()

@@ -2883,7 +2925,7 @@ func TestCompactHead(t *testing.T) {
 		val := rand.Float64()
 		_, err := app.Append(0, labels.FromStrings("a", "b"), int64(i), val)
 		require.NoError(t, err)
-		expSamples = append(expSamples, sample{int64(i), val})
+		expSamples = append(expSamples, sample{int64(i), val, nil, nil})
 	}
 	require.NoError(t, app.Commit())

@@ -2908,9 +2950,9 @@ func TestCompactHead(t *testing.T) {
 
 	for seriesSet.Next() {
 		series := seriesSet.At().Iterator()
-		for series.Next() {
+		for series.Next() == chunkenc.ValFloat {
 			time, val := series.At()
-			actSamples = append(actSamples, sample{int64(time), val})
+			actSamples = append(actSamples, sample{int64(time), val, nil, nil})
 		}
 		require.NoError(t, series.Err())
 	}

@@ -3318,7 +3360,7 @@ func testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t
 	var sum float64
 	var firstErr error
 	for _, it := range iterators {
-		for it.Next() {
+		for it.Next() == chunkenc.ValFloat {
 			_, v := it.At()
 			sum += v
 		}
@@ -4027,8 +4069,8 @@ func TestOOOCompaction(t *testing.T) {
 		fromMins, toMins := r[0], r[1]
 		for min := fromMins; min <= toMins; min++ {
 			ts := min * time.Minute.Milliseconds()
-			series1Samples = append(series1Samples, sample{ts, float64(ts)})
-			series2Samples = append(series2Samples, sample{ts, float64(2 * ts)})
+			series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
+			series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
 		}
 	}
 	expRes := map[string][]tsdbutil.Sample{

@@ -4095,8 +4137,8 @@ func TestOOOCompaction(t *testing.T) {
 		series2Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
 		for min := fromMins; min <= toMins; min++ {
 			ts := min * time.Minute.Milliseconds()
-			series1Samples = append(series1Samples, sample{ts, float64(ts)})
-			series2Samples = append(series2Samples, sample{ts, float64(2 * ts)})
+			series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
+			series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
 		}
 		expRes := map[string][]tsdbutil.Sample{
 			series1.String(): series1Samples,

@@ -4227,8 +4269,8 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
 		series2Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
 		for min := fromMins; min <= toMins; min++ {
 			ts := min * time.Minute.Milliseconds()
-			series1Samples = append(series1Samples, sample{ts, float64(ts)})
-			series2Samples = append(series2Samples, sample{ts, float64(2 * ts)})
+			series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
+			series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
 		}
 		expRes := map[string][]tsdbutil.Sample{
 			series1.String(): series1Samples,

@@ -4415,7 +4457,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
 			var gotSamples []tsdbutil.Sample
 			for _, chunk := range chks[series1.String()] {
 				it := chunk.Chunk.Iterator(nil)
-				for it.Next() {
+				for it.Next() == chunkenc.ValFloat {
 					ts, v := it.At()
 					gotSamples = append(gotSamples, sample{t: ts, v: v})
 				}

@@ -4601,7 +4643,7 @@ func TestOOODisabled(t *testing.T) {
 	require.Equal(t, expSamples, seriesSet)
 	require.Equal(t, float64(0), prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended), "number of ooo appended samples mismatch")
 	require.Equal(t, float64(failedSamples),
-		prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples)+prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples),
+		prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat))+prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat)),
 		"number of ooo/oob samples mismatch")
 
 	// Verifying that no OOO artifacts were generated.
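The outOfOrderSamples and outOfBoundSamples metrics are now vectors partitioned by sample type, hence the WithLabelValues(sampleMetricTypeFloat) lookups above. A hedged sketch of that pattern with client_golang (the metric name, help string, and label name are assumptions, not quoted from the commit):

package main

import "github.com/prometheus/client_golang/prometheus"

const (
	sampleMetricTypeFloat     = "float" // assumed label values
	sampleMetricTypeHistogram = "histogram"
)

func main() {
	outOfOrderSamples := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "prometheus_tsdb_out_of_order_samples_total",
		Help: "Total number of out-of-order samples that failed ingestion.",
	}, []string{"type"})
	prometheus.MustRegister(outOfOrderSamples)

	// Each ingestion path increments the counter for its own sample type.
	outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
	outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
}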
@@ -4681,7 +4723,7 @@ func TestWBLAndMmapReplay(t *testing.T) {
 		chk, err := db.head.chunkDiskMapper.Chunk(mc.ref)
 		require.NoError(t, err)
 		it := chk.Iterator(nil)
-		for it.Next() {
+		for it.Next() == chunkenc.ValFloat {
 			ts, val := it.At()
 			s1MmapSamples = append(s1MmapSamples, sample{t: ts, v: val})
 		}

@@ -4910,7 +4952,7 @@ func TestOOOCompactionFailure(t *testing.T) {
 		series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
 		for min := fromMins; min <= toMins; min++ {
 			ts := min * time.Minute.Milliseconds()
-			series1Samples = append(series1Samples, sample{ts, float64(ts)})
+			series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
 		}
 		expRes := map[string][]tsdbutil.Sample{
 			series1.String(): series1Samples,
@@ -5733,3 +5775,390 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, int64(0), finfo.Size())
 }
+
+func TestHistogramAppendAndQuery(t *testing.T) {
+	db := openTestDB(t, nil, nil)
+	minute := func(m int) int64 { return int64(m) * time.Minute.Milliseconds() }
+	t.Cleanup(func() {
+		require.NoError(t, db.Close())
+	})
+
+	ctx := context.Background()
+	appendHistogram := func(lbls labels.Labels, tsMinute int, h *histogram.Histogram, exp *[]tsdbutil.Sample) {
+		t.Helper()
+		app := db.Appender(ctx)
+		_, err := app.AppendHistogram(0, lbls, minute(tsMinute), h)
+		require.NoError(t, err)
+		require.NoError(t, app.Commit())
+		*exp = append(*exp, sample{t: minute(tsMinute), h: h.Copy()})
+	}
+	appendFloat := func(lbls labels.Labels, tsMinute int, val float64, exp *[]tsdbutil.Sample) {
+		t.Helper()
+		app := db.Appender(ctx)
+		_, err := app.Append(0, lbls, minute(tsMinute), val)
+		require.NoError(t, err)
+		require.NoError(t, app.Commit())
+		*exp = append(*exp, sample{t: minute(tsMinute), v: val})
+	}
+
+	testQuery := func(name, value string, exp map[string][]tsdbutil.Sample) {
+		t.Helper()
+		q, err := db.Querier(ctx, math.MinInt64, math.MaxInt64)
+		require.NoError(t, err)
+		act := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, name, value))
+		require.Equal(t, exp, act)
+	}
+
+	baseH := &histogram.Histogram{
+		Count:         11,
+		ZeroCount:     4,
+		ZeroThreshold: 0.001,
+		Sum:           35.5,
+		Schema:        1,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 2, Length: 2},
+		},
+		PositiveBuckets: []int64{1, 1, -1, 0},
+		NegativeSpans: []histogram.Span{
+			{Offset: 0, Length: 1},
+			{Offset: 1, Length: 2},
+		},
+		NegativeBuckets: []int64{1, 2, -1},
+	}
+
+	var (
+		series1                = labels.FromStrings("foo", "bar1")
+		series2                = labels.FromStrings("foo", "bar2")
+		series3                = labels.FromStrings("foo", "bar3")
+		series4                = labels.FromStrings("foo", "bar4")
+		exp1, exp2, exp3, exp4 []tsdbutil.Sample
+	)
+
+	// TODO(codesome): test everything for negative buckets as well.
+	t.Run("series with only histograms", func(t *testing.T) {
+		h := baseH.Copy() // This is shared across all sub tests.
+
+		appendHistogram(series1, 100, h.Copy(), &exp1)
+		testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+
+		h.PositiveBuckets[0]++
+		h.NegativeBuckets[0] += 2
+		h.Count += 10
+		appendHistogram(series1, 101, h.Copy(), &exp1)
+		testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+
+		t.Run("changing schema", func(t *testing.T) {
+			h.Schema = 2
+			appendHistogram(series1, 102, h.Copy(), &exp1)
+			testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+
+			// Schema back to old.
+			h.Schema = 1
+			appendHistogram(series1, 103, h.Copy(), &exp1)
+			testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+		})
+
+		t.Run("new buckets incoming", func(t *testing.T) {
+			// In the previous unit test, during the last histogram append, we
+			// changed the schema and that caused a new chunk creation. Because
+			// of the next append the layout of the last histogram will change
+			// because the chunk will be re-encoded. So this forces us to modify
+			// the last histogram in exp1 so when we query we get the expected
+			// results.
+			lh := exp1[len(exp1)-1].H().Copy()
+			lh.PositiveSpans[1].Length++
+			lh.PositiveBuckets = append(lh.PositiveBuckets, -2) // -2 makes the last bucket 0.
+			exp1[len(exp1)-1] = sample{t: exp1[len(exp1)-1].T(), h: lh}
+
+			// This histogram with new bucket at the end causes the re-encoding of the previous histogram.
+			// Hence the previous histogram is recoded into this new layout.
+			// But the query returns the histogram from the in-memory buffer, hence we don't see the recode here yet.
+			h.PositiveSpans[1].Length++
+			h.PositiveBuckets = append(h.PositiveBuckets, 1)
+			h.Count += 3
+			appendHistogram(series1, 104, h.Copy(), &exp1)
+			testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+
+			// Because of the previous two histograms being on the active chunk,
+			// and the next append is only adding a new bucket, the active chunk
+			// will be re-encoded to the new layout.
+			lh = exp1[len(exp1)-2].H().Copy()
+			lh.PositiveSpans[0].Length++
+			lh.PositiveSpans[1].Offset--
+			lh.PositiveBuckets = []int64{2, 1, -3, 2, 0, -2}
+			exp1[len(exp1)-2] = sample{t: exp1[len(exp1)-2].T(), h: lh}
+
+			lh = exp1[len(exp1)-1].H().Copy()
+			lh.PositiveSpans[0].Length++
+			lh.PositiveSpans[1].Offset--
+			lh.PositiveBuckets = []int64{2, 1, -3, 2, 0, 1}
+			exp1[len(exp1)-1] = sample{t: exp1[len(exp1)-1].T(), h: lh}
+
+			// Now we add the new buckets in between. Empty bucket is again not present for the old histogram.
+			h.PositiveSpans[0].Length++
+			h.PositiveSpans[1].Offset--
+			h.Count += 3
+			// {2, 1, -1, 0, 1} -> {2, 1, 0, -1, 0, 1}
+			h.PositiveBuckets = append(h.PositiveBuckets[:2], append([]int64{0}, h.PositiveBuckets[2:]...)...)
+			appendHistogram(series1, 105, h.Copy(), &exp1)
+			testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+
+			// We add 4 more histograms to clear out the buffer and see the re-encoded histograms.
+			appendHistogram(series1, 106, h.Copy(), &exp1)
+			appendHistogram(series1, 107, h.Copy(), &exp1)
+			appendHistogram(series1, 108, h.Copy(), &exp1)
+			appendHistogram(series1, 109, h.Copy(), &exp1)
+
+			// Update the expected histograms to reflect the re-encoding.
+			l := len(exp1)
+			h7 := exp1[l-7].H()
+			h7.PositiveSpans = exp1[l-1].H().PositiveSpans
+			h7.PositiveBuckets = []int64{2, 1, -3, 2, 0, -2} // -3 and -2 are the empty buckets.
+			exp1[l-7] = sample{t: exp1[l-7].T(), h: h7}
+
+			h6 := exp1[l-6].H()
+			h6.PositiveSpans = exp1[l-1].H().PositiveSpans
+			h6.PositiveBuckets = []int64{2, 1, -3, 2, 0, 1} // -3 is the empty bucket.
+			exp1[l-6] = sample{t: exp1[l-6].T(), h: h6}
+
+			testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+		})
+
+		t.Run("buckets disappearing", func(t *testing.T) {
+			h.PositiveSpans[1].Length--
+			h.PositiveBuckets = h.PositiveBuckets[:len(h.PositiveBuckets)-1]
+			appendHistogram(series1, 110, h.Copy(), &exp1)
+			testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("series starting with float and then getting histograms", func(t *testing.T) {
|
||||||
|
appendFloat(series2, 100, 100, &exp2)
|
||||||
|
appendFloat(series2, 101, 101, &exp2)
|
||||||
|
appendFloat(series2, 102, 102, &exp2)
|
||||||
|
testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2})
|
||||||
|
|
||||||
|
h := baseH.Copy()
|
||||||
|
appendHistogram(series2, 103, h.Copy(), &exp2)
|
||||||
|
appendHistogram(series2, 104, h.Copy(), &exp2)
|
||||||
|
appendHistogram(series2, 105, h.Copy(), &exp2)
|
||||||
|
testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2})
|
||||||
|
|
||||||
|
// Switching between float and histograms again.
|
||||||
|
appendFloat(series2, 106, 106, &exp2)
|
||||||
|
appendFloat(series2, 107, 107, &exp2)
|
||||||
|
testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2})
|
||||||
|
|
||||||
|
appendHistogram(series2, 108, h.Copy(), &exp2)
|
||||||
|
appendHistogram(series2, 109, h.Copy(), &exp2)
|
||||||
|
testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("series starting with histogram and then getting float", func(t *testing.T) {
|
||||||
|
h := baseH.Copy()
|
||||||
|
appendHistogram(series3, 101, h.Copy(), &exp3)
|
||||||
|
appendHistogram(series3, 102, h.Copy(), &exp3)
|
||||||
|
appendHistogram(series3, 103, h.Copy(), &exp3)
|
||||||
|
testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3})
|
||||||
|
|
||||||
|
appendFloat(series3, 104, 100, &exp3)
|
||||||
|
appendFloat(series3, 105, 101, &exp3)
|
||||||
|
appendFloat(series3, 106, 102, &exp3)
|
||||||
|
testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3})
|
||||||
|
|
||||||
|
// Switching between histogram and float again.
|
||||||
|
appendHistogram(series3, 107, h.Copy(), &exp3)
|
||||||
|
appendHistogram(series3, 108, h.Copy(), &exp3)
|
||||||
|
testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3})
|
||||||
|
|
||||||
|
appendFloat(series3, 109, 106, &exp3)
|
||||||
|
appendFloat(series3, 110, 107, &exp3)
|
||||||
|
testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("query mix of histogram and float series", func(t *testing.T) {
|
||||||
|
// A float only series.
|
||||||
|
appendFloat(series4, 100, 100, &exp4)
|
||||||
|
appendFloat(series4, 101, 101, &exp4)
|
||||||
|
appendFloat(series4, 102, 102, &exp4)
|
||||||
|
|
||||||
|
testQuery("foo", "bar.*", map[string][]tsdbutil.Sample{
|
||||||
|
series1.String(): exp1,
|
||||||
|
series2.String(): exp2,
|
||||||
|
series3.String(): exp3,
|
||||||
|
series4.String(): exp4,
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
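Throughout the test above, `PositiveBuckets` and `NegativeBuckets` hold deltas rather than absolute counts: each entry stores the difference to the previous bucket, which is what the chunk encoding documented later in this diff actually persists. A minimal standalone sketch of that convention (the helper name is illustrative, not part of the PR):

```go
package main

import "fmt"

// absoluteCounts expands delta-encoded bucket values, as used in
// histogram.Histogram.PositiveBuckets above, into absolute counts.
func absoluteCounts(deltas []int64) []int64 {
	abs := make([]int64, len(deltas))
	var cur int64
	for i, d := range deltas {
		cur += d
		abs[i] = cur
	}
	return abs
}

func main() {
	// The test's baseH uses positive deltas {1, 1, -1, 0}, i.e. buckets
	// with absolute counts 1, 2, 1, 1 (5 positive observations, which
	// together with the 6 negative ones matches baseH's Count of 11).
	fmt.Println(absoluteCounts([]int64{1, 1, -1, 0})) // [1 2 1 1]
}
```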

func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
	minute := func(m int) int64 { return int64(m) * time.Minute.Milliseconds() }

	testBlockQuerying := func(t *testing.T, blockSeries ...[]storage.Series) {
		t.Helper()

		opts := DefaultOptions()
		opts.AllowOverlappingCompaction = true // TODO(jesus.vazquez) This replaced AllowOverlappingBlocks, make sure that works
		db := openTestDB(t, opts, nil)
		t.Cleanup(func() {
			require.NoError(t, db.Close())
		})

		ctx := context.Background()

		exp := make(map[string][]tsdbutil.Sample)
		for _, series := range blockSeries {
			createBlock(t, db.Dir(), series)

			for _, s := range series {
				key := s.Labels().String()
				it := s.Iterator()
				slice := exp[key]
				for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
					switch typ {
					case chunkenc.ValFloat:
						ts, v := it.At()
						slice = append(slice, sample{t: ts, v: v})
					case chunkenc.ValHistogram:
						ts, h := it.AtHistogram()
						slice = append(slice, sample{t: ts, h: h})
					}
				}
				sort.Slice(slice, func(i, j int) bool {
					return slice[i].T() < slice[j].T()
				})
				exp[key] = slice
			}
		}

		require.Len(t, db.Blocks(), 0)
		require.NoError(t, db.reload())
		require.Len(t, db.Blocks(), len(blockSeries))

		q, err := db.Querier(ctx, math.MinInt64, math.MaxInt64)
		require.NoError(t, err)
		res := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
		require.Equal(t, exp, res)

		// Compact all the blocks together and query again.
		blocks := db.Blocks()
		blockDirs := make([]string, 0, len(blocks))
		for _, b := range blocks {
			blockDirs = append(blockDirs, b.Dir())
		}
		id, err := db.compactor.Compact(db.Dir(), blockDirs, blocks)
		require.NoError(t, err)
		require.NotEqual(t, ulid.ULID{}, id)
		require.NoError(t, db.reload())
		require.Len(t, db.Blocks(), 1)

		q, err = db.Querier(ctx, math.MinInt64, math.MaxInt64)
		require.NoError(t, err)
		res = query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
		require.Equal(t, exp, res)
	}

	t.Run("serial blocks with only histograms", func(t *testing.T) {
		testBlockQuerying(t,
			genHistogramSeries(10, 5, minute(0), minute(119), minute(1)),
			genHistogramSeries(10, 5, minute(120), minute(239), minute(1)),
			genHistogramSeries(10, 5, minute(240), minute(359), minute(1)),
		)
	})

	t.Run("serial blocks with either histograms or floats in a block and not both", func(t *testing.T) {
		testBlockQuerying(t,
			genHistogramSeries(10, 5, minute(0), minute(119), minute(1)),
			genSeriesFromSampleGenerator(10, 5, minute(120), minute(239), minute(1), func(ts int64) tsdbutil.Sample {
				return sample{t: ts, v: rand.Float64()}
			}),
			genHistogramSeries(10, 5, minute(240), minute(359), minute(1)),
		)
	})

	t.Run("serial blocks with mix of histograms and float64", func(t *testing.T) {
		testBlockQuerying(t,
			genHistogramAndFloatSeries(10, 5, minute(0), minute(60), minute(1)),
			genHistogramSeries(10, 5, minute(61), minute(120), minute(1)),
			genHistogramAndFloatSeries(10, 5, minute(121), minute(180), minute(1)),
			genSeriesFromSampleGenerator(10, 5, minute(181), minute(240), minute(1), func(ts int64) tsdbutil.Sample {
				return sample{t: ts, v: rand.Float64()}
			}),
		)
	})

	t.Run("overlapping blocks with only histograms", func(t *testing.T) {
		testBlockQuerying(t,
			genHistogramSeries(10, 5, minute(0), minute(120), minute(3)),
			genHistogramSeries(10, 5, minute(1), minute(120), minute(3)),
			genHistogramSeries(10, 5, minute(2), minute(120), minute(3)),
		)
	})

	t.Run("overlapping blocks with only histograms and only float in a series", func(t *testing.T) {
		testBlockQuerying(t,
			genHistogramSeries(10, 5, minute(0), minute(120), minute(3)),
			genSeriesFromSampleGenerator(10, 5, minute(1), minute(120), minute(3), func(ts int64) tsdbutil.Sample {
				return sample{t: ts, v: rand.Float64()}
			}),
			genHistogramSeries(10, 5, minute(2), minute(120), minute(3)),
		)
	})

	t.Run("overlapping blocks with mix of histograms and float64", func(t *testing.T) {
		testBlockQuerying(t,
			genHistogramAndFloatSeries(10, 5, minute(0), minute(60), minute(3)),
			genHistogramSeries(10, 5, minute(46), minute(100), minute(3)),
			genHistogramAndFloatSeries(10, 5, minute(89), minute(140), minute(3)),
			genSeriesFromSampleGenerator(10, 5, minute(126), minute(200), minute(3), func(ts int64) tsdbutil.Sample {
				return sample{t: ts, v: rand.Float64()}
			}),
		)
	})
}

func TestNativeHistogramFlag(t *testing.T) {
	dir := t.TempDir()
	db, err := Open(dir, nil, nil, nil, nil)
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, db.Close())
	})
	h := &histogram.Histogram{
		Count:         6,
		ZeroCount:     4,
		ZeroThreshold: 0.001,
		Sum:           35.5,
		Schema:        1,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 2, Length: 2},
		},
		PositiveBuckets: []int64{1, 1, -1, 0},
	}

	l := labels.FromStrings("foo", "bar")

	app := db.Appender(context.Background())

	// Disabled by default.
	_, err = app.AppendHistogram(0, l, 100, h)
	require.Equal(t, storage.ErrNativeHistogramsDisabled, err)

	// Enable and append.
	db.EnableNativeHistograms()
	_, err = app.AppendHistogram(0, l, 200, h)
	require.NoError(t, err)

	db.DisableNativeHistograms()
	_, err = app.AppendHistogram(0, l, 300, h)
	require.Equal(t, storage.ErrNativeHistogramsDisabled, err)

	require.NoError(t, app.Commit())

	q, err := db.Querier(context.Background(), math.MinInt, math.MaxInt64)
	require.NoError(t, err)
	act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	require.Equal(t, map[string][]tsdbutil.Sample{l.String(): {sample{t: 200, h: h}}}, act)
}
@@ -33,3 +33,112 @@ in-file offset (lower 4 bytes) and segment sequence number (upper 4 bytes).
│ len <uvarint> │ encoding <1 byte> │ data <bytes> │ CRC32 <4 byte> │
└───────────────┴───────────────────┴──────────────┴────────────────┘
```

Notes:
* `<uvarint>` has 1 to 10 bytes.
* `encoding`: Currently either `XOR` or `histogram`.
* `data`: See below for each encoding.

## XOR chunk data

```
┌──────────────────────┬───────────────┬───────────────┬──────────────────────┬──────────────────────┬──────────────────────┬──────────────────────┬─────┬──────────────────────┬──────────────────────┬──────────────────┐
│ num_samples <uint16> │ ts_0 <varint> │ v_0 <float64> │ ts_1_delta <uvarint> │ v_1_xor <varbit_xor> │ ts_2_dod <varbit_ts> │ v_2_xor <varbit_xor> │ ... │ ts_n_dod <varbit_ts> │ v_n_xor <varbit_xor> │ padding <x bits> │
└──────────────────────┴───────────────┴───────────────┴──────────────────────┴──────────────────────┴──────────────────────┴──────────────────────┴─────┴──────────────────────┴──────────────────────┴──────────────────┘
```

### Notes:

* `ts` is the timestamp, `v` is the value.
* `...` means to repeat the previous two fields as needed, with `n` starting at 2 and going up to `num_samples` – 1.
* `<uint16>` has 2 bytes in big-endian order.
* `<varint>` and `<uvarint>` have 1 to 10 bytes each.
* `ts_1_delta` is `ts_1` – `ts_0`.
* `ts_n_dod` is the “delta of deltas” of timestamps, i.e. (`ts_n` – `ts_n-1`) – (`ts_n-1` – `ts_n-2`).
* `v_n_xor` is the result of `v_n` XOR `v_n-1`.
* `<varbit_xor>` is a specific variable bitwidth encoding of the result of XORing the current and the previous value. It has between 1 bit and 77 bits.
  See [code for details](https://github.com/prometheus/prometheus/blob/7309c20e7e5774e7838f183ec97c65baa4362edc/tsdb/chunkenc/xor.go#L220-L253).
* `<varbit_ts>` is a specific variable bitwidth encoding for the “delta of deltas” of timestamps (signed integers that are ideally small). It has between 1 and 68 bits.
  See [code for details](https://github.com/prometheus/prometheus/blob/7309c20e7e5774e7838f183ec97c65baa4362edc/tsdb/chunkenc/xor.go#L179-L205).
* `padding` of 0 to 7 bits so that the whole chunk data is byte-aligned.
* The chunk can have as few as one sample, i.e. `ts_1`, `v_1`, etc. are optional.
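The delta-of-deltas scheme in the notes above rewards regular scrape intervals: perfectly spaced samples produce all-zero dods, which the varbit encoding stores in a single bit each. A minimal, illustrative sketch of just the timestamp side (the real encoder in `tsdb/chunkenc/xor.go` additionally bit-packs each value with `varbit_ts`):

```go
package main

import "fmt"

// dodsForTimestamps stores the first timestamp raw, the second as a delta,
// and every later one as a "delta of deltas" (dod), mirroring the layout
// described in the notes above. Illustrative only.
func dodsForTimestamps(ts []int64) (first, delta int64, dods []int64) {
	first = ts[0]
	delta = ts[1] - ts[0]
	prevDelta := delta
	for i := 2; i < len(ts); i++ {
		d := ts[i] - ts[i-1]
		dods = append(dods, d-prevDelta)
		prevDelta = d
	}
	return first, delta, dods
}

func main() {
	// Regular 15s scrapes with one 10ms jitter at the end.
	fmt.Println(dodsForTimestamps([]int64{0, 15000, 30000, 45000, 60010}))
	// Output: 0 15000 [0 0 10]
}
```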
## Histogram chunk data

```
┌──────────────────────┬──────────────────────────┬───────────────────────────────┬─────────────────────┬──────────────────┬──────────────────┬────────────────┬──────────────────┐
│ num_samples <uint16> │ histogram_flags <1 byte> │ zero_threshold <1 or 9 bytes> │ schema <varbit_int> │ pos_spans <data> │ neg_spans <data> │ samples <data> │ padding <x bits> │
└──────────────────────┴──────────────────────────┴───────────────────────────────┴─────────────────────┴──────────────────┴──────────────────┴────────────────┴──────────────────┘
```

### Positive and negative spans data:

```
┌─────────────────────────┬────────────────────────┬───────────────────────┬────────────────────────┬───────────────────────┬─────┬────────────────────────┬───────────────────────┐
│ num_spans <varbit_uint> │ length_0 <varbit_uint> │ offset_0 <varbit_int> │ length_1 <varbit_uint> │ offset_1 <varbit_int> │ ... │ length_n <varbit_uint> │ offset_n <varbit_int> │
└─────────────────────────┴────────────────────────┴───────────────────────┴────────────────────────┴───────────────────────┴─────┴────────────────────────┴───────────────────────┘
```

### Samples data:

```
┌──────────────────────────┐
│ sample_0 <data>          │
├──────────────────────────┤
│ sample_1 <data>          │
├──────────────────────────┤
│ sample_2 <data>          │
├──────────────────────────┤
│ ...                      │
├──────────────────────────┤
│ sample_n <data>          │
└──────────────────────────┘
```

#### Sample 0 data:

```
┌─────────────────┬─────────────────────┬──────────────────────────┬───────────────┬───────────────────────────┬─────┬───────────────────────────┬───────────────────────────┬─────┬───────────────────────────┐
│ ts <varbit_int> │ count <varbit_uint> │ zero_count <varbit_uint> │ sum <float64> │ pos_bucket_0 <varbit_int> │ ... │ pos_bucket_n <varbit_int> │ neg_bucket_0 <varbit_int> │ ... │ neg_bucket_n <varbit_int> │
└─────────────────┴─────────────────────┴──────────────────────────┴───────────────┴───────────────────────────┴─────┴───────────────────────────┴───────────────────────────┴─────┴───────────────────────────┘
```

#### Sample 1 data:

```
┌────────────────────────┬───────────────────────────┬────────────────────────────────┬──────────────────────┬─────────────────────────────────┬─────┬─────────────────────────────────┬─────────────────────────────────┬─────┬─────────────────────────────────┐
│ ts_delta <varbit_uint> │ count_delta <varbit_uint> │ zero_count_delta <varbit_uint> │ sum_xor <varbit_xor> │ pos_bucket_0_delta <varbit_int> │ ... │ pos_bucket_n_delta <varbit_int> │ neg_bucket_0_delta <varbit_int> │ ... │ neg_bucket_n_delta <varbit_int> │
└────────────────────────┴───────────────────────────┴────────────────────────────────┴──────────────────────┴─────────────────────────────────┴─────┴─────────────────────────────────┴─────────────────────────────────┴─────┴─────────────────────────────────┘
```

#### Sample 2 data and following:

```
┌─────────────────────┬────────────────────────┬─────────────────────────────┬──────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┐
│ ts_dod <varbit_int> │ count_dod <varbit_int> │ zero_count_dod <varbit_int> │ sum_xor <varbit_xor> │ pos_bucket_0_dod <varbit_int> │ ... │ pos_bucket_n_dod <varbit_int> │ neg_bucket_0_dod <varbit_int> │ ... │ neg_bucket_n_dod <varbit_int> │
└─────────────────────┴────────────────────────┴─────────────────────────────┴──────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┘
```

### Notes:

* `histogram_flags` is a byte of which currently only the first two bits are used:
  * `10`: Counter reset between the previous chunk and this one.
  * `01`: No counter reset between the previous chunk and this one.
  * `00`: Counter reset status unknown.
  * `11`: Chunk is part of a gauge histogram, no counter resets are happening.
* `zero_threshold` has a specific encoding:
  * If 0, it is a single zero byte.
  * If a power of two between 2^-243 and 2^10, it is a single byte between 1 and 254.
  * Otherwise, it is a byte with all bits set (255), followed by a float64, resulting in 9 bytes length.
* `schema` is a specific value defined by the exposition format. Currently valid values are -4 <= n <= 8.
* `<varbit_int>` is a variable bitwidth encoding for signed integers, optimized for “delta of deltas” of bucket deltas. It has between 1 bit and 9 bytes.
  See [code for details](https://github.com/prometheus/prometheus/blob/8c1507ebaa4ca552958ffb60c2d1b21afb7150e4/tsdb/chunkenc/varbit.go#L31-L60).
* `<varbit_uint>` is a variable bitwidth encoding for unsigned integers with the same bit-bucketing as `<varbit_int>`.
  See [code for details](https://github.com/prometheus/prometheus/blob/8c1507ebaa4ca552958ffb60c2d1b21afb7150e4/tsdb/chunkenc/varbit.go#L136-L165).
* `<varbit_xor>` is a specific variable bitwidth encoding of the result of XORing the current and the previous value. It has between 1 bit and 77 bits.
  See [code for details](https://github.com/prometheus/prometheus/blob/8c1507ebaa4ca552958ffb60c2d1b21afb7150e4/tsdb/chunkenc/histogram.go#L538-L574).
* `padding` of 0 to 7 bits so that the whole chunk data is byte-aligned.
* Note that buckets are inherently deltas between the current bucket and the previous bucket. Only `bucket_0` is an absolute count.
* The chunk can have as few as one sample, i.e. sample 1 and following are optional.
* Similarly, there could be down to zero spans and down to zero buckets.
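The three-way `zero_threshold` encoding above is compact enough to sketch directly. A minimal decoder, assuming the byte-to-exponent mapping implied by the stated range (byte `b` maps to 2^(b-244), so 1 decodes to 2^-243 and 254 to 2^10); the real reader lives in `tsdb/chunkenc` and may differ in detail:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

// decodeZeroThreshold is a sketch of the zero_threshold encoding in the
// notes above. It returns the threshold and how many bytes were consumed.
// The b -> 2^(b-244) mapping is an inference from the documented range.
func decodeZeroThreshold(buf []byte) (threshold float64, n int, err error) {
	if len(buf) == 0 {
		return 0, 0, fmt.Errorf("empty buffer")
	}
	switch b := buf[0]; {
	case b == 0:
		return 0, 1, nil // exact zero threshold, single byte
	case b == 255:
		if len(buf) < 9 {
			return 0, 0, fmt.Errorf("need 9 bytes, have %d", len(buf))
		}
		return math.Float64frombits(binary.BigEndian.Uint64(buf[1:9])), 9, nil
	default:
		return math.Ldexp(1, int(b)-244), 1, nil // powers of two: 1 -> 2^-243, 254 -> 2^10
	}
}

func main() {
	fmt.Println(decodeZeroThreshold([]byte{0}))   // 0 1 <nil>
	fmt.Println(decodeZeroThreshold([]byte{244})) // 1 1 <nil> (2^0)
}
```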
@@ -66,7 +66,6 @@ as tombstone file in blocks.
└───────────────────────────────────┴─────────────────────────────┘
```

### Exemplar record

A single exemplar record contains one or more exemplars, encoded in the same way as in the WAL but with a changed record type.
@@ -178,9 +178,10 @@ func NewDecbufRaw(bs ByteSlice, length int) Decbuf {
	return Decbuf{B: bs.Range(0, length)}
}

func (d *Decbuf) Uvarint() int { return int(d.Uvarint64()) }
+func (d *Decbuf) Uvarint32() uint32 { return uint32(d.Uvarint64()) }
func (d *Decbuf) Be32int() int { return int(d.Be32()) }
func (d *Decbuf) Be64int64() int64 { return int64(d.Be64()) }

// Crc32 returns a CRC32 checksum over the remaining bytes.
func (d *Decbuf) Crc32(castagnoliTable *crc32.Table) uint32 {
@@ -21,6 +21,7 @@ import (
	"time"

	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func Example() {
@@ -67,7 +68,7 @@ func Example() {
	fmt.Println("series:", series.Labels().String())

	it := series.Iterator()
-	for it.Next() {
+	for it.Next() == chunkenc.ValFloat {
		_, v := it.At() // We ignore the timestamp here, only to have a predictable output we can test against (below)
		fmt.Println("sample", v)
	}
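The loop-condition change reflects the new iterator contract introduced by this PR: `Next()` now returns a `chunkenc.ValueType` instead of a bool, so float-only callers compare against `ValFloat`, while type-agnostic callers switch on the returned type, as `testBlockQuerying` does earlier in this diff. A sketch of the general form (assumes `fmt` and `github.com/prometheus/prometheus/tsdb/chunkenc` are imported):

```go
// drain exhausts a chunkenc.Iterator under the new ValueType-based Next
// contract, dispatching on each sample's kind. Illustrative only; real
// callers should also check it.Err() after the loop.
func drain(it chunkenc.Iterator) {
	for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
		switch typ {
		case chunkenc.ValFloat:
			ts, v := it.At()
			fmt.Println("float", ts, v)
		case chunkenc.ValHistogram:
			ts, h := it.AtHistogram()
			fmt.Println("histogram", ts, h.Count)
		}
	}
}
```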
102	tsdb/head.go
@@ -31,6 +31,7 @@ import (

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/storage"
@@ -81,6 +82,7 @@ type Head struct {
	logger         log.Logger
	appendPool     sync.Pool
	exemplarsPool  sync.Pool
+	histogramsPool sync.Pool
	metadataPool   sync.Pool
	seriesPool     sync.Pool
	bytesPool      sync.Pool
@@ -130,6 +132,9 @@ type HeadOptions struct {
	// https://pkg.go.dev/sync/atomic#pkg-note-BUG
	MaxExemplars atomic.Int64

+	// EnableNativeHistograms enables the ingestion of native histograms.
+	EnableNativeHistograms atomic.Bool
+
	ChunkRange int64
	// ChunkDirRoot is the parent directory of the chunks directory.
	ChunkDirRoot string
@@ -299,11 +304,11 @@ type headMetrics struct {
	chunksCreated             prometheus.Counter
	chunksRemoved             prometheus.Counter
	gcDuration                prometheus.Summary
-	samplesAppended           prometheus.Counter
+	samplesAppended           *prometheus.CounterVec
	outOfOrderSamplesAppended prometheus.Counter
-	outOfBoundSamples         prometheus.Counter
+	outOfBoundSamples         *prometheus.CounterVec
-	outOfOrderSamples         prometheus.Counter
+	outOfOrderSamples         *prometheus.CounterVec
-	tooOldSamples             prometheus.Counter
+	tooOldSamples             *prometheus.CounterVec
	walTruncateDuration       prometheus.Summary
	walCorruptionsTotal       prometheus.Counter
	dataTotalReplayDuration   prometheus.Gauge
@@ -318,6 +323,11 @@ type headMetrics struct {
	oooHistogram prometheus.Histogram
}

+const (
+	sampleMetricTypeFloat     = "float"
+	sampleMetricTypeHistogram = "histogram"
+)
+
func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
	m := &headMetrics{
		activeAppenders: prometheus.NewGauge(prometheus.GaugeOpts{
@@ -370,26 +380,26 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
			Name: "prometheus_tsdb_data_replay_duration_seconds",
			Help: "Time taken to replay the data on disk.",
		}),
-		samplesAppended: prometheus.NewCounter(prometheus.CounterOpts{
+		samplesAppended: prometheus.NewCounterVec(prometheus.CounterOpts{
			Name: "prometheus_tsdb_head_samples_appended_total",
			Help: "Total number of appended samples.",
-		}),
+		}, []string{"type"}),
		outOfOrderSamplesAppended: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_head_out_of_order_samples_appended_total",
			Help: "Total number of appended out of order samples.",
		}),
-		outOfBoundSamples: prometheus.NewCounter(prometheus.CounterOpts{
+		outOfBoundSamples: prometheus.NewCounterVec(prometheus.CounterOpts{
			Name: "prometheus_tsdb_out_of_bound_samples_total",
			Help: "Total number of out of bound samples ingestion failed attempts with out of order support disabled.",
-		}),
+		}, []string{"type"}),
-		outOfOrderSamples: prometheus.NewCounter(prometheus.CounterOpts{
+		outOfOrderSamples: prometheus.NewCounterVec(prometheus.CounterOpts{
			Name: "prometheus_tsdb_out_of_order_samples_total",
			Help: "Total number of out of order samples ingestion failed attempts due to out of order being disabled.",
-		}),
+		}, []string{"type"}),
-		tooOldSamples: prometheus.NewCounter(prometheus.CounterOpts{
+		tooOldSamples: prometheus.NewCounterVec(prometheus.CounterOpts{
			Name: "prometheus_tsdb_too_old_samples_total",
			Help: "Total number of out of order samples ingestion failed attempts with out of order support enabled, but sample outside of time window.",
-		}),
+		}, []string{"type"}),
		headTruncateFail: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_tsdb_head_truncations_failed_total",
			Help: "Total number of head truncations that failed.",
@@ -880,6 +890,16 @@ func (h *Head) SetOutOfOrderTimeWindow(oooTimeWindow int64, wbl *wlog.WL) {
	h.opts.OutOfOrderTimeWindow.Store(oooTimeWindow)
}

+// EnableNativeHistograms enables the native histogram feature.
+func (h *Head) EnableNativeHistograms() {
+	h.opts.EnableNativeHistograms.Store(true)
+}
+
+// DisableNativeHistograms disables the native histogram feature.
+func (h *Head) DisableNativeHistograms() {
+	h.opts.EnableNativeHistograms.Store(false)
+}
+
// PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names.
func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats {
	h.cardinalityMutex.Lock()
@@ -1472,7 +1492,11 @@ func (h *Head) Close() error {
	h.closedMtx.Lock()
	defer h.closedMtx.Unlock()
	h.closed = true

	errs := tsdb_errors.NewMulti(h.chunkDiskMapper.Close())
+	if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown {
+		errs.Add(h.performChunkSnapshot())
+	}
	if h.wal != nil {
		errs.Add(h.wal.Close())
	}
@@ -1765,13 +1789,31 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu
}

type sample struct {
	t  int64
	v  float64
+	h  *histogram.Histogram
+	fh *histogram.FloatHistogram
}

-func newSample(t int64, v float64) tsdbutil.Sample { return sample{t, v} }
+func newSample(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample {
+	return sample{t, v, h, fh}
+}

func (s sample) T() int64   { return s.t }
func (s sample) V() float64 { return s.v }
+func (s sample) H() *histogram.Histogram       { return s.h }
+func (s sample) FH() *histogram.FloatHistogram { return s.fh }
+
+func (s sample) Type() chunkenc.ValueType {
+	switch {
+	case s.h != nil:
+		return chunkenc.ValHistogram
+	case s.fh != nil:
+		return chunkenc.ValFloatHistogram
+	default:
+		return chunkenc.ValFloat
+	}
+}

// memSeries is the in-memory representation of a series. None of its methods
// are goroutine safe and it is the caller's responsibility to lock it.
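The `Type()` method lets callers dispatch on the kind of a sample instead of probing `h`/`fh` for nil directly. A hypothetical fragment for package tsdb (not part of the PR; assumes the `sample` type and `chunkenc` constants from the diff above, plus `fmt`):

```go
// describeSample is illustrative only: it shows the intended dispatch
// pattern over the three possible value types of a sample.
func describeSample(s sample) string {
	switch s.Type() {
	case chunkenc.ValHistogram:
		return fmt.Sprintf("histogram sample at ts=%d with count %d", s.T(), s.H().Count)
	case chunkenc.ValFloatHistogram:
		return fmt.Sprintf("float histogram sample at ts=%d with count %g", s.T(), s.FH().Count)
	default:
		return fmt.Sprintf("float sample %g at ts=%d", s.V(), s.T())
	}
}
```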
@@ -1806,6 +1848,9 @@ type memSeries struct {
	// We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates.
	lastValue float64

+	// We keep the last histogram value here (in addition to appending it to the chunk) so we can check for duplicates.
+	lastHistogramValue *histogram.Histogram
+
	// Current appender for the head chunk. Set when a new head chunk is cut.
	// It is nil only if headChunk is nil. E.g. if there was an appender that created a new series, but rolled back the commit
	// (the first sample would create a headChunk, hence appender, but rollback skipped it while the Append() call would create a series).
@@ -1814,6 +1859,10 @@ type memSeries struct {
	// txs is nil if isolation is disabled.
	txs *txRing

+	// TODO(beorn7): The only reason we track this is to create a staleness
+	// marker as either histogram or float sample. Perhaps there is a better way.
+	isHistogramSeries bool
+
	pendingCommit bool // Whether there are samples waiting to be committed to this series.
}

@@ -1974,3 +2023,22 @@ func (h *Head) updateWALReplayStatusRead(current int) {
	h.stats.WALReplayStatus.Current = current
}
+
+func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
+	for i := 0; i < n; i++ {
+		r = append(r, &histogram.Histogram{
+			Count:         5 + uint64(i*4),
+			ZeroCount:     2 + uint64(i),
+			ZeroThreshold: 0.001,
+			Sum:           18.4 * float64(i+1),
+			Schema:        1,
+			PositiveSpans: []histogram.Span{
+				{Offset: 0, Length: 2},
+				{Offset: 1, Length: 2},
+			},
+			PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
+		})
+	}
+
+	return r
+}
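A sketch of how `GenerateTestHistograms` might be exercised from a test, mirroring the appender flow used by the tests earlier in this diff (hypothetical test, not part of the PR; `openTestDB` is the existing helper from this package and is assumed to accept nil options):

```go
package tsdb

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/labels"
)

func TestAppendGeneratedHistograms(t *testing.T) {
	db := openTestDB(t, nil, nil)
	t.Cleanup(func() { require.NoError(t, db.Close()) })
	db.EnableNativeHistograms() // histograms are rejected while the feature is off

	app := db.Appender(context.Background())
	lbls := labels.FromStrings("foo", "bar")
	// Each generated histogram is internally consistent: its delta-encoded
	// positive buckets sum to its Count field, so ValidateHistogram passes.
	for i, h := range GenerateTestHistograms(3) {
		_, err := app.AppendHistogram(0, lbls, int64(i)*15000, h)
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())
}
```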
@@ -22,8 +22,10 @@ import (
	"github.com/pkg/errors"

	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
+	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
@@ -66,6 +68,16 @@ func (a *initAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e
	return a.app.AppendExemplar(ref, l, e)
}

+func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
+	if a.app != nil {
+		return a.app.AppendHistogram(ref, l, t, h)
+	}
+	a.head.initTime(t)
+	a.app = a.head.appender()
+
+	return a.app.AppendHistogram(ref, l, t, h)
+}
+
func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
	if a.app != nil {
		return a.app.UpdateMetadata(ref, l, m)
@@ -143,6 +155,7 @@ func (h *Head) appender() *headAppender {
		samples:               h.getAppendBuffer(),
		sampleSeries:          h.getSeriesBuffer(),
		exemplars:             exemplarsBuf,
+		histograms:            h.getHistogramBuffer(),
		metadata:              h.getMetadataBuffer(),
		appendID:              appendID,
		cleanupAppendIDsBelow: cleanupAppendIDsBelow,
@@ -210,6 +223,19 @@ func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) {
	h.exemplarsPool.Put(b[:0])
}

+func (h *Head) getHistogramBuffer() []record.RefHistogramSample {
+	b := h.histogramsPool.Get()
+	if b == nil {
+		return make([]record.RefHistogramSample, 0, 512)
+	}
+	return b.([]record.RefHistogramSample)
+}
+
+func (h *Head) putHistogramBuffer(b []record.RefHistogramSample) {
+	//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
+	h.histogramsPool.Put(b[:0])
+}
+
func (h *Head) getMetadataBuffer() []record.RefMetadata {
	b := h.metadataPool.Get()
	if b == nil {
@@ -261,12 +287,14 @@ type headAppender struct {
	headMaxt      int64 // We track it here to not take the lock for every sample appended.
	oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample.

	series          []record.RefSeries          // New series held by this appender.
-	metadata        []record.RefMetadata        // New metadata held by this appender.
-	samples         []record.RefSample          // New samples held by this appender.
+	samples         []record.RefSample          // New float samples held by this appender.
	exemplars       []exemplarWithSeriesRef     // New exemplars held by this appender.
-	sampleSeries    []*memSeries                // Series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
+	sampleSeries    []*memSeries                // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
+	histograms      []record.RefHistogramSample // New histogram samples held by this appender.
+	histogramSeries []*memSeries                // Histogram series corresponding to the histograms held by this appender (using corresponding slice indices - same series may appear more than once).
+	metadata        []record.RefMetadata        // New metadata held by this appender.
	metadataSeries  []*memSeries                // Series corresponding to the metadata held by this appender.

	appendID, cleanupAppendIDsBelow uint64
	closed                          bool
@@ -276,7 +304,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
	// For OOO inserts, this restriction is irrelevant and will be checked later once we confirm the sample is an in-order append.
	// If OOO inserts are disabled, we may as well check this as early as we can and avoid more work.
	if a.oooTimeWindow == 0 && t < a.minValidTime {
-		a.head.metrics.outOfBoundSamples.Inc()
+		a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
		return 0, storage.ErrOutOfBounds
	}
@@ -306,6 +334,10 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
		}
	}

+	if value.IsStaleNaN(v) && s.isHistogramSeries {
+		return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v})
+	}
+
	s.Lock()
	// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
	// to skip that sample from the WAL and write only in the WBL.
@@ -320,9 +352,9 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
	if err != nil {
		switch err {
		case storage.ErrOutOfOrderSample:
-			a.head.metrics.outOfOrderSamples.Inc()
+			a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
		case storage.ErrTooOldSample:
-			a.head.metrics.tooOldSamples.Inc()
+			a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
		}
		return 0, err
	}
@@ -385,6 +417,28 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi
	return false, headMaxt - t, storage.ErrOutOfOrderSample
}

+// appendableHistogram checks whether the given sample is valid for appending to the series.
+func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error {
+	c := s.head()
+	if c == nil {
+		return nil
+	}
+
+	if t > c.maxTime {
+		return nil
+	}
+	if t < c.maxTime {
+		return storage.ErrOutOfOrderSample
+	}
+
+	// We are allowing exact duplicates as we can encounter them in valid cases
+	// like federation and erroring out at that time would be extremely noisy.
+	if !h.Equals(s.lastHistogramValue) {
+		return storage.ErrDuplicateSampleForTimestamp
+	}
+	return nil
+}
+
// AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't
// use getOrCreate or make any of the lset sanity checks that Append does.
func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
@@ -422,6 +476,74 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels,
	return storage.SeriesRef(s.ref), nil
}

+func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
+	if !a.head.opts.EnableNativeHistograms.Load() {
+		return 0, storage.ErrNativeHistogramsDisabled
+	}
+
+	if t < a.minValidTime {
+		a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
+		return 0, storage.ErrOutOfBounds
+	}
+
+	if err := ValidateHistogram(h); err != nil {
+		return 0, err
+	}
+
+	s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
+	if s == nil {
+		// Ensure no empty labels have gotten through.
+		lset = lset.WithoutEmpty()
+		if len(lset) == 0 {
+			return 0, errors.Wrap(ErrInvalidSample, "empty labelset")
+		}
+
+		if l, dup := lset.HasDuplicateLabelNames(); dup {
+			return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l))
+		}
+
+		var created bool
+		var err error
+		s, created, err = a.head.getOrCreate(lset.Hash(), lset)
+		if err != nil {
+			return 0, err
+		}
+		s.isHistogramSeries = true
+		if created {
+			a.series = append(a.series, record.RefSeries{
+				Ref:    s.ref,
+				Labels: lset,
+			})
+		}
+	}
+
+	s.Lock()
+	if err := s.appendableHistogram(t, h); err != nil {
+		s.Unlock()
+		if err == storage.ErrOutOfOrderSample {
+			a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
+		}
+		return 0, err
+	}
+	s.pendingCommit = true
+	s.Unlock()
+
+	if t < a.mint {
+		a.mint = t
+	}
+	if t > a.maxt {
+		a.maxt = t
+	}
+
+	a.histograms = append(a.histograms, record.RefHistogramSample{
+		Ref: s.ref,
+		T:   t,
+		H:   h,
+	})
+	a.histogramSeries = append(a.histogramSeries, s)
+	return storage.SeriesRef(s.ref), nil
+}
+
// UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't
// use getOrCreate or make any of the lset sanity checks that Append does.
func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) {
@@ -453,6 +575,76 @@ func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels,
	return ref, nil
}

+func ValidateHistogram(h *histogram.Histogram) error {
+	if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
+		return errors.Wrap(err, "negative side")
+	}
+	if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+		return errors.Wrap(err, "positive side")
+	}
+
+	negativeCount, err := checkHistogramBuckets(h.NegativeBuckets)
+	if err != nil {
+		return errors.Wrap(err, "negative side")
+	}
+	positiveCount, err := checkHistogramBuckets(h.PositiveBuckets)
+	if err != nil {
+		return errors.Wrap(err, "positive side")
+	}
+
+	if c := negativeCount + positiveCount; c > h.Count {
+		return errors.Wrap(
+			storage.ErrHistogramCountNotBigEnough,
+			fmt.Sprintf("%d observations found in buckets, but the Count field is %d", c, h.Count),
+		)
+	}
+
+	return nil
+}
+
+func checkHistogramSpans(spans []histogram.Span, numBuckets int) error {
+	var spanBuckets int
+	for n, span := range spans {
+		if n > 0 && span.Offset < 0 {
+			return errors.Wrap(
+				storage.ErrHistogramSpanNegativeOffset,
+				fmt.Sprintf("span number %d with offset %d", n+1, span.Offset),
+			)
+		}
+		spanBuckets += int(span.Length)
+	}
+	if spanBuckets != numBuckets {
+		return errors.Wrap(
+			storage.ErrHistogramSpansBucketsMismatch,
+			fmt.Sprintf("spans need %d buckets, have %d buckets", spanBuckets, numBuckets),
+		)
+	}
+	return nil
+}
+
+func checkHistogramBuckets(buckets []int64) (uint64, error) {
+	if len(buckets) == 0 {
+		return 0, nil
+	}
+
+	var count uint64
+	var last int64
+
+	for i := 0; i < len(buckets); i++ {
+		c := last + buckets[i]
+		if c < 0 {
+			return 0, errors.Wrap(
+				storage.ErrHistogramNegativeBucketCount,
+				fmt.Sprintf("bucket number %d has observation count of %d", i+1, c),
+			)
+		}
+		last = c
+		count += uint64(c)
+	}
+
+	return count, nil
+}
+
var _ storage.GetRef = &headAppender{}

func (a *headAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) {
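`checkHistogramSpans` enforces that the span lengths account for exactly the number of delta-encoded buckets, while `checkHistogramBuckets` reconstructs each absolute count (rejecting negatives) and sums them for the comparison against `Count`. A quick illustration of the span/bucket consistency rule (hypothetical test, not part of the PR; assumes the tsdb package context and testify, as in the tests above):

```go
package tsdb

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/histogram"
)

func TestValidateHistogramSpanMismatch(t *testing.T) {
	h := &histogram.Histogram{
		Count:           3,
		Schema:          0,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}}, // spans promise 3 buckets...
		PositiveBuckets: []int64{1, 1},                            // ...but only 2 deltas are present.
	}
	// Fails on the positive side with a wrapped ErrHistogramSpansBucketsMismatch.
	require.Error(t, ValidateHistogram(h))
}
```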
@ -508,6 +700,13 @@ func (a *headAppender) log() error {
|
||||||
return errors.Wrap(err, "log exemplars")
|
return errors.Wrap(err, "log exemplars")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if len(a.histograms) > 0 {
|
||||||
|
rec = enc.HistogramSamples(a.histograms, buf)
|
||||||
|
buf = rec[:0]
|
||||||
|
if err := a.head.wal.Log(rec); err != nil {
|
||||||
|
return errors.Wrap(err, "log histograms")
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -553,6 +752,7 @@ func (a *headAppender) Commit() (err error) {
|
||||||
defer a.head.putAppendBuffer(a.samples)
|
defer a.head.putAppendBuffer(a.samples)
|
||||||
defer a.head.putSeriesBuffer(a.sampleSeries)
|
defer a.head.putSeriesBuffer(a.sampleSeries)
|
||||||
defer a.head.putExemplarBuffer(a.exemplars)
|
defer a.head.putExemplarBuffer(a.exemplars)
|
||||||
|
defer a.head.putHistogramBuffer(a.histograms)
|
||||||
defer a.head.putMetadataBuffer(a.metadata)
|
defer a.head.putMetadataBuffer(a.metadata)
|
||||||
defer a.head.iso.closeAppend(a.appendID)
|
defer a.head.iso.closeAppend(a.appendID)
|
||||||
|
|
||||||
|
@ -697,6 +897,33 @@ func (a *headAppender) Commit() (err error) {
|
||||||
series.Unlock()
|
series.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
histogramsTotal := len(a.histograms)
|
||||||
|
histoOOORejected := 0
|
||||||
|
for i, s := range a.histograms {
|
||||||
|
series = a.histogramSeries[i]
|
||||||
|
series.Lock()
|
||||||
|
ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, a.head.chunkDiskMapper, chunkRange)
|
||||||
|
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
|
||||||
|
series.pendingCommit = false
|
||||||
|
series.Unlock()
|
||||||
|
|
||||||
|
if ok {
|
||||||
|
if s.T < inOrderMint {
|
||||||
|
inOrderMint = s.T
|
||||||
|
}
|
||||||
|
if s.T > inOrderMaxt {
|
||||||
|
inOrderMaxt = s.T
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
histogramsTotal--
|
||||||
|
histoOOORejected++
|
||||||
|
}
|
||||||
|
if chunkCreated {
|
||||||
|
a.head.metrics.chunks.Inc()
|
||||||
|
a.head.metrics.chunksCreated.Inc()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for i, m := range a.metadata {
|
for i, m := range a.metadata {
|
||||||
series = a.metadataSeries[i]
|
series = a.metadataSeries[i]
|
||||||
series.Lock()
|
series.Lock()
|
||||||
|
@@ -704,10 +931,12 @@ func (a *headAppender) Commit() (err error) {
 		series.Unlock()
 	}

-	a.head.metrics.outOfOrderSamples.Add(float64(oooRejected))
-	a.head.metrics.outOfBoundSamples.Add(float64(oobRejected))
-	a.head.metrics.tooOldSamples.Add(float64(tooOldRejected))
-	a.head.metrics.samplesAppended.Add(float64(samplesAppended))
+	a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(oooRejected))
+	a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histoOOORejected))
+	a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(oobRejected))
+	a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(tooOldRejected))
+	a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(samplesAppended))
+	a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsTotal))
 	a.head.metrics.outOfOrderSamplesAppended.Add(float64(oooAccepted))
 	a.head.updateMinMaxTime(inOrderMint, inOrderMaxt)
 	a.head.updateMinOOOMaxOOOTime(ooomint, ooomaxt)
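The rejection and append counters are now metric vectors partitioned by sample type (sampleMetricTypeFloat, sampleMetricTypeHistogram), so float and histogram traffic can be observed separately. A minimal client_golang sketch of the same pattern, with an invented metric name:

package main

import "github.com/prometheus/client_golang/prometheus"

const (
	sampleTypeFloat     = "float"
	sampleTypeHistogram = "histogram"
)

// samplesAppended uses a hypothetical name for illustration; the real
// metrics live in headMetrics.
var samplesAppended = prometheus.NewCounterVec(prometheus.CounterOpts{
	Name: "tsdb_head_samples_appended_total",
	Help: "Total number of appended samples, partitioned by sample type.",
}, []string{"type"})

func main() {
	prometheus.MustRegister(samplesAppended)
	// Each commit adds per-type totals, mirroring the Commit() change above.
	samplesAppended.WithLabelValues(sampleTypeFloat).Add(120)
	samplesAppended.WithLabelValues(sampleTypeHistogram).Add(30)
}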
@@ -751,26 +980,126 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk
 // isolation for this append.)
 // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
 func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
+	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, chunkDiskMapper, chunkRange)
+	if !sampleInOrder {
+		return sampleInOrder, chunkCreated
+	}
+	s.app.Append(t, v)
+	s.isHistogramSeries = false
+
+	c.maxTime = t
+
+	s.lastValue = v
+
+	if appendID > 0 {
+		s.txs.add(appendID)
+	}
+
+	return true, chunkCreated
+}
+
+// appendHistogram adds the histogram.
+// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
+func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
+	// Head controls the execution of recoding, so that we own the proper
+	// chunk reference afterwards. We check for Appendable before
+	// appendPreprocessor because in case it ends up creating a new chunk,
+	// we need to know if there was also a counter reset or not to set the
+	// meta properly.
+	app, _ := s.app.(*chunkenc.HistogramAppender)
+	var (
+		positiveInterjections, negativeInterjections []chunkenc.Interjection
+		okToAppend, counterReset                     bool
+	)
+	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
+	if !sampleInOrder {
+		return sampleInOrder, chunkCreated
+	}
+
+	if app != nil {
+		positiveInterjections, negativeInterjections, okToAppend, counterReset = app.Appendable(h)
+	}
+
+	if !chunkCreated {
+		// We have 3 cases here
+		// - !okToAppend -> We need to cut a new chunk.
+		// - okToAppend but we have interjections → Existing chunk needs
+		//   recoding before we can append our histogram.
+		// - okToAppend and no interjections → Chunk is ready to support our histogram.
+		if !okToAppend || counterReset {
+			c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
+			chunkCreated = true
+		} else if len(positiveInterjections) > 0 || len(negativeInterjections) > 0 {
+			// New buckets have appeared. We need to recode all
+			// prior histogram samples within the chunk before we
+			// can process this one.
+			chunk, app := app.Recode(
+				positiveInterjections, negativeInterjections,
+				h.PositiveSpans, h.NegativeSpans,
+			)
+			c.chunk = chunk
+			s.app = app
+		}
+	}
+
+	if chunkCreated {
+		hc := s.headChunk.chunk.(*chunkenc.HistogramChunk)
+		header := chunkenc.UnknownCounterReset
+		if counterReset {
+			header = chunkenc.CounterReset
+		} else if okToAppend {
+			header = chunkenc.NotCounterReset
+		}
+		hc.SetCounterResetHeader(header)
+	}
+
+	s.app.AppendHistogram(t, h)
+	s.isHistogramSeries = true
+
+	c.maxTime = t
+
+	s.lastHistogramValue = h
+
+	if appendID > 0 {
+		s.txs.add(appendID)
+	}
+
+	return true, chunkCreated
+}
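The comment block above names the three cases appendHistogram must distinguish once a head chunk already exists. A self-contained sketch of just that decision, with stand-in types invented for illustration (the real inputs come from chunkenc.HistogramAppender.Appendable):

package main

import "fmt"

// interjection is a stand-in for chunkenc.Interjection, for illustration only.
type interjection struct{ pos, num int }

type appendResult int

const (
	cutNewChunk   appendResult = iota // counter reset or incompatible layout
	recodeChunk                       // new buckets appeared; rewrite old samples first
	appendInPlace                     // bucket layout compatible as-is
)

// decide mirrors the control flow in appendHistogram above.
func decide(okToAppend, counterReset bool, interjections []interjection) appendResult {
	switch {
	case !okToAppend || counterReset:
		return cutNewChunk
	case len(interjections) > 0:
		return recodeChunk
	default:
		return appendInPlace
	}
}

func main() {
	fmt.Println(decide(true, false, nil))                    // appendInPlace
	fmt.Println(decide(true, false, []interjection{{2, 1}})) // recodeChunk
	fmt.Println(decide(false, true, nil))                    // cutNewChunk
}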
+
+// appendPreprocessor takes care of cutting new chunks and m-mapping old chunks.
+// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
+// This should be called only when appending data.
+func (s *memSeries) appendPreprocessor(
+	t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64,
+) (c *memChunk, sampleInOrder, chunkCreated bool) {
 	// Based on Gorilla white papers this offers near-optimal compression ratio
 	// so anything bigger that this has diminishing returns and increases
 	// the time range within which we have to decompress all samples.
 	const samplesPerChunk = 120

-	c := s.head()
+	c = s.head()

 	if c == nil {
 		if len(s.mmappedChunks) > 0 && s.mmappedChunks[len(s.mmappedChunks)-1].maxTime >= t {
 			// Out of order sample. Sample timestamp is already in the mmapped chunks, so ignore it.
-			return false, false
+			return c, false, false
 		}
 		// There is no head chunk in this series yet, create the first chunk for the sample.
-		c = s.cutNewHeadChunk(t, chunkDiskMapper, chunkRange)
+		c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
 		chunkCreated = true
 	}

 	// Out of order sample.
 	if c.maxTime >= t {
-		return false, chunkCreated
+		return c, false, chunkCreated
+	}
+
+	if c.chunk.Encoding() != e {
+		// The chunk encoding expected by this append is different than the head chunk's
+		// encoding. So we cut a new chunk with the expected encoding.
+		c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
+		chunkCreated = true
 	}

 	numSamples := c.chunk.NumSamples()
@@ -794,19 +1123,11 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper
 	// as we expect more chunks to come.
 	// Note that next chunk will have its nextAt recalculated for the new rate.
 	if t >= s.nextAt || numSamples >= samplesPerChunk*2 {
-		c = s.cutNewHeadChunk(t, chunkDiskMapper, chunkRange)
+		c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
 		chunkCreated = true
 	}
-	s.app.Append(t, v)
-
-	c.maxTime = t
-	s.lastValue = v
-
-	if appendID > 0 && s.txs != nil {
-		s.txs.add(appendID)
-	}

-	return true, chunkCreated
+	return c, true, chunkCreated
 }
 // computeChunkEndTime estimates the end timestamp based the beginning of a

@@ -822,15 +1143,26 @@ func computeChunkEndTime(start, cur, max int64) int64 {
 	return start + (max-start)/n
 }

-func (s *memSeries) cutNewHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) *memChunk {
+func (s *memSeries) cutNewHeadChunk(
+	mint int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64,
+) *memChunk {
 	s.mmapCurrentHeadChunk(chunkDiskMapper)

 	s.headChunk = &memChunk{
-		chunk:   chunkenc.NewXORChunk(),
 		minTime: mint,
 		maxTime: math.MinInt64,
 	}

+	if chunkenc.IsValidEncoding(e) {
+		var err error
+		s.headChunk.chunk, err = chunkenc.NewEmptyChunk(e)
+		if err != nil {
+			panic(err) // This should never happen.
+		}
+	} else {
+		s.headChunk.chunk = chunkenc.NewXORChunk()
+	}
+
 	// Set upper bound on when the next chunk must be started. An earlier timestamp
 	// may be chosen dynamically at a later point.
 	s.nextAt = rangeForTimestamp(mint, chunkRange)
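cutNewHeadChunk now derives the chunk implementation from the requested encoding and falls back to XOR when the encoding is not valid. A sketch of that factory-with-fallback shape, using stand-in types rather than the real chunkenc package:

package main

import (
	"errors"
	"fmt"
)

type Encoding byte

const (
	EncNone Encoding = iota
	EncXOR
	EncHistogram
)

type Chunk interface{ Encoding() Encoding }

type xorChunk struct{}

func (xorChunk) Encoding() Encoding { return EncXOR }

type histogramChunk struct{}

func (histogramChunk) Encoding() Encoding { return EncHistogram }

// newEmptyChunk mirrors chunkenc.NewEmptyChunk: it errors on an unknown encoding.
func newEmptyChunk(e Encoding) (Chunk, error) {
	switch e {
	case EncXOR:
		return xorChunk{}, nil
	case EncHistogram:
		return histogramChunk{}, nil
	}
	return nil, errors.New("invalid chunk encoding")
}

// newHeadChunk falls back to XOR for invalid encodings, as the diff does.
func newHeadChunk(e Encoding) Chunk {
	if c, err := newEmptyChunk(e); err == nil {
		return c
	}
	return xorChunk{}
}

func main() {
	fmt.Println(newHeadChunk(EncHistogram).Encoding()) // 2
	fmt.Println(newHeadChunk(EncNone).Encoding())      // 1 (fallback)
}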
@@ -874,7 +1206,7 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap
 }

 func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) {
-	if s.headChunk == nil {
+	if s.headChunk == nil || s.headChunk.chunk.NumSamples() == 0 {
 		// There is no head chunk, so nothing to m-map here.
 		return
 	}
|
||||||
series.pendingCommit = false
|
series.pendingCommit = false
|
||||||
series.Unlock()
|
series.Unlock()
|
||||||
}
|
}
|
||||||
|
for i := range a.histograms {
|
||||||
|
series = a.histogramSeries[i]
|
||||||
|
series.Lock()
|
||||||
|
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
|
||||||
|
series.pendingCommit = false
|
||||||
|
series.Unlock()
|
||||||
|
}
|
||||||
a.head.putAppendBuffer(a.samples)
|
a.head.putAppendBuffer(a.samples)
|
||||||
a.head.putExemplarBuffer(a.exemplars)
|
a.head.putExemplarBuffer(a.exemplars)
|
||||||
|
a.head.putHistogramBuffer(a.histograms)
|
||||||
a.head.putMetadataBuffer(a.metadata)
|
a.head.putMetadataBuffer(a.metadata)
|
||||||
a.samples = nil
|
a.samples = nil
|
||||||
a.exemplars = nil
|
a.exemplars = nil
|
||||||
|
a.histograms = nil
|
||||||
a.metadata = nil
|
a.metadata = nil
|
||||||
|
|
||||||
// Series are created in the head memory regardless of rollback. Thus we have
|
// Series are created in the head memory regardless of rollback. Thus we have
|
||||||
|
|
|
@@ -486,7 +486,7 @@ func (o mergedOOOChunks) Bytes() []byte {
 		panic(err)
 	}
 	it := o.Iterator(nil)
-	for it.Next() {
+	for it.Next() == chunkenc.ValFloat {
 		t, v := it.At()
 		app.Append(t, v)
 	}

@@ -535,7 +535,7 @@ func (b boundedChunk) Bytes() []byte {
 	xor := chunkenc.NewXORChunk()
 	a, _ := xor.Appender()
 	it := b.Iterator(nil)
-	for it.Next() {
+	for it.Next() == chunkenc.ValFloat {
 		t, v := it.At()
 		a.Append(t, v)
 	}
@@ -564,33 +564,35 @@ type boundedIterator struct {
 // until its able to find a sample within the bounds minT and maxT.
 // If there are samples within bounds it will advance one by one amongst them.
 // If there are no samples within bounds it will return false.
-func (b boundedIterator) Next() bool {
-	for b.Iterator.Next() {
+func (b boundedIterator) Next() chunkenc.ValueType {
+	for b.Iterator.Next() == chunkenc.ValFloat {
 		t, _ := b.Iterator.At()
 		if t < b.minT {
 			continue
 		} else if t > b.maxT {
-			return false
+			return chunkenc.ValNone
 		}
-		return true
+		return chunkenc.ValFloat
 	}
-	return false
+	return chunkenc.ValNone
 }

-func (b boundedIterator) Seek(t int64) bool {
+func (b boundedIterator) Seek(t int64) chunkenc.ValueType {
 	if t < b.minT {
 		// We must seek at least up to b.minT if it is asked for something before that.
-		ok := b.Iterator.Seek(b.minT)
-		if !ok {
-			return false
+		val := b.Iterator.Seek(b.minT)
+		if !(val == chunkenc.ValFloat) {
+			return chunkenc.ValNone
 		}
 		t, _ := b.Iterator.At()
-		return t <= b.maxT
+		if t <= b.maxT {
+			return chunkenc.ValFloat
+		}
 	}
 	if t > b.maxT {
 		// We seek anyway so that the subsequent Next() calls will also return false.
 		b.Iterator.Seek(t)
-		return false
+		return chunkenc.ValNone
 	}
 	return b.Iterator.Seek(t)
 }
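This is the recurring migration in the commit: iterator methods stop returning bool and instead return a chunkenc.ValueType, so callers learn whether the next sample is absent, a float, or a histogram. A self-contained sketch of the new contract with a toy float-only iterator:

package main

import "fmt"

type ValueType int

const (
	ValNone ValueType = iota // no more samples, or error
	ValFloat
	ValHistogram
)

type sliceIterator struct {
	ts []int64
	vs []float64
	i  int
}

// Next reports the type of the next sample instead of a bool, so
// histogram-aware callers can branch on the returned type.
func (it *sliceIterator) Next() ValueType {
	if it.i+1 >= len(it.ts) {
		return ValNone
	}
	it.i++
	return ValFloat
}

func (it *sliceIterator) At() (int64, float64) { return it.ts[it.i], it.vs[it.i] }

func main() {
	it := &sliceIterator{ts: []int64{0, 1, 2}, vs: []float64{10, 11, 12}, i: -1}
	for it.Next() == ValFloat { // replaces the old `for it.Next() {`
		t, v := it.At()
		fmt.Println(t, v)
	}
}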
@@ -684,21 +686,6 @@ func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, ch
 	return makeStopIterator(c.chunk, it, stopAfter)
 }

-func makeStopIterator(c chunkenc.Chunk, it chunkenc.Iterator, stopAfter int) chunkenc.Iterator {
-	// Re-use the Iterator object if it is a stopIterator.
-	if stopIter, ok := it.(*stopIterator); ok {
-		stopIter.Iterator = c.Iterator(stopIter.Iterator)
-		stopIter.i = -1
-		stopIter.stopAfter = stopAfter
-		return stopIter
-	}
-	return &stopIterator{
-		Iterator:  c.Iterator(it),
-		i:         -1,
-		stopAfter: stopAfter,
-	}
-}
-
 // stopIterator wraps an Iterator, but only returns the first
 // stopAfter values, if initialized with i=-1.
 type stopIterator struct {

@@ -707,10 +694,26 @@ type stopIterator struct {
 	i, stopAfter int
 }

-func (it *stopIterator) Next() bool {
+func (it *stopIterator) Next() chunkenc.ValueType {
 	if it.i+1 >= it.stopAfter {
-		return false
+		return chunkenc.ValNone
 	}
 	it.i++
 	return it.Iterator.Next()
 }
+
+func makeStopIterator(c chunkenc.Chunk, it chunkenc.Iterator, stopAfter int) chunkenc.Iterator {
+	// Re-use the Iterator object if it is a stopIterator.
+	if stopIter, ok := it.(*stopIterator); ok {
+		stopIter.Iterator = c.Iterator(stopIter.Iterator)
+		stopIter.i = -1
+		stopIter.stopAfter = stopAfter
+		return stopIter
+	}
+
+	return &stopIterator{
+		Iterator:  c.Iterator(it),
+		i:         -1,
+		stopAfter: stopAfter,
+	}
+}
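makeStopIterator keeps its allocation-saving trick of reinitializing a wrapper the caller hands back; only its position in the file changes. The reuse pattern, sketched with a trimmed-down wrapper over a slice:

package main

import "fmt"

// stopIterator yields at most stopAfter values, like the wrapper above.
type stopIterator struct {
	data         []int
	i, stopAfter int
}

func (it *stopIterator) Next() (int, bool) {
	if it.i+1 >= it.stopAfter || it.i+1 >= len(it.data) {
		return 0, false
	}
	it.i++
	return it.data[it.i], true
}

// makeStopIterator re-initializes a previously returned wrapper instead of
// allocating a new one, mirroring the type assertion in the diff.
func makeStopIterator(data []int, prev *stopIterator, stopAfter int) *stopIterator {
	if prev != nil {
		prev.data, prev.i, prev.stopAfter = data, -1, stopAfter
		return prev
	}
	return &stopIterator{data: data, i: -1, stopAfter: stopAfter}
}

func main() {
	it := makeStopIterator([]int{1, 2, 3, 4}, nil, 2)
	for v, ok := it.Next(); ok; v, ok = it.Next() {
		fmt.Println(v) // prints 1 and 2 only
	}
	it = makeStopIterator([]int{5, 6}, it, 10) // no new allocation
	for v, ok := it.Next(); ok; v, ok = it.Next() {
		fmt.Println(v)
	}
}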
@@ -41,7 +41,7 @@ func TestBoundedChunk(t *testing.T) {
 			name:       "bounds represent a single sample",
 			inputChunk: newTestChunk(10),
 			expSamples: []sample{
-				{0, 0},
+				{0, 0, nil, nil},
 			},
 		},
 		{

@@ -50,14 +50,14 @@ func TestBoundedChunk(t *testing.T) {
 			inputMinT: 1,
 			inputMaxT: 8,
 			expSamples: []sample{
-				{1, 1},
-				{2, 2},
-				{3, 3},
-				{4, 4},
-				{5, 5},
-				{6, 6},
-				{7, 7},
-				{8, 8},
+				{1, 1, nil, nil},
+				{2, 2, nil, nil},
+				{3, 3, nil, nil},
+				{4, 4, nil, nil},
+				{5, 5, nil, nil},
+				{6, 6, nil, nil},
+				{7, 7, nil, nil},
+				{8, 8, nil, nil},
 			},
 		},
 		{

@@ -66,12 +66,12 @@ func TestBoundedChunk(t *testing.T) {
 			inputMinT: 0,
 			inputMaxT: 5,
 			expSamples: []sample{
-				{0, 0},
-				{1, 1},
-				{2, 2},
-				{3, 3},
-				{4, 4},
-				{5, 5},
+				{0, 0, nil, nil},
+				{1, 1, nil, nil},
+				{2, 2, nil, nil},
+				{3, 3, nil, nil},
+				{4, 4, nil, nil},
+				{5, 5, nil, nil},
 			},
 		},
 		{

@@ -80,11 +80,11 @@ func TestBoundedChunk(t *testing.T) {
 			inputMinT: 5,
 			inputMaxT: 9,
 			expSamples: []sample{
-				{5, 5},
-				{6, 6},
-				{7, 7},
-				{8, 8},
-				{9, 9},
+				{5, 5, nil, nil},
+				{6, 6, nil, nil},
+				{7, 7, nil, nil},
+				{8, 8, nil, nil},
+				{9, 9, nil, nil},
 			},
 		},
 		{

@@ -95,11 +95,11 @@ func TestBoundedChunk(t *testing.T) {
 			initialSeek:    1,
 			seekIsASuccess: true,
 			expSamples: []sample{
-				{3, 3},
-				{4, 4},
-				{5, 5},
-				{6, 6},
-				{7, 7},
+				{3, 3, nil, nil},
+				{4, 4, nil, nil},
+				{5, 5, nil, nil},
+				{6, 6, nil, nil},
+				{7, 7, nil, nil},
 			},
 		},
 		{

@@ -110,9 +110,9 @@ func TestBoundedChunk(t *testing.T) {
 			initialSeek:    5,
 			seekIsASuccess: true,
 			expSamples: []sample{
-				{5, 5},
-				{6, 6},
-				{7, 7},
+				{5, 5, nil, nil},
+				{6, 6, nil, nil},
+				{7, 7, nil, nil},
 			},
 		},
 		{

@@ -144,23 +144,23 @@ func TestBoundedChunk(t *testing.T) {

 			if tc.initialSeek != 0 {
 				// Testing Seek()
-				ok := it.Seek(tc.initialSeek)
-				require.Equal(t, tc.seekIsASuccess, ok)
-				if ok {
+				val := it.Seek(tc.initialSeek)
+				require.Equal(t, tc.seekIsASuccess, val == chunkenc.ValFloat)
+				if val == chunkenc.ValFloat {
 					t, v := it.At()
-					samples = append(samples, sample{t, v})
+					samples = append(samples, sample{t, v, nil, nil})
 				}
 			}

 			// Testing Next()
-			for it.Next() {
+			for it.Next() == chunkenc.ValFloat {
 				t, v := it.At()
-				samples = append(samples, sample{t, v})
+				samples = append(samples, sample{t, v, nil, nil})
 			}

-			// it.Next() should keep returning false.
+			// it.Next() should keep returning no value.
 			for i := 0; i < 10; i++ {
-				require.False(t, it.Next())
+				require.True(t, it.Next() == chunkenc.ValNone)
 			}

 			require.Equal(t, tc.expSamples, samples)
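The `{0, 0}` literals become `{0, 0, nil, nil}` because the test helper's sample struct gains two histogram fields in this commit. Judging from the positional literals and the `sample{t: ..., h: ...}` usages later in this diff, the struct presumably looks roughly like this (a sketch, not the verbatim definition):

package tsdbtest // illustrative package name

import "github.com/prometheus/prometheus/model/histogram"

// sample: the first two fields match the old two-element literals; the two
// new pointer fields default to nil for plain float samples.
type sample struct {
	t  int64
	v  float64
	h  *histogram.Histogram      // non-nil for integer native histogram samples
	fh *histogram.FloatHistogram // non-nil for float native histogram samples
}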
@@ -38,7 +38,9 @@ import (

 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/value"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"

@@ -59,6 +61,7 @@ func newTestHead(t testing.TB, chunkRange int64, compressWAL, oooEnabled bool) (
 	opts.ChunkDirRoot = dir
 	opts.EnableExemplarStorage = true
 	opts.MaxExemplars.Store(config.DefaultExemplarsConfig.MaxExemplars)
+	opts.EnableNativeHistograms.Store(true)
 	if oooEnabled {
 		opts.OutOfOrderTimeWindow.Store(10 * time.Minute.Milliseconds())
 	}
@@ -513,18 +516,18 @@ func TestHead_ReadWAL(t *testing.T) {
 			require.Equal(t, labels.FromStrings("a", "3"), s100.lset)

 			expandChunk := func(c chunkenc.Iterator) (x []sample) {
-				for c.Next() {
+				for c.Next() == chunkenc.ValFloat {
 					t, v := c.At()
 					x = append(x, sample{t: t, v: v})
 				}
 				require.NoError(t, c.Err())
 				return x
 			}
-			require.Equal(t, []sample{{100, 2}, {101, 5}}, expandChunk(s10.iterator(0, nil, head.chunkDiskMapper, nil, nil)))
-			require.Equal(t, []sample{{101, 6}}, expandChunk(s50.iterator(0, nil, head.chunkDiskMapper, nil, nil)))
+			require.Equal(t, []sample{{100, 2, nil, nil}, {101, 5, nil, nil}}, expandChunk(s10.iterator(0, nil, head.chunkDiskMapper, nil, nil)))
+			require.Equal(t, []sample{{101, 6, nil, nil}}, expandChunk(s50.iterator(0, nil, head.chunkDiskMapper, nil, nil)))
 			// The samples before the new series record should be discarded since a duplicate record
 			// is only possible when old samples were compacted.
-			require.Equal(t, []sample{{101, 7}}, expandChunk(s100.iterator(0, nil, head.chunkDiskMapper, nil, nil)))
+			require.Equal(t, []sample{{101, 7, nil, nil}}, expandChunk(s100.iterator(0, nil, head.chunkDiskMapper, nil, nil)))

 			q, err := head.ExemplarQuerier(context.Background())
 			require.NoError(t, err)

@@ -590,8 +593,8 @@ func TestHead_WALMultiRef(t *testing.T) {
 	// The samples before the new ref should be discarded since Head truncation
 	// happens only after compacting the Head.
 	require.Equal(t, map[string][]tsdbutil.Sample{`{foo="bar"}`: {
-		sample{1700, 3},
-		sample{2000, 4},
+		sample{1700, 3, nil, nil},
+		sample{2000, 4, nil, nil},
 	}}, series)
 }
@@ -957,7 +960,7 @@ func TestDeleteUntilCurMax(t *testing.T) {
 	require.True(t, res.Next(), "series is not present")
 	s := res.At()
 	it := s.Iterator()
-	require.False(t, it.Next(), "expected no samples")
+	require.Equal(t, chunkenc.ValNone, it.Next(), "expected no samples")
 	for res.Next() {
 	}
 	require.NoError(t, res.Err())

@@ -976,7 +979,7 @@ func TestDeleteUntilCurMax(t *testing.T) {
 	it = exps.Iterator()
 	resSamples, err := storage.ExpandSamples(it, newSample)
 	require.NoError(t, err)
-	require.Equal(t, []tsdbutil.Sample{sample{11, 1}}, resSamples)
+	require.Equal(t, []tsdbutil.Sample{sample{11, 1, nil, nil}}, resSamples)
 	for res.Next() {
 	}
 	require.NoError(t, res.Err())

@@ -1093,7 +1096,7 @@ func TestDelete_e2e(t *testing.T) {
 			v := rand.Float64()
 			_, err := app.Append(0, ls, ts, v)
 			require.NoError(t, err)
-			series = append(series, sample{ts, v})
+			series = append(series, sample{ts, v, nil, nil})
 			ts += rand.Int63n(timeInterval) + 1
 		}
 		seriesMap[labels.New(l...).String()] = series

@@ -1160,7 +1163,7 @@ func TestDelete_e2e(t *testing.T) {
 			eok, rok := expSs.Next(), ss.Next()
 			// Skip a series if iterator is empty.
 			if rok {
-				for !ss.At().Iterator().Next() {
+				for ss.At().Iterator().Next() == chunkenc.ValNone {
 					rok = ss.Next()
 					if !rok {
 						break
@@ -1313,6 +1316,61 @@ func TestMemSeries_append(t *testing.T) {
 	}
 }

+func TestMemSeries_appendHistogram(t *testing.T) {
+	dir := t.TempDir()
+	// This is usually taken from the Head, but passing manually here.
+	chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, chunkDiskMapper.Close())
+	}()
+	chunkRange := int64(1000)
+
+	s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled)
+
+	histograms := GenerateTestHistograms(4)
+	histogramWithOneMoreBucket := histograms[3].Copy()
+	histogramWithOneMoreBucket.Count++
+	histogramWithOneMoreBucket.Sum += 1.23
+	histogramWithOneMoreBucket.PositiveSpans[1].Length = 3
+	histogramWithOneMoreBucket.PositiveBuckets = append(histogramWithOneMoreBucket.PositiveBuckets, 1)
+
+	// Add first two samples at the very end of a chunk range and the next two
+	// on and after it.
+	// New chunk must correctly be cut at 1000.
+	ok, chunkCreated := s.appendHistogram(998, histograms[0], 0, chunkDiskMapper, chunkRange)
+	require.True(t, ok, "append failed")
+	require.True(t, chunkCreated, "first sample created chunk")
+
+	ok, chunkCreated = s.appendHistogram(999, histograms[1], 0, chunkDiskMapper, chunkRange)
+	require.True(t, ok, "append failed")
+	require.False(t, chunkCreated, "second sample should use same chunk")
+
+	ok, chunkCreated = s.appendHistogram(1000, histograms[2], 0, chunkDiskMapper, chunkRange)
+	require.True(t, ok, "append failed")
+	require.True(t, chunkCreated, "expected new chunk on boundary")
+
+	ok, chunkCreated = s.appendHistogram(1001, histograms[3], 0, chunkDiskMapper, chunkRange)
+	require.True(t, ok, "append failed")
+	require.False(t, chunkCreated, "second sample should use same chunk")
+
+	require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk")
+	require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range")
+	require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range")
+	require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range")
+	require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range")
+
+	ok, chunkCreated = s.appendHistogram(1002, histogramWithOneMoreBucket, 0, chunkDiskMapper, chunkRange)
+	require.True(t, ok, "append failed")
+	require.False(t, chunkCreated, "third sample should trigger a re-encoded chunk")
+
+	require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk")
+	require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range")
+	require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range")
+	require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range")
+	require.Equal(t, int64(1002), s.headChunk.maxTime, "wrong chunk range")
+}
+
 func TestMemSeries_append_atVariableRate(t *testing.T) {
 	const samplesPerChunk = 120
 	dir := t.TempDir()
@@ -2073,19 +2131,19 @@ func TestOutOfOrderSamplesMetric(t *testing.T) {
 	require.NoError(t, app.Commit())

 	// Test out of order metric.
-	require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
+	require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))
 	app = db.Appender(ctx)
 	_, err = app.Append(0, labels.FromStrings("a", "b"), 2, 99)
 	require.Equal(t, storage.ErrOutOfOrderSample, err)
-	require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
+	require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))

 	_, err = app.Append(0, labels.FromStrings("a", "b"), 3, 99)
 	require.Equal(t, storage.ErrOutOfOrderSample, err)
-	require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
+	require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))

 	_, err = app.Append(0, labels.FromStrings("a", "b"), 4, 99)
 	require.Equal(t, storage.ErrOutOfOrderSample, err)
-	require.Equal(t, 3.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
+	require.Equal(t, 3.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))
 	require.NoError(t, app.Commit())

 	// Compact Head to test out of bound metric.

@@ -2101,11 +2159,11 @@ func TestOutOfOrderSamplesMetric(t *testing.T) {
 	app = db.Appender(ctx)
 	_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()-2, 99)
 	require.Equal(t, storage.ErrOutOfBounds, err)
-	require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples))
+	require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat)))

 	_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()-1, 99)
 	require.Equal(t, storage.ErrOutOfBounds, err)
-	require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples))
+	require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat)))
 	require.NoError(t, app.Commit())

 	// Some more valid samples for out of order.

@@ -2120,15 +2178,15 @@ func TestOutOfOrderSamplesMetric(t *testing.T) {
 	app = db.Appender(ctx)
 	_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+2, 99)
 	require.Equal(t, storage.ErrOutOfOrderSample, err)
-	require.Equal(t, 4.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
+	require.Equal(t, 4.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))

 	_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+3, 99)
 	require.Equal(t, storage.ErrOutOfOrderSample, err)
-	require.Equal(t, 5.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
+	require.Equal(t, 5.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))

 	_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+4, 99)
 	require.Equal(t, storage.ErrOutOfOrderSample, err)
-	require.Equal(t, 6.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
+	require.Equal(t, 6.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))
 	require.NoError(t, app.Commit())
 }
@@ -2496,47 +2554,40 @@ func TestIteratorSeekIntoBuffer(t *testing.T) {
 	it := s.iterator(s.headChunkID(len(s.mmappedChunks)), nil, chunkDiskMapper, nil, nil)

 	// First point.
-	ok := it.Seek(0)
-	require.True(t, ok)
+	require.Equal(t, chunkenc.ValFloat, it.Seek(0))
 	ts, val := it.At()
 	require.Equal(t, int64(0), ts)
 	require.Equal(t, float64(0), val)

 	// Advance one point.
-	ok = it.Next()
-	require.True(t, ok)
+	require.Equal(t, chunkenc.ValFloat, it.Next())
 	ts, val = it.At()
 	require.Equal(t, int64(1), ts)
 	require.Equal(t, float64(1), val)

 	// Seeking an older timestamp shouldn't cause the iterator to go backwards.
-	ok = it.Seek(0)
-	require.True(t, ok)
+	require.Equal(t, chunkenc.ValFloat, it.Seek(0))
 	ts, val = it.At()
 	require.Equal(t, int64(1), ts)
 	require.Equal(t, float64(1), val)

 	// Seek into the buffer.
-	ok = it.Seek(3)
-	require.True(t, ok)
+	require.Equal(t, chunkenc.ValFloat, it.Seek(3))
 	ts, val = it.At()
 	require.Equal(t, int64(3), ts)
 	require.Equal(t, float64(3), val)

 	// Iterate through the rest of the buffer.
 	for i := 4; i < 7; i++ {
-		ok = it.Next()
-		require.True(t, ok)
+		require.Equal(t, chunkenc.ValFloat, it.Next())
 		ts, val = it.At()
 		require.Equal(t, int64(i), ts)
 		require.Equal(t, float64(i), val)
 	}

 	// Run out of elements in the iterator.
-	ok = it.Next()
-	require.False(t, ok)
-	ok = it.Seek(7)
-	require.False(t, ok)
+	require.Equal(t, chunkenc.ValNone, it.Next())
+	require.Equal(t, chunkenc.ValNone, it.Seek(7))
 }
 // Tests https://github.com/prometheus/prometheus/issues/8221.

@@ -2585,7 +2636,7 @@ func TestChunkNotFoundHeadGCRace(t *testing.T) {

 	// Now consume after compaction when it's gone.
 	it := s.Iterator()
-	for it.Next() {
+	for it.Next() == chunkenc.ValFloat {
 		_, _ = it.At()
 	}
 	// It should error here without any fix for the mentioned issue.

@@ -2593,7 +2644,7 @@ func TestChunkNotFoundHeadGCRace(t *testing.T) {
 	for ss.Next() {
 		s = ss.At()
 		it := s.Iterator()
-		for it.Next() {
+		for it.Next() == chunkenc.ValFloat {
 			_, _ = it.At()
 		}
 		require.NoError(t, it.Err())

@@ -2624,7 +2675,7 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) {
 		ref, err = app.Append(ref, labels.FromStrings("a", "b"), ts, float64(i))
 		require.NoError(t, err)
 		maxt = ts
-		expSamples = append(expSamples, sample{ts, float64(i)})
+		expSamples = append(expSamples, sample{ts, float64(i), nil, nil})
 	}
 	require.NoError(t, app.Commit())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAppendHistogram(t *testing.T) {
|
||||||
|
l := labels.Labels{{Name: "a", Value: "b"}}
|
||||||
|
for _, numHistograms := range []int{1, 10, 150, 200, 250, 300} {
|
||||||
|
t.Run(fmt.Sprintf("%d", numHistograms), func(t *testing.T) {
|
||||||
|
head, _ := newTestHead(t, 1000, false, false)
|
||||||
|
t.Cleanup(func() {
|
||||||
|
require.NoError(t, head.Close())
|
||||||
|
})
|
||||||
|
|
||||||
|
require.NoError(t, head.Init(0))
|
||||||
|
app := head.Appender(context.Background())
|
||||||
|
|
||||||
|
type timedHistogram struct {
|
||||||
|
t int64
|
||||||
|
h *histogram.Histogram
|
||||||
|
}
|
||||||
|
expHistograms := make([]timedHistogram, 0, numHistograms)
|
||||||
|
for i, h := range GenerateTestHistograms(numHistograms) {
|
||||||
|
_, err := app.AppendHistogram(0, l, int64(i), h)
|
||||||
|
require.NoError(t, err)
|
||||||
|
expHistograms = append(expHistograms, timedHistogram{int64(i), h})
|
||||||
|
}
|
||||||
|
require.NoError(t, app.Commit())
|
||||||
|
|
||||||
|
q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime())
|
||||||
|
require.NoError(t, err)
|
||||||
|
t.Cleanup(func() {
|
||||||
|
require.NoError(t, q.Close())
|
||||||
|
})
|
||||||
|
|
||||||
|
ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
|
||||||
|
|
||||||
|
require.True(t, ss.Next())
|
||||||
|
s := ss.At()
|
||||||
|
require.False(t, ss.Next())
|
||||||
|
|
||||||
|
it := s.Iterator()
|
||||||
|
actHistograms := make([]timedHistogram, 0, len(expHistograms))
|
||||||
|
for it.Next() == chunkenc.ValHistogram {
|
||||||
|
t, h := it.AtHistogram()
|
||||||
|
actHistograms = append(actHistograms, timedHistogram{t, h})
|
||||||
|
}
|
||||||
|
|
||||||
|
require.Equal(t, expHistograms, actHistograms)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
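TestAppendHistogram also demonstrates the read path: the iterator advertises chunkenc.ValHistogram and AtHistogram returns the decoded value. The consuming loop can be factored like this, using only calls that appear in the test above:

package tsdbtest // illustrative

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// collectHistograms drains an iterator, keeping native histogram samples.
func collectHistograms(it chunkenc.Iterator) (ts []int64, hs []*histogram.Histogram) {
	for it.Next() == chunkenc.ValHistogram {
		t, h := it.AtHistogram()
		ts = append(ts, t)
		hs = append(hs, h)
	}
	return ts, hs
}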
+func TestHistogramInWALAndMmapChunk(t *testing.T) {
+	head, _ := newTestHead(t, 1000, false, false)
+	t.Cleanup(func() {
+		require.NoError(t, head.Close())
+	})
+	require.NoError(t, head.Init(0))
+
+	// Series with only histograms.
+	s1 := labels.Labels{{Name: "a", Value: "b1"}}
+	k1 := s1.String()
+	numHistograms := 450
+	exp := map[string][]tsdbutil.Sample{}
+	app := head.Appender(context.Background())
+	for i, h := range GenerateTestHistograms(numHistograms) {
+		h.Count = h.Count * 2
+		h.NegativeSpans = h.PositiveSpans
+		h.NegativeBuckets = h.PositiveBuckets
+		_, err := app.AppendHistogram(0, s1, int64(i), h)
+		require.NoError(t, err)
+		exp[k1] = append(exp[k1], sample{t: int64(i), h: h.Copy()})
+		if i%5 == 0 {
+			require.NoError(t, app.Commit())
+			app = head.Appender(context.Background())
+		}
+	}
+	require.NoError(t, app.Commit())
+
+	// There should be 3 mmap chunks in s1.
+	ms := head.series.getByHash(s1.Hash(), s1)
+	require.Len(t, ms.mmappedChunks, 3)
+	expMmapChunks := make([]*mmappedChunk, 0, 3)
+	for _, mmap := range ms.mmappedChunks {
+		require.Greater(t, mmap.numSamples, uint16(0))
+		cpy := *mmap
+		expMmapChunks = append(expMmapChunks, &cpy)
+	}
+	expHeadChunkSamples := ms.headChunk.chunk.NumSamples()
+	require.Greater(t, expHeadChunkSamples, 0)
+
+	// Series with mix of histograms and float.
+	s2 := labels.Labels{{Name: "a", Value: "b2"}}
+	k2 := s2.String()
+	app = head.Appender(context.Background())
+	ts := 0
+	for _, h := range GenerateTestHistograms(200) {
+		ts++
+		h.Count = h.Count * 2
+		h.NegativeSpans = h.PositiveSpans
+		h.NegativeBuckets = h.PositiveBuckets
+		_, err := app.AppendHistogram(0, s2, int64(ts), h)
+		require.NoError(t, err)
+		exp[k2] = append(exp[k2], sample{t: int64(ts), h: h.Copy()})
+		if ts%20 == 0 {
+			require.NoError(t, app.Commit())
+			app = head.Appender(context.Background())
+			// Add some float.
+			for i := 0; i < 10; i++ {
+				ts++
+				_, err := app.Append(0, s2, int64(ts), float64(ts))
+				require.NoError(t, err)
+				exp[k2] = append(exp[k2], sample{t: int64(ts), v: float64(ts)})
+			}
+			require.NoError(t, app.Commit())
+			app = head.Appender(context.Background())
+		}
+	}
+	require.NoError(t, app.Commit())
+
+	// Restart head.
+	require.NoError(t, head.Close())
+	w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false)
+	require.NoError(t, err)
+	head, err = NewHead(nil, nil, w, nil, head.opts, nil)
+	require.NoError(t, err)
+	require.NoError(t, head.Init(0))
+
+	// Checking contents of s1.
+	ms = head.series.getByHash(s1.Hash(), s1)
+	require.Equal(t, expMmapChunks, ms.mmappedChunks)
+	for _, mmap := range ms.mmappedChunks {
+		require.Greater(t, mmap.numSamples, uint16(0))
+	}
+	require.Equal(t, expHeadChunkSamples, ms.headChunk.chunk.NumSamples())
+
+	q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime())
+	require.NoError(t, err)
+	act := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "a", "b.*"))
+	require.Equal(t, exp, act)
+}
+
 func TestChunkSnapshot(t *testing.T) {
 	head, _ := newTestHead(t, 120*4, false, false)
 	defer func() {

@@ -2849,10 +3038,10 @@ func TestChunkSnapshot(t *testing.T) {
 		for i := 1; i <= numSeries; i++ {
 			lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i))
 			lblStr := lbls.String()
-			// Should m-map at least 1 chunk.
-			for ts := int64(1); ts <= 200; ts++ {
+			// 240 samples should m-map at least 1 chunk.
+			for ts := int64(1); ts <= 240; ts++ {
 				val := rand.Float64()
-				expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val})
+				expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val, nil, nil})
 				ref, err := app.Append(0, lbls, ts, val)
 				require.NoError(t, err)

@@ -2883,7 +3072,6 @@ func TestChunkSnapshot(t *testing.T) {
 			}, nil))
 			require.NoError(t, err)
 		}
-
 	}

 	// These references should be the ones used for the snapshot.

@@ -2910,10 +3098,10 @@ func TestChunkSnapshot(t *testing.T) {
 		for i := 1; i <= numSeries; i++ {
 			lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i))
 			lblStr := lbls.String()
-			// Should m-map at least 1 chunk.
-			for ts := int64(201); ts <= 400; ts++ {
+			// 240 samples should m-map at least 1 chunk.
+			for ts := int64(241); ts <= 480; ts++ {
 				val := rand.Float64()
-				expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val})
+				expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val, nil, nil})
 				ref, err := app.Append(0, lbls, ts, val)
 				require.NoError(t, err)

@@ -2945,7 +3133,6 @@ func TestChunkSnapshot(t *testing.T) {
 			require.NoError(t, err)
 		}
 	}
-
 	{
 		// Close Head and verify that new snapshot was not created.
 		head.opts.EnableMemorySnapshotOnShutdown = false
@@ -3056,6 +3243,373 @@ func TestSnapshotError(t *testing.T) {
 	require.Equal(t, 0, len(tm))
 }

+func TestHistogramMetrics(t *testing.T) {
+	head, _ := newTestHead(t, 1000, false, false)
+	t.Cleanup(func() {
+		require.NoError(t, head.Close())
+	})
+	require.NoError(t, head.Init(0))
+
+	expHSeries, expHSamples := 0, 0
+
+	for x := 0; x < 5; x++ {
+		expHSeries++
+		l := labels.Labels{{Name: "a", Value: fmt.Sprintf("b%d", x)}}
+		for i, h := range GenerateTestHistograms(10) {
+			app := head.Appender(context.Background())
+			_, err := app.AppendHistogram(0, l, int64(i), h)
+			require.NoError(t, err)
+			require.NoError(t, app.Commit())
+			expHSamples++
+		}
+	}
+
+	require.Equal(t, float64(expHSamples), prom_testutil.ToFloat64(head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram)))
+
+	require.NoError(t, head.Close())
+	w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false)
+	require.NoError(t, err)
+	head, err = NewHead(nil, nil, w, nil, head.opts, nil)
+	require.NoError(t, err)
+	require.NoError(t, head.Init(0))
+
+	require.Equal(t, float64(0), prom_testutil.ToFloat64(head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram))) // Counter reset.
+}
+
+func TestHistogramStaleSample(t *testing.T) {
+	l := labels.Labels{{Name: "a", Value: "b"}}
+	numHistograms := 20
+	head, _ := newTestHead(t, 100000, false, false)
+	t.Cleanup(func() {
+		require.NoError(t, head.Close())
+	})
+	require.NoError(t, head.Init(0))
+
+	type timedHistogram struct {
+		t int64
+		h *histogram.Histogram
+	}
+	expHistograms := make([]timedHistogram, 0, numHistograms)
+
+	testQuery := func(numStale int) {
+		q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime())
+		require.NoError(t, err)
+		t.Cleanup(func() {
+			require.NoError(t, q.Close())
+		})
+
+		ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+
+		require.True(t, ss.Next())
+		s := ss.At()
+		require.False(t, ss.Next())
+
+		it := s.Iterator()
+		actHistograms := make([]timedHistogram, 0, len(expHistograms))
+		for it.Next() == chunkenc.ValHistogram {
+			t, h := it.AtHistogram()
+			actHistograms = append(actHistograms, timedHistogram{t, h})
+		}
+
+		// We cannot compare StaleNAN with require.Equal, hence checking each histogram manually.
+		require.Equal(t, len(expHistograms), len(actHistograms))
+		actNumStale := 0
+		for i, eh := range expHistograms {
+			ah := actHistograms[i]
+			if value.IsStaleNaN(eh.h.Sum) {
+				actNumStale++
+				require.True(t, value.IsStaleNaN(ah.h.Sum))
+				// To make require.Equal work.
+				ah.h.Sum = 0
+				eh.h = eh.h.Copy()
+				eh.h.Sum = 0
+			}
+			require.Equal(t, eh, ah)
+		}
+		require.Equal(t, numStale, actNumStale)
+	}
+
+	// Adding stale in the same appender.
+	app := head.Appender(context.Background())
+	for _, h := range GenerateTestHistograms(numHistograms) {
+		_, err := app.AppendHistogram(0, l, 100*int64(len(expHistograms)), h)
+		require.NoError(t, err)
+		expHistograms = append(expHistograms, timedHistogram{100 * int64(len(expHistograms)), h})
+	}
+	// +1 so that delta-of-delta is not 0.
+	_, err := app.Append(0, l, 100*int64(len(expHistograms))+1, math.Float64frombits(value.StaleNaN))
+	require.NoError(t, err)
+	expHistograms = append(expHistograms, timedHistogram{100*int64(len(expHistograms)) + 1, &histogram.Histogram{Sum: math.Float64frombits(value.StaleNaN)}})
+	require.NoError(t, app.Commit())
+
+	// Only 1 chunk in the memory, no m-mapped chunk.
+	s := head.series.getByHash(l.Hash(), l)
+	require.NotNil(t, s)
+	require.Equal(t, 0, len(s.mmappedChunks))
+	testQuery(1)
+
+	// Adding stale in different appender and continuing series after a stale sample.
+	app = head.Appender(context.Background())
+	for _, h := range GenerateTestHistograms(2 * numHistograms)[numHistograms:] {
+		_, err := app.AppendHistogram(0, l, 100*int64(len(expHistograms)), h)
+		require.NoError(t, err)
+		expHistograms = append(expHistograms, timedHistogram{100 * int64(len(expHistograms)), h})
+	}
+	require.NoError(t, app.Commit())
+
+	app = head.Appender(context.Background())
+	// +1 so that delta-of-delta is not 0.
+	_, err = app.Append(0, l, 100*int64(len(expHistograms))+1, math.Float64frombits(value.StaleNaN))
+	require.NoError(t, err)
+	expHistograms = append(expHistograms, timedHistogram{100*int64(len(expHistograms)) + 1, &histogram.Histogram{Sum: math.Float64frombits(value.StaleNaN)}})
+	require.NoError(t, app.Commit())
+
+	// Total 2 chunks, 1 m-mapped.
+	s = head.series.getByHash(l.Hash(), l)
+	require.NotNil(t, s)
+	require.Equal(t, 1, len(s.mmappedChunks))
+	testQuery(2)
+}
+
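As the test shows, histogram series reuse the float staleness marker: a histogram whose Sum carries the stale NaN bit pattern marks the series stale. A standalone sketch of the marker check; the constant below is assumed to match model/value.StaleNaN:

package main

import (
	"fmt"
	"math"
)

// staleNaN is the special NaN bit pattern Prometheus uses as a staleness
// marker (assumed to be the value of model/value.StaleNaN).
const staleNaN uint64 = 0x7ff0000000000002

func isStaleNaN(v float64) bool {
	return math.Float64bits(v) == staleNaN
}

func main() {
	marker := math.Float64frombits(staleNaN)
	fmt.Println(math.IsNaN(marker))     // true: it is a NaN...
	fmt.Println(isStaleNaN(marker))     // true: ...with the exact stale bit pattern
	fmt.Println(isStaleNaN(math.NaN())) // false: ordinary NaNs are not stale
}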
func TestHistogramCounterResetHeader(t *testing.T) {
	l := labels.Labels{{Name: "a", Value: "b"}}
	head, _ := newTestHead(t, 1000, false, false)
	t.Cleanup(func() {
		require.NoError(t, head.Close())
	})
	require.NoError(t, head.Init(0))

	ts := int64(0)
	appendHistogram := func(h *histogram.Histogram) {
		ts++
		app := head.Appender(context.Background())
		_, err := app.AppendHistogram(0, l, ts, h)
		require.NoError(t, err)
		require.NoError(t, app.Commit())
	}

	var expHeaders []chunkenc.CounterResetHeader
	checkExpCounterResetHeader := func(newHeaders ...chunkenc.CounterResetHeader) {
		expHeaders = append(expHeaders, newHeaders...)

		ms, _, err := head.getOrCreate(l.Hash(), l)
		require.NoError(t, err)
		require.Len(t, ms.mmappedChunks, len(expHeaders)-1) // One is the head chunk.

		for i, mmapChunk := range ms.mmappedChunks {
			chk, err := head.chunkDiskMapper.Chunk(mmapChunk.ref)
			require.NoError(t, err)
			require.Equal(t, expHeaders[i], chk.(*chunkenc.HistogramChunk).GetCounterResetHeader())
		}
		require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunk.chunk.(*chunkenc.HistogramChunk).GetCounterResetHeader())
	}

	h := GenerateTestHistograms(1)[0]
	if len(h.NegativeBuckets) == 0 {
		h.NegativeSpans = append([]histogram.Span{}, h.PositiveSpans...)
		h.NegativeBuckets = append([]int64{}, h.PositiveBuckets...)
	}
	h.PositiveBuckets = []int64{100, 1, 1, 1}
	h.NegativeBuckets = []int64{100, 1, 1, 1}
	h.Count = 1000

	// First histogram is UnknownCounterReset.
	appendHistogram(h)
	checkExpCounterResetHeader(chunkenc.UnknownCounterReset)

	// Another normal histogram.
	h.Count++
	appendHistogram(h)
	checkExpCounterResetHeader()

	// Counter reset via a decrease in Count.
	h.Count--
	appendHistogram(h)
	checkExpCounterResetHeader(chunkenc.CounterReset)

	// Append 250 histograms, cutting two more chunks, neither with a counter reset.
	for i := 0; i < 250; i++ {
		appendHistogram(h)
	}
	checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset)

	// Changing the schema will cut a new chunk with unknown counter reset.
	h.Schema++
	appendHistogram(h)
	checkExpCounterResetHeader(chunkenc.UnknownCounterReset)

	// Changing the zero threshold will cut a new chunk with unknown counter reset.
	h.ZeroThreshold += 0.01
	appendHistogram(h)
	checkExpCounterResetHeader(chunkenc.UnknownCounterReset)

	// Counter reset by removing a positive bucket.
	h.PositiveSpans[1].Length--
	h.PositiveBuckets = h.PositiveBuckets[1:]
	appendHistogram(h)
	checkExpCounterResetHeader(chunkenc.CounterReset)

	// Counter reset by removing a negative bucket.
	h.NegativeSpans[1].Length--
	h.NegativeBuckets = h.NegativeBuckets[1:]
	appendHistogram(h)
	checkExpCounterResetHeader(chunkenc.CounterReset)

	// Append another 250 histograms, just to have some non-counter-reset chunks in between.
	for i := 0; i < 250; i++ {
		appendHistogram(h)
	}
	checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset)

	// Counter reset via a decrease in a positive bucket.
	h.PositiveBuckets[len(h.PositiveBuckets)-1]--
	appendHistogram(h)
	checkExpCounterResetHeader(chunkenc.CounterReset)

	// Counter reset via a decrease in a negative bucket.
	h.NegativeBuckets[len(h.NegativeBuckets)-1]--
	appendHistogram(h)
	checkExpCounterResetHeader(chunkenc.CounterReset)
}
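The sequence above exercises all three chunkenc.CounterResetHeader outcomes: UnknownCounterReset when a chunk starts without a comparable predecessor, NotCounterReset for chunks cut purely because the previous one filled up, and CounterReset when the new histogram cannot be a continuation of the old one. As a rough illustration of the detection idea (not the actual chunkenc logic, which also compares schemas, spans, and absolute per-bucket counts), a reset is suspected whenever a cumulative quantity goes backwards:

// looksLikeCounterReset is an illustrative approximation only: a drop in
// Count or ZeroCount, or a shrinking bucket layout, is taken as evidence
// of a counter reset. The real detection also decodes the delta-encoded
// buckets and compares absolute counts bucket by bucket.
func looksLikeCounterReset(prev, cur *histogram.Histogram) bool {
	if cur.Count < prev.Count || cur.ZeroCount < prev.ZeroCount {
		return true
	}
	return len(cur.PositiveBuckets) < len(prev.PositiveBuckets) ||
		len(cur.NegativeBuckets) < len(prev.NegativeBuckets)
}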
func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
	dir := t.TempDir()
	opts := DefaultOptions()
	opts.EnableNativeHistograms = true
	db, err := Open(dir, nil, nil, opts, nil)
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, db.Close())
	})
	db.DisableCompactions()

	hists := GenerateTestHistograms(10)
	lbls := labels.Labels{{Name: "a", Value: "b"}}

	type result struct {
		t  int64
		v  float64
		h  *histogram.Histogram
		vt chunkenc.ValueType
	}
	expResult := []result{}
	ref := storage.SeriesRef(0)
	addFloat64Sample := func(app storage.Appender, ts int64, v float64) {
		ref, err = app.Append(ref, lbls, ts, v)
		require.NoError(t, err)
		expResult = append(expResult, result{
			t:  ts,
			v:  v,
			vt: chunkenc.ValFloat,
		})
	}
	addHistogramSample := func(app storage.Appender, ts int64, h *histogram.Histogram) {
		ref, err = app.AppendHistogram(ref, lbls, ts, h)
		require.NoError(t, err)
		expResult = append(expResult, result{
			t:  ts,
			h:  h,
			vt: chunkenc.ValHistogram,
		})
	}
	checkExpChunks := func(count int) {
		ms, created, err := db.Head().getOrCreate(lbls.Hash(), lbls)
		require.NoError(t, err)
		require.False(t, created)
		require.NotNil(t, ms)
		require.Len(t, ms.mmappedChunks, count-1) // One will be the head chunk.
	}

	// Only histograms in first commit.
	app := db.Appender(context.Background())
	addHistogramSample(app, 1, hists[1])
	require.NoError(t, app.Commit())
	checkExpChunks(1)

	// Only float64 in second commit, a new chunk should be cut.
	app = db.Appender(context.Background())
	addFloat64Sample(app, 2, 2)
	require.NoError(t, app.Commit())
	checkExpChunks(2)

	// Out of order histogram is handled correctly for a float64 chunk. No new chunk.
	app = db.Appender(context.Background())
	_, err = app.AppendHistogram(ref, lbls, 1, hists[2])
	require.Equal(t, storage.ErrOutOfOrderSample, err)
	require.NoError(t, app.Commit())

	// Only histograms in third commit to check float64 -> histogram transition.
	app = db.Appender(context.Background())
	addHistogramSample(app, 3, hists[3])
	require.NoError(t, app.Commit())
	checkExpChunks(3)

	// Out of order float64 is handled correctly for a histogram chunk. No new chunk.
	app = db.Appender(context.Background())
	_, err = app.Append(ref, lbls, 1, 2)
	require.Equal(t, storage.ErrOutOfOrderSample, err)
	require.NoError(t, app.Commit())

	// Combination of histograms and float64 in the same commit. The behaviour is undefined, but we want to also
	// verify how TSDB would behave. The histogram is committed after the floats, hence it is considered out of order.
	app = db.Appender(context.Background())
	addFloat64Sample(app, 4, 4)
	// This won't be committed.
	addHistogramSample(app, 5, hists[5])
	expResult = expResult[0 : len(expResult)-1]
	addFloat64Sample(app, 6, 6)
	require.NoError(t, app.Commit())
	checkExpChunks(4) // Only 1 new chunk for float64.

	// Again the histograms are committed after the floats, hence the first histogram is out of order.
	app = db.Appender(context.Background())
	// Out of order w.r.t. the next float64 sample that is committed first.
	addHistogramSample(app, 7, hists[7])
	expResult = expResult[0 : len(expResult)-1]
	addFloat64Sample(app, 8, 9)
	addHistogramSample(app, 9, hists[9])
	require.NoError(t, app.Commit())
	checkExpChunks(5) // float64 added to old chunk, only 1 new for histograms.

	// Query back and expect same order of samples.
	q, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64)
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, q.Close())
	})

	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
	require.True(t, ss.Next())
	s := ss.At()
	it := s.Iterator()
	expIdx := 0
loop:
	for {
		vt := it.Next()
		switch vt {
		case chunkenc.ValNone:
			require.Equal(t, len(expResult), expIdx)
			break loop
		case chunkenc.ValFloat:
			ts, v := it.At()
			require.Equal(t, expResult[expIdx].t, ts)
			require.Equal(t, expResult[expIdx].v, v)
		case chunkenc.ValHistogram:
			ts, h := it.AtHistogram()
			require.Equal(t, expResult[expIdx].t, ts)
			require.Equal(t, expResult[expIdx].h, h)
		default:
			require.Error(t, fmt.Errorf("unexpected ValueType %v", vt))
		}
		require.Equal(t, expResult[expIdx].vt, vt)
		expIdx++
	}
	require.NoError(t, it.Err())
	require.NoError(t, ss.Err())
	require.Equal(t, len(expResult), expIdx)
	require.False(t, ss.Next()) // Only 1 series.
}
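The chunk counts asserted above follow from a structural invariant: a chunk stores samples of a single encoding (XOR chunks for floats, the new histogram chunks for native histograms), so every float-to-histogram or histogram-to-float transition within a series has to cut a new head chunk, while rejected out-of-order appends leave the chunk layout untouched.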
// Tests https://github.com/prometheus/prometheus/issues/9725.
func TestChunkSnapshotReplayBug(t *testing.T) {
	dir := t.TempDir()
@ -3230,7 +3784,7 @@ func TestOOOWalReplay(t *testing.T) {

	it := xor.Iterator(nil)
	actOOOSamples := make([]sample, 0, len(expOOOSamples))
-	for it.Next() {
+	for it.Next() == chunkenc.ValFloat {
		ts, v := it.At()
		actOOOSamples = append(actOOOSamples, sample{t: ts, v: v})
	}
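Both this hunk and the query loop above reflect the iterator interface change that came with native histograms: chunkenc.Iterator.Next now returns a chunkenc.ValueType instead of a bool, so float-only call sites compare the result against chunkenc.ValFloat. The general consumption pattern for an iterator that may yield either kind of sample looks roughly like this (a sketch using only calls that appear elsewhere in this diff):

for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
	switch vt {
	case chunkenc.ValFloat:
		ts, v := it.At() // plain float sample
		_, _ = ts, v
	case chunkenc.ValHistogram:
		ts, h := it.AtHistogram() // native histogram sample
		_, _ = ts, h
	}
}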
@ -3539,6 +4093,139 @@ func TestReplayAfterMmapReplayError(t *testing.T) {
	require.NoError(t, h.Close())
}
func TestHistogramValidation(t *testing.T) {
	tests := map[string]struct {
		h      *histogram.Histogram
		errMsg string
	}{
		"valid histogram": {
			h: GenerateTestHistograms(1)[0],
		},
		"rejects histogram that has too few negative buckets": {
			h: &histogram.Histogram{
				NegativeSpans:   []histogram.Span{{Offset: 0, Length: 1}},
				NegativeBuckets: []int64{},
			},
			errMsg: `negative side: spans need 1 buckets, have 0 buckets`,
		},
		"rejects histogram that has too few positive buckets": {
			h: &histogram.Histogram{
				PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
				PositiveBuckets: []int64{},
			},
			errMsg: `positive side: spans need 1 buckets, have 0 buckets`,
		},
		"rejects histogram that has too many negative buckets": {
			h: &histogram.Histogram{
				NegativeSpans:   []histogram.Span{{Offset: 0, Length: 1}},
				NegativeBuckets: []int64{1, 2},
			},
			errMsg: `negative side: spans need 1 buckets, have 2 buckets`,
		},
		"rejects histogram that has too many positive buckets": {
			h: &histogram.Histogram{
				PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
				PositiveBuckets: []int64{1, 2},
			},
			errMsg: `positive side: spans need 1 buckets, have 2 buckets`,
		},
		"rejects a histogram that has a negative span with a negative offset": {
			h: &histogram.Histogram{
				NegativeSpans:   []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}},
				NegativeBuckets: []int64{1, 2},
			},
			errMsg: `negative side: span number 2 with offset -1`,
		},
		"rejects a histogram that has a positive span with a negative offset": {
			h: &histogram.Histogram{
				PositiveSpans:   []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}},
				PositiveBuckets: []int64{1, 2},
			},
			errMsg: `positive side: span number 2 with offset -1`,
		},
		"rejects a histogram that has a negative bucket with a negative count": {
			h: &histogram.Histogram{
				NegativeSpans:   []histogram.Span{{Offset: -1, Length: 1}},
				NegativeBuckets: []int64{-1},
			},
			errMsg: `negative side: bucket number 1 has observation count of -1`,
		},
		"rejects a histogram that has a positive bucket with a negative count": {
			h: &histogram.Histogram{
				PositiveSpans:   []histogram.Span{{Offset: -1, Length: 1}},
				PositiveBuckets: []int64{-1},
			},
			errMsg: `positive side: bucket number 1 has observation count of -1`,
		},
		"rejects a histogram that has a lower count than count in buckets": {
			h: &histogram.Histogram{
				Count:           0,
				NegativeSpans:   []histogram.Span{{Offset: -1, Length: 1}},
				PositiveSpans:   []histogram.Span{{Offset: -1, Length: 1}},
				NegativeBuckets: []int64{1},
				PositiveBuckets: []int64{1},
			},
			errMsg: `2 observations found in buckets, but the Count field is 0`,
		},
	}

	for testName, tc := range tests {
		t.Run(testName, func(t *testing.T) {
			err := ValidateHistogram(tc.h)
			if tc.errMsg != "" {
				require.ErrorContains(t, err, tc.errMsg)
			} else {
				require.NoError(t, err)
			}
		})
	}
}
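The error strings matched above come from the invariant that the span lengths on each side must add up exactly to the number of buckets on that side, and that decoded bucket counts must be non-negative and sum to no more than the Count field. A sketch of the span/bucket part of the check (expectedBuckets is a hypothetical helper name; the function actually under test is ValidateHistogram):

// expectedBuckets sums the span lengths on one side. ValidateHistogram
// rejects a histogram unless this equals the length of the corresponding
// bucket slice.
func expectedBuckets(spans []histogram.Span) int {
	n := 0
	for _, s := range spans {
		n += int(s.Length)
	}
	return n
}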
func BenchmarkHistogramValidation(b *testing.B) {
	histograms := generateBigTestHistograms(b.N)
	for _, h := range histograms {
		require.NoError(b, ValidateHistogram(h))
	}
}
func generateBigTestHistograms(n int) []*histogram.Histogram {
	const numBuckets = 500
	numSpans := numBuckets / 10
	bucketsPerSide := numBuckets / 2
	spanLength := uint32(bucketsPerSide / numSpans)
	// With all bucket deltas set to 1, the Gauss sum numBuckets*(numBuckets+1)/2
	// is large enough to cover the observations in all buckets.
	observationCount := numBuckets / 2 * (1 + numBuckets)

	var histograms []*histogram.Histogram
	for i := 0; i < n; i++ {
		h := &histogram.Histogram{
			Count:           uint64(i + observationCount),
			ZeroCount:       uint64(i),
			ZeroThreshold:   1e-128,
			Sum:             18.4 * float64(i+1),
			Schema:          2,
			NegativeSpans:   make([]histogram.Span, numSpans),
			PositiveSpans:   make([]histogram.Span, numSpans),
			NegativeBuckets: make([]int64, bucketsPerSide),
			PositiveBuckets: make([]int64, bucketsPerSide),
		}

		for j := 0; j < numSpans; j++ {
			s := histogram.Span{Offset: 1 + int32(i), Length: spanLength}
			h.NegativeSpans[j] = s
			h.PositiveSpans[j] = s
		}

		for j := 0; j < bucketsPerSide; j++ {
			h.NegativeBuckets[j] = 1
			h.PositiveBuckets[j] = 1
		}

		histograms = append(histograms, h)
	}
	return histograms
}
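PositiveBuckets and NegativeBuckets hold delta-encoded counts: the first entry is the first bucket's absolute count, and every later entry is the difference from its predecessor. With all deltas set to 1, as in the generator above, bucket j holds the absolute count j+1, which is where the Gauss-sum observation count comes from. A small decoding sketch (absoluteCounts is a hypothetical helper name):

// absoluteCounts decodes delta-encoded bucket values into absolute counts
// by keeping a running sum.
func absoluteCounts(deltas []int64) []int64 {
	abs := make([]int64, len(deltas))
	var cur int64
	for i, d := range deltas {
		cur += d
		abs[i] = cur
	}
	return abs
}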
func TestOOOAppendWithNoSeries(t *testing.T) {
	dir := t.TempDir()
	wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, true)
143	tsdb/head_wal.go
@ -47,6 +47,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
	// for error reporting.
	var unknownRefs atomic.Uint64
	var unknownExemplarRefs atomic.Uint64
	var unknownHistogramRefs atomic.Uint64
	var unknownMetadataRefs atomic.Uint64
	// Track number of series records that had overlapping m-map chunks.
	var mmapOverlappingChunks atomic.Uint64
@ -58,8 +59,9 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
		processors     = make([]walSubsetProcessor, n)
		exemplarsInput chan record.RefExemplar

		dec             record.Decoder
		shards          = make([][]record.RefSample, n)
		histogramShards = make([][]record.RefHistogramSample, n)

		decoded                      = make(chan interface{}, 10)
		decodeErr, seriesCreationErr error
@ -83,6 +85,11 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
				return []record.RefExemplar{}
			},
		}
		histogramsPool = sync.Pool{
			New: func() interface{} {
				return []record.RefHistogramSample{}
			},
		}
		metadataPool = sync.Pool{
			New: func() interface{} {
				return []record.RefMetadata{}
@ -107,9 +114,10 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
		processors[i].setup()

		go func(wp *walSubsetProcessor) {
-			unknown, overlapping := wp.processWALSamples(h, mmappedChunks, oooMmappedChunks)
+			unknown, unknownHistograms, overlapping := wp.processWALSamples(h, mmappedChunks, oooMmappedChunks)
			unknownRefs.Add(unknown)
			mmapOverlappingChunks.Add(overlapping)
+			unknownHistogramRefs.Add(unknownHistograms)
			wg.Done()
		}(&processors[i])
	}
@ -192,6 +200,18 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
					return
				}
				decoded <- exemplars
			case record.HistogramSamples:
				hists := histogramsPool.Get().([]record.RefHistogramSample)[:0]
				hists, err = dec.HistogramSamples(rec, hists)
				if err != nil {
					decodeErr = &wlog.CorruptionErr{
						Err:     errors.Wrap(err, "decode histograms"),
						Segment: r.Segment(),
						Offset:  r.Offset(),
					}
					return
				}
				decoded <- hists
			case record.Metadata:
				meta := metadataPool.Get().([]record.RefMetadata)[:0]
				meta, err := dec.Metadata(rec, meta)
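Decoded HistogramSamples records reuse buffers from a sync.Pool in the same way as the existing samples, exemplars, and metadata records: Get hands back a previously returned slice re-sliced to length zero, and the processing loop Puts it back once its contents have been distributed to the shard buffers, which keeps steady-state WAL replay essentially allocation-free.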
@ -292,6 +312,43 @@ Outer:
			}
			//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
			exemplarsPool.Put(v)
		case []record.RefHistogramSample:
			samples := v
			minValidTime := h.minValidTime.Load()
			// We split up the samples into chunks of 5000 samples or less.
			// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
			// cause thousands of very large in flight buffers occupying large amounts
			// of unused memory.
			for len(samples) > 0 {
				m := 5000
				if len(samples) < m {
					m = len(samples)
				}
				for i := 0; i < n; i++ {
					if histogramShards[i] == nil {
						histogramShards[i] = processors[i].reuseHistogramBuf()
					}
				}
				for _, sam := range samples[:m] {
					if sam.T < minValidTime {
						continue // Before minValidTime: discard.
					}
					if r, ok := multiRef[sam.Ref]; ok {
						sam.Ref = r
					}
					mod := uint64(sam.Ref) % uint64(n)
					histogramShards[mod] = append(histogramShards[mod], sam)
				}
				for i := 0; i < n; i++ {
					if len(histogramShards[i]) > 0 {
						processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
						histogramShards[i] = nil
					}
				}
				samples = samples[m:]
			}
			//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
			histogramsPool.Put(v)
		case []record.RefMetadata:
			for _, m := range v {
				s := h.series.getByID(chunks.HeadSeriesRef(m.Ref))
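Histogram samples are sharded exactly like float samples: records are cut into batches of at most 5000, and each sample is routed to a fixed worker goroutine by its series ref, so all samples of a given series replay in order on a single worker. The routing rule reduces to a pure function of the ref (shardFor is a hypothetical name for illustration):

// shardFor routes a series to one of n WAL-replay workers. Because the
// mapping depends only on the ref, per-series sample order is preserved.
func shardFor(ref chunks.HeadSeriesRef, n int) int {
	return int(uint64(ref) % uint64(n))
}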
@ -333,8 +390,14 @@ Outer:
		return errors.Wrap(r.Err(), "read records")
	}

-	if unknownRefs.Load() > 0 || unknownExemplarRefs.Load() > 0 || unknownMetadataRefs.Load() > 0 {
-		level.Warn(h.logger).Log("msg", "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load(), "metadata", unknownMetadataRefs.Load())
+	if unknownRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load() > 0 {
+		level.Warn(h.logger).Log(
+			"msg", "Unknown series references",
+			"samples", unknownRefs.Load(),
+			"exemplars", unknownExemplarRefs.Load(),
+			"histograms", unknownHistogramRefs.Load(),
+			"metadata", unknownMetadataRefs.Load(),
+		)
	}
	if count := mmapOverlappingChunks.Load(); count > 0 {
		level.Info(h.logger).Log("msg", "Overlapping m-map chunks on duplicate series records", "count", count)
@ -402,25 +465,30 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
	}

type walSubsetProcessor struct {
	input            chan walSubsetProcessorInputItem
	output           chan []record.RefSample
	histogramsOutput chan []record.RefHistogramSample
}

type walSubsetProcessorInputItem struct {
	samples          []record.RefSample
	histogramSamples []record.RefHistogramSample
	existingSeries   *memSeries
	walSeriesRef     chunks.HeadSeriesRef
}

func (wp *walSubsetProcessor) setup() {
	wp.input = make(chan walSubsetProcessorInputItem, 300)
	wp.output = make(chan []record.RefSample, 300)
	wp.histogramsOutput = make(chan []record.RefHistogramSample, 300)
}
func (wp *walSubsetProcessor) closeAndDrain() {
	close(wp.input)
	for range wp.output {
	}
	for range wp.histogramsOutput {
	}
}

// If there is a buffer in the output chan, return it for reuse, otherwise return nil.
@ -433,11 +501,24 @@ func (wp *walSubsetProcessor) reuseBuf() []record.RefSample {
	return nil
}

// If there is a buffer in the output chan, return it for reuse, otherwise return nil.
func (wp *walSubsetProcessor) reuseHistogramBuf() []record.RefHistogramSample {
	select {
	case buf := <-wp.histogramsOutput:
		return buf[:0]
	default:
	}
	return nil
}
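histogramsOutput doubles as a free list: processWALSamples returns finished buffers with a non-blocking send, and reuseHistogramBuf takes one with a non-blocking receive, falling back to nil so the caller simply allocates a fresh slice. The idiom in isolation, with hypothetical names and a generic payload:

// tryReuse takes a buffer from pool if one is available, otherwise nil.
func tryReuse(pool chan []int) []int {
	select {
	case buf := <-pool:
		return buf[:0] // keep capacity, drop contents
	default:
		return nil
	}
}

// tryReturn hands buf back unless pool is full; dropping the buffer on a
// full pool is fine, the GC reclaims it.
func tryReturn(pool chan []int, buf []int) {
	select {
	case pool <- buf:
	default:
	}
}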
|
||||||
|
|
||||||
// processWALSamples adds the samples it receives to the head and passes
|
// processWALSamples adds the samples it receives to the head and passes
|
||||||
// the buffer received to an output channel for reuse.
|
// the buffer received to an output channel for reuse.
|
||||||
func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (unknownRefs, mmapOverlappingChunks uint64) {
|
// Samples before the minValidTime timestamp are discarded.
|
||||||
|
func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (unknownRefs, unknownHistogramRefs, mmapOverlappingChunks uint64) {
|
||||||
defer close(wp.output)
|
defer close(wp.output)
|
||||||
|
defer close(wp.histogramsOutput)
|
||||||
|
|
||||||
|
minValidTime := h.minValidTime.Load()
|
||||||
mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
|
mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
|
||||||
chunkRange := h.chunkRange.Load()
|
chunkRange := h.chunkRange.Load()
|
||||||
|
|
||||||
|
@ -460,6 +541,10 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
|
||||||
if s.T <= ms.mmMaxTime {
|
if s.T <= ms.mmMaxTime {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
ms.isHistogramSeries = false
|
||||||
|
if s.T <= ms.mmMaxTime {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper, chunkRange); chunkCreated {
|
if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper, chunkRange); chunkCreated {
|
||||||
h.metrics.chunksCreated.Inc()
|
h.metrics.chunksCreated.Inc()
|
||||||
h.metrics.chunks.Inc()
|
h.metrics.chunks.Inc()
|
||||||
|
@ -475,10 +560,40 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
|
||||||
case wp.output <- in.samples:
|
case wp.output <- in.samples:
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, s := range in.histogramSamples {
|
||||||
|
if s.T < minValidTime {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ms := h.series.getByID(s.Ref)
|
||||||
|
if ms == nil {
|
||||||
|
unknownHistogramRefs++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ms.isHistogramSeries = true
|
||||||
|
if s.T <= ms.mmMaxTime {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, chunkCreated := ms.appendHistogram(s.T, s.H, 0, h.chunkDiskMapper, chunkRange); chunkCreated {
|
||||||
|
h.metrics.chunksCreated.Inc()
|
||||||
|
h.metrics.chunks.Inc()
|
||||||
|
}
|
||||||
|
if s.T > maxt {
|
||||||
|
maxt = s.T
|
||||||
|
}
|
||||||
|
if s.T < mint {
|
||||||
|
mint = s.T
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case wp.histogramsOutput <- in.histogramSamples:
|
||||||
|
default:
|
||||||
|
}
|
||||||
}
|
}
|
||||||
h.updateMinMaxTime(mint, maxt)
|
h.updateMinMaxTime(mint, maxt)
|
||||||
|
|
||||||
return unknownRefs, mmapOverlappingChunks
|
return unknownRefs, unknownHistogramRefs, mmapOverlappingChunks
|
||||||
}
|
}
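Note how the replay loop sets ms.isHistogramSeries to false for every float sample and to true for every histogram sample; presumably this keeps the in-memory series' record of its current encoding consistent with what the scrape-time appenders maintain, so the first append after replay cuts a chunk of the right kind.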
func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) {

@ -745,7 +860,7 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) {
			}
		}
		wp.mx.Unlock()
		wp.output <- samples
	}
	h.updateMinOOOMaxOOOTime(mint, maxt)

@ -41,7 +41,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool {

	if i >= len(o.samples) {
		// None found, append it at the end.
-		o.samples = append(o.samples, sample{t, v})
+		o.samples = append(o.samples, sample{t, v, nil, nil})
		return true
	}

@ -52,7 +52,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool {
	// Expand length by 1 to make room. Use a zero sample, we will overwrite it anyway.
	o.samples = append(o.samples, sample{})
	copy(o.samples[i+1:], o.samples[i:])
-	o.samples[i] = sample{t, v}
+	o.samples[i] = sample{t, v, nil, nil}

	return true
}
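The widened literals (sample{t, v} becoming sample{t, v, nil, nil}) show that the tsdb-internal sample struct gained two trailing pointer fields for the new sample kinds, presumably the integer and float flavours of native histograms; both stay nil for plain float samples like these out-of-order inserts.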
@ -860,7 +860,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {

	var resultSamples tsdbutil.SampleSlice
	it := c.Iterator(nil)
-	for it.Next() {
+	for it.Next() == chunkenc.ValFloat {
		t, v := it.At()
		resultSamples = append(resultSamples, sample{t: t, v: v})
	}

@ -1031,7 +1031,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(t *testing.T) {

	var resultSamples tsdbutil.SampleSlice
	it := c.Iterator(nil)
-	for it.Next() {
+	for it.Next() == chunkenc.ValFloat {
		ts, v := it.At()
		resultSamples = append(resultSamples, sample{t: ts, v: v})
	}
Some files were not shown because too many files have changed in this diff.