Mirror of https://github.com/prometheus/prometheus.git (synced 2024-11-09 23:24:05 -08:00)

Merge remote-tracking branch 'upstream/main' into dimitar/pull-upstream

This commit is contained in commit 77ac7ad40a.
.github/dependabot.yml (vendored, 7 changes)

@@ -4,6 +4,13 @@ updates:
     directory: "/"
     schedule:
       interval: "monthly"
+    groups:
+      k8s.io:
+        patterns:
+          - "k8s.io/*"
+      go.opentelemetry.io:
+        patterns:
+          - "go.opentelemetry.io/*"
     # Disable version updates; we will get them when we update from upstream Prometheus.
     open-pull-requests-limit: 0
   - package-ecosystem: "gomod"
.promu.yml

@@ -1,7 +1,7 @@
 go:
     # Whenever the Go version is updated here,
     # .circle/config.yml should also be updated.
-    version: 1.20
+    version: 1.21
 repository:
     path: github.com/prometheus/prometheus
 build:
Makefile.common

@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.53.3
+GOLANGCI_LINT_VERSION ?= v1.54.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
RELEASE.md

@@ -51,7 +51,7 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.44     | 2023-04-19 | Bryan Boreham (GitHub: @bboreham)      |
 | v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez)  |
 | v2.46     | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) |
-| v2.47     | 2023-08-23 | **searching for volunteer**            |
+| v2.47     | 2023-08-23 | Bryan Boreham (GitHub: @bboreham)      |
 | v2.48     | 2023-10-04 | **searching for volunteer**            |
 
 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
cmd/prometheus/main.go

@@ -214,11 +214,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
         }
     }
 
-    if c.tsdb.EnableNativeHistograms && c.tsdb.EnableMemorySnapshotOnShutdown {
-        c.tsdb.EnableMemorySnapshotOnShutdown = false
-        level.Warn(logger).Log("msg", "memory-snapshot-on-shutdown has been disabled automatically because memory-snapshot-on-shutdown and native-histograms cannot be enabled at the same time.")
-    }
-
     return nil
 }
cmd/promtool/testdata/unittest.yml (vendored, 46 changes)

@@ -10,6 +10,21 @@ tests:
       - series: test_full
         values: "0 0"
 
+      - series: test_repeat
+        values: "1x2"
+
+      - series: test_increase
+        values: "1+1x2"
+
+      - series: test_histogram
+        values: "{{schema:1 sum:-0.3 count:32.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}"
+
+      - series: test_histogram_repeat
+        values: "{{sum:3 count:2 buckets:[2]}}x2"
+
+      - series: test_histogram_increase
+        values: "{{sum:3 count:2 buckets:[2]}}+{{sum:1.3 count:1 buckets:[1]}}x2"
+
       - series: test_stale
         values: "0 stale"
 

@@ -31,6 +46,37 @@ tests:
         exp_samples:
           - value: 60
 
+      # Repeat & increase
+      - expr: test_repeat
+        eval_time: 2m
+        exp_samples:
+          - value: 1
+            labels: "test_repeat"
+      - expr: test_increase
+        eval_time: 2m
+        exp_samples:
+          - value: 3
+            labels: "test_increase"
+
+      # Histograms
+      - expr: test_histogram
+        eval_time: 1m
+        exp_samples:
+          - labels: "test_histogram"
+            histogram: "{{schema:1 sum:-0.3 count:32.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}"
+
+      - expr: test_histogram_repeat
+        eval_time: 2m
+        exp_samples:
+          - labels: "test_histogram_repeat"
+            histogram: "{{count:2 sum:3 buckets:[2]}}"
+
+      - expr: test_histogram_increase
+        eval_time: 2m
+        exp_samples:
+          - labels: "test_histogram_increase"
+            histogram: "{{count:4 sum:5.6 buckets:[4]}}"
+
       # Ensure a value is stale as soon as it is marked as such.
       - expr: test_stale
         eval_time: 59s
cmd/promtool/tsdb.go

@@ -647,6 +647,14 @@ func dumpSamples(path string, mint, maxt int64, match string) (err error) {
             ts, val := it.At()
             fmt.Printf("%s %g %d\n", lbs, val, ts)
         }
+        for it.Next() == chunkenc.ValFloatHistogram {
+            ts, fh := it.AtFloatHistogram()
+            fmt.Printf("%s %s %d\n", lbs, fh.String(), ts)
+        }
+        for it.Next() == chunkenc.ValHistogram {
+            ts, h := it.AtHistogram()
+            fmt.Printf("%s %s %d\n", lbs, h.String(), ts)
+        }
         if it.Err() != nil {
             return ss.Err()
         }
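The back-to-back loops added above work because `Next()` on a `chunkenc.Iterator` both advances the iterator and reports the value type of the sample it landed on, so each loop drains a run of same-typed samples and then falls through to the next. For orientation, here is a minimal sketch of the more general single-loop form with a type switch; it assumes only the iterator API already visible in this diff, and `dumpAll` is a hypothetical helper, not part of promtool:

```go
package sketch

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// dumpAll drains a sample iterator that may interleave floats, integer
// histograms, and float histograms, dispatching on the value type that
// Next() reports instead of running one loop per type.
func dumpAll(it chunkenc.Iterator) error {
	for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
		switch vt {
		case chunkenc.ValFloat:
			ts, v := it.At()
			fmt.Printf("%g %d\n", v, ts)
		case chunkenc.ValHistogram:
			ts, h := it.AtHistogram()
			fmt.Printf("%s %d\n", h.String(), ts)
		case chunkenc.ValFloatHistogram:
			ts, fh := it.AtFloatHistogram()
			fmt.Printf("%s %d\n", fh.String(), ts)
		}
	}
	return it.Err() // surface any decoding error after the drain
}
```

For series that contain only one sample type per block, the per-type loops in the diff behave the same and avoid the switch.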
cmd/promtool/unittest.go

@@ -29,6 +29,7 @@ import (
     "github.com/prometheus/common/model"
     "gopkg.in/yaml.v2"
 
+    "github.com/prometheus/prometheus/model/histogram"
     "github.com/prometheus/prometheus/model/labels"
     "github.com/prometheus/prometheus/promql"
     "github.com/prometheus/prometheus/promql/parser"

@@ -346,14 +347,29 @@ Outer:
         var gotSamples []parsedSample
         for _, s := range got {
             gotSamples = append(gotSamples, parsedSample{
                 Labels: s.Metric.Copy(),
                 Value:  s.F,
+                Histogram: promql.HistogramTestExpression(s.H),
             })
         }
 
         var expSamples []parsedSample
         for _, s := range testCase.ExpSamples {
             lb, err := parser.ParseMetric(s.Labels)
+            var hist *histogram.FloatHistogram
+            if err == nil && s.Histogram != "" {
+                _, values, parseErr := parser.ParseSeriesDesc("{} " + s.Histogram)
+                switch {
+                case parseErr != nil:
+                    err = parseErr
+                case len(values) != 1:
+                    err = fmt.Errorf("expected 1 value, got %d", len(values))
+                case values[0].Histogram == nil:
+                    err = fmt.Errorf("expected histogram, got %v", values[0])
+                default:
+                    hist = values[0].Histogram
+                }
+            }
             if err != nil {
                 err = fmt.Errorf("labels %q: %w", s.Labels, err)
                 errs = append(errs, fmt.Errorf("    expr: %q, time: %s, err: %w", testCase.Expr,

@@ -361,8 +377,9 @@ Outer:
                 continue Outer
             }
             expSamples = append(expSamples, parsedSample{
                 Labels: lb,
                 Value:  s.Value,
+                Histogram: promql.HistogramTestExpression(hist),
             })
         }
 

@@ -530,14 +547,16 @@ type promqlTestCase struct {
 }
 
 type sample struct {
     Labels string  `yaml:"labels"`
     Value  float64 `yaml:"value"`
+    Histogram string `yaml:"histogram"` // A non-empty string means Value is ignored.
 }
 
 // parsedSample is a sample with parsed Labels.
 type parsedSample struct {
     Labels labels.Labels
     Value  float64
+    Histogram string // TestExpression() of histogram.FloatHistogram
 }
 
 func parsedSamplesString(pss []parsedSample) string {

@@ -552,5 +571,8 @@ func parsedSamplesString(pss []parsedSample) string {
 }
 
 func (ps *parsedSample) String() string {
+    if ps.Histogram != "" {
+        return ps.Labels.String() + " " + ps.Histogram
+    }
     return ps.Labels.String() + " " + strconv.FormatFloat(ps.Value, 'E', -1, 64)
 }
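The `"{} " + s.Histogram` trick above reuses the series-description parser for a bare histogram literal: prepending an empty label set makes the input look like a complete one-sample series definition. A minimal standalone sketch of the same round trip, using only the `parser` API already exercised in this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Prepend an empty label set so the histogram literal parses as a
	// one-sample series description, exactly as unittest.go does above.
	_, values, err := parser.ParseSeriesDesc(`{} {{schema:0 sum:3 count:2 buckets:[1 1]}}`)
	if err != nil {
		log.Fatal(err)
	}
	if len(values) != 1 || values[0].Histogram == nil {
		log.Fatal("expected exactly one histogram value")
	}
	fmt.Println(values[0].Histogram.String())
}
```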
config/config.go

@@ -409,6 +409,9 @@ type GlobalConfig struct {
     // More than this label value length post metric-relabeling will cause the
     // scrape to fail. 0 means no limit.
     LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
+    // Keep no more than this many dropped targets per job.
+    // 0 means no limit.
+    KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
 }
 
 // SetDirectory joins any relative file paths with dir.

@@ -514,6 +517,9 @@ type ScrapeConfig struct {
     // More than this many buckets in a native histogram will cause the scrape to
     // fail.
     NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"`
+    // Keep no more than this many dropped targets per job.
+    // 0 means no limit.
+    KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
 
     // We cannot do proper Go type embedding below as the parser will then parse
     // values arbitrarily into the overflow maps of further-down types.

@@ -608,6 +614,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
     if c.LabelValueLengthLimit == 0 {
         c.LabelValueLengthLimit = globalConfig.LabelValueLengthLimit
     }
+    if c.KeepDroppedTargets == 0 {
+        c.KeepDroppedTargets = globalConfig.KeepDroppedTargets
+    }
 
     return nil
 }

@@ -810,6 +819,7 @@ type AlertmanagerConfig struct {
 
     ServiceDiscoveryConfigs discovery.Configs       `yaml:"-"`
     HTTPClientConfig        config.HTTPClientConfig `yaml:",inline"`
+    SigV4Config             *sigv4.SigV4Config      `yaml:"sigv4,omitempty"`
 
     // The URL scheme to use when talking to Alertmanagers.
     Scheme string `yaml:"scheme,omitempty"`

@@ -845,6 +855,13 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
         return err
     }
 
+    httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
+        c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
+
+    if httpClientConfigAuthEnabled && c.SigV4Config != nil {
+        return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+    }
+
     // Check for users putting URLs in target groups.
     if len(c.RelabelConfigs) == 0 {
         if err := checkStaticTargets(c.ServiceDiscoveryConfigs); err != nil {
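A note on the `KeepDroppedTargets` defaulting in `ScrapeConfig.Validate` above: the per-job zero value means "inherit the global limit". A stripped-down sketch of that pattern (the `global`/`job` types here are illustrative, not the real config structs):

```go
package main

import "fmt"

type global struct{ KeepDroppedTargets uint }
type job struct{ KeepDroppedTargets uint }

// validate mirrors the inheritance rule in ScrapeConfig.Validate: an unset
// (zero) per-job limit falls back to the global one.
func (j *job) validate(g global) {
	if j.KeepDroppedTargets == 0 {
		j.KeepDroppedTargets = g.KeepDroppedTargets
	}
}

func main() {
	g := global{KeepDroppedTargets: 100}
	j := job{} // unset in the job config
	j.validate(g)
	fmt.Println(j.KeepDroppedTargets) // 100, inherited from global
}
```

A corollary of this design: a job cannot override a non-zero global limit back to 0 (unlimited), because 0 is indistinguishable from unset.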
discovery/hetzner/hcloud.go

@@ -91,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
     targets := make([]model.LabelSet, len(servers))
     for i, server := range servers {
         labels := model.LabelSet{
-            hetznerLabelRole:       model.LabelValue(hetznerRoleHcloud),
+            hetznerLabelRole:       model.LabelValue(HetznerRoleHcloud),
             hetznerLabelServerID:   model.LabelValue(fmt.Sprintf("%d", server.ID)),
             hetznerLabelServerName: model.LabelValue(server.Name),
             hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
discovery/hetzner/hetzner.go

@@ -57,7 +57,7 @@ type SDConfig struct {
 
     RefreshInterval model.Duration `yaml:"refresh_interval"`
     Port            int            `yaml:"port"`
-    Role            role           `yaml:"role"`
+    Role            Role           `yaml:"role"`
     hcloudEndpoint  string // For tests only.
     robotEndpoint   string // For tests only.
 }

@@ -74,26 +74,26 @@ type refresher interface {
     refresh(context.Context) ([]*targetgroup.Group, error)
 }
 
-// role is the role of the target within the Hetzner Ecosystem.
-type role string
+// Role is the Role of the target within the Hetzner Ecosystem.
+type Role string
 
 // The valid options for role.
 const (
     // Hetzner Robot Role (Dedicated Server)
     // https://robot.hetzner.com
-    hetznerRoleRobot role = "robot"
+    HetznerRoleRobot Role = "robot"
     // Hetzner Cloud Role
     // https://console.hetzner.cloud
-    hetznerRoleHcloud role = "hcloud"
+    HetznerRoleHcloud Role = "hcloud"
 )
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *role) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
     if err := unmarshal((*string)(c)); err != nil {
         return err
     }
     switch *c {
-    case hetznerRoleRobot, hetznerRoleHcloud:
+    case HetznerRoleRobot, HetznerRoleHcloud:
         return nil
     default:
         return fmt.Errorf("unknown role %q", *c)

@@ -143,12 +143,12 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error)
 
 func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
     switch conf.Role {
-    case hetznerRoleHcloud:
+    case HetznerRoleHcloud:
         if conf.hcloudEndpoint == "" {
             conf.hcloudEndpoint = hcloud.Endpoint
         }
         return newHcloudDiscovery(conf, l)
-    case hetznerRoleRobot:
+    case HetznerRoleRobot:
         if conf.robotEndpoint == "" {
             conf.robotEndpoint = "https://robot-ws.your-server.de"
         }
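Exporting `Role` and its two constants lets code that imports the package select a Hetzner discovery mode programmatically instead of only via a YAML `role:` entry. A small sketch of that (the bare `SDConfig` literal is illustrative, not a complete working discovery config):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/discovery/hetzner"
)

func main() {
	// Construct an SDConfig directly, using the newly exported constant
	// instead of unmarshaling a YAML `role: hcloud` entry.
	cfg := hetzner.SDConfig{Role: hetzner.HetznerRoleHcloud}
	fmt.Println(cfg.Role) // prints "hcloud"
}
```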
discovery/hetzner/robot.go

@@ -105,7 +105,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
     targets := make([]model.LabelSet, len(servers))
     for i, server := range servers {
         labels := model.LabelSet{
-            hetznerLabelRole:       model.LabelValue(hetznerRoleRobot),
+            hetznerLabelRole:       model.LabelValue(HetznerRoleRobot),
             hetznerLabelServerID:   model.LabelValue(strconv.Itoa(server.Server.ServerNumber)),
             hetznerLabelServerName: model.LabelValue(server.Server.ServerName),
             hetznerLabelDatacenter: model.LabelValue(strings.ToLower(server.Server.Dc)),
discovery/marathon/marathon.go

@@ -106,14 +106,16 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
     if len(c.AuthToken) > 0 && len(c.AuthTokenFile) > 0 {
         return errors.New("marathon_sd: at most one of auth_token & auth_token_file must be configured")
     }
-    if c.HTTPClientConfig.BasicAuth != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) {
-        return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured")
-    }
-    if (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) {
-        return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured")
-    }
-    if c.HTTPClientConfig.Authorization != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) {
-        return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured")
+    if len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0 {
+        switch {
+        case c.HTTPClientConfig.BasicAuth != nil:
+            return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured")
+        case len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0:
+            return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured")
+        case c.HTTPClientConfig.Authorization != nil:
+            return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured")
+        }
     }
     return c.HTTPClientConfig.Validate()
 }
docs/configuration/configuration.md

@@ -106,6 +106,10 @@ global:
   # change in the future.
   [ target_limit: <int> | default = 0 ]
 
+  # Limit per scrape config on the number of targets dropped by relabeling
+  # that will be kept in memory. 0 means no limit.
+  [ keep_dropped_targets: <int> | default = 0 ]
+
 # Rule files specifies a list of globs. Rules and alerts are read from
 # all matching files.
 rule_files:

@@ -415,6 +419,10 @@ metric_relabel_configs:
 # change in the future.
 [ target_limit: <int> | default = 0 ]
 
+# Per-job limit on the number of targets dropped by relabeling
+# that will be kept in memory. 0 means no limit.
+[ keep_dropped_targets: <int> | default = 0 ]
+
 # Limit on total number of positive and negative buckets allowed in a single
 # native histogram. If this is exceeded, the entire scrape will be treated as
 # failed. 0 means no limit.

@@ -2985,8 +2993,8 @@ password: <secret>
 # Optional HTTP basic authentication information, currently not supported by Uyuni.
 basic_auth:
   [ username: <string> ]
   [ password: <secret> ]
   [ password_file: <string> ]
 
 # Optional `Authorization` header configuration, currently not supported by Uyuni.
 authorization:

@@ -3279,6 +3287,25 @@ authorization:
   # It is mutually exclusive with `credentials`.
   [ credentials_file: <filename> ]
 
+# Optionally configures AWS's Signature Verification 4 signing process to
+# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2.
+# To use the default credentials from the AWS SDK, use `sigv4: {}`.
+sigv4:
+  # The AWS region. If blank, the region from the default credentials chain
+  # is used.
+  [ region: <string> ]
+
+  # The AWS API keys. If blank, the environment variables `AWS_ACCESS_KEY_ID`
+  # and `AWS_SECRET_ACCESS_KEY` are used.
+  [ access_key: <string> ]
+  [ secret_key: <secret> ]
+
+  # Named AWS profile used to authenticate.
+  [ profile: <string> ]
+
+  # AWS Role ARN, an alternative to using AWS API keys.
+  [ role_arn: <string> ]
+
 # Optional OAuth 2.0 configuration.
 # Cannot be used at the same time as basic_auth or authorization.
 oauth2:
docs/configuration/unit_testing_rules.md

@@ -76,18 +76,49 @@ series: <string>
 
 # This uses expanding notation.
 # Expanding notation:
-# 'a+bxc' becomes 'a a+b a+(2*b) a+(3*b) … a+(c*b)'
-# Read this as series starts at a, then c further samples incrementing by b.
-# 'a-bxc' becomes 'a a-b a-(2*b) a-(3*b) … a-(c*b)'
-# Read this as series starts at a, then c further samples decrementing by b (or incrementing by negative b).
+# 'a+bxn' becomes 'a a+b a+(2*b) a+(3*b) … a+(n*b)'
+# Read this as series starts at a, then n further samples incrementing by b.
+# 'a-bxn' becomes 'a a-b a-(2*b) a-(3*b) … a-(n*b)'
+# Read this as series starts at a, then n further samples decrementing by b (or incrementing by negative b).
+# 'axn' becomes 'a a a … a' (n times) - it's a shorthand for 'a+0xn'
 # There are special values to indicate missing and stale samples:
 # '_' represents a missing sample from scrape
 # 'stale' indicates a stale sample
 # Examples:
 # 1. '-2+4x3' becomes '-2 2 6 10' - series starts at -2, then 3 further samples incrementing by 4.
 # 2. ' 1-2x4' becomes '1 -1 -3 -5 -7' - series starts at 1, then 4 further samples decrementing by 2.
 # 3. ' 1x4' becomes '1 1 1 1 1' - shorthand for '1+0x4', series starts at 1, then 4 further samples incrementing by 0.
 # 4. ' 1 _x3 stale' becomes '1 _ _ _ stale' - the missing sample cannot increment, so 3 missing samples are produced by the '_x3' expression.
+#
+# Native histogram notation:
+# Native histograms can be used instead of floating point numbers using the following notation:
+# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}
+# Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'.
+# All properties are optional and default to 0. The order is not important. The following properties are supported:
+# - schema (int):
+#     Currently valid schema numbers are -4 <= n <= 8. They are all for
+#     base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+#     then each power of two is divided into 2^n logarithmic buckets. Or
+#     in other words, each bucket boundary is the previous boundary times
+#     2^(2^-n).
+# - sum (float):
+#     The sum of all observations, including the zero bucket.
+# - count (non-negative float):
+#     The number of observations, including those that are NaN and including the zero bucket.
+# - z_bucket (non-negative float):
+#     The sum of all observations in the zero bucket.
+# - z_bucket_w (non-negative float):
+#     The width of the zero bucket.
+#     If z_bucket_w > 0, the zero bucket contains all observations -z_bucket_w <= x <= z_bucket_w.
+#     Otherwise, the zero bucket only contains observations that are exactly 0.
+# - buckets (list of non-negative floats):
+#     Observation counts in positive buckets. Each represents an absolute count.
+# - offset (int):
+#     The starting index of the first entry in the positive buckets.
+# - n_buckets (list of non-negative floats):
+#     Observation counts in negative buckets. Each represents an absolute count.
+# - n_offset (int):
+#     The starting index of the first entry in the negative buckets.
 values: <string>
 ```
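The schema rule quoted above ("each bucket boundary is the previous boundary times 2^(2^-n)") is easy to sanity-check numerically; a small standalone sketch, not part of the test data:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// For schema n, consecutive bucket boundaries grow by 2^(2^-n).
	// Schema 0 gives factor 2 (plain powers of two); schema 1 gives sqrt(2).
	for _, schema := range []int{0, 1, 2} {
		factor := math.Pow(2, math.Pow(2, -float64(schema)))
		fmt.Printf("schema %d: boundary growth factor %.4f\n", schema, factor)
	}
	// With schema 1, the boundaries above 1 are 2^(1/2), 2^(2/2), 2^(3/2), ...
	// i.e. ~1.4142, 2, ~2.8284, 4, ...
}
```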
docs/feature_flags.md

@@ -126,3 +126,11 @@ still ingest those conventional histograms that do not come with a
 corresponding native histogram. However, if a native histogram is present,
 Prometheus will ignore the corresponding conventional histogram, with the
 notable exception of exemplars, which are always ingested.
+
+## OTLP Receiver
+
+`--enable-feature=otlp-write-receiver`
+
+The OTLP receiver allows Prometheus to accept [OpenTelemetry](https://opentelemetry.io/) metrics writes.
+Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features
+won't work when you push OTLP metrics.
docs/querying/api.md

@@ -543,6 +543,7 @@ GET /api/v1/targets
 ```
 
 Both the active and dropped targets are part of the response by default.
+Dropped targets are subject to `keep_dropped_targets` limit, if set.
 `labels` represents the label set after relabeling has occurred.
 `discoveredLabels` represent the unmodified labels retrieved during service discovery before relabeling has occurred.

@@ -1294,3 +1295,16 @@ Enable the remote write receiver by setting
 endpoint is `/api/v1/write`. Find more details [here](../storage.md#overview).
 
 *New in v2.33*
+
+## OTLP Receiver
+
+Prometheus can be configured as a receiver for the OTLP Metrics protocol. This
+is not considered an efficient way of ingesting samples. Use it
+with caution for specific low-volume use cases. It is not suitable for
+replacing the ingestion via scraping.
+
+Enable the OTLP receiver by the feature flag
+`--enable-feature=otlp-write-receiver`. When enabled, the OTLP receiver
+endpoint is `/api/v1/otlp/v1/metrics`.
+
+*New in v2.47*
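To exercise the endpoint documented above, an OpenTelemetry SDK exporter can be pointed at it. A sketch under stated assumptions: it uses the `otlpmetrichttp` exporter from the OpenTelemetry Go SDK and assumes its `WithEndpoint`/`WithURLPath`/`WithInsecure` options; host, port, and TLS choice are illustrative.

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
)

func main() {
	ctx := context.Background()
	// Point the exporter at a local Prometheus started with
	// --enable-feature=otlp-write-receiver.
	exp, err := otlpmetrichttp.New(ctx,
		otlpmetrichttp.WithEndpoint("localhost:9090"),
		otlpmetrichttp.WithURLPath("/api/v1/otlp/v1/metrics"),
		otlpmetrichttp.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
	// Wire exp into an SDK MeterProvider (omitted here) to start pushing metrics.
}
```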
docs/querying/basics.md

@@ -35,7 +35,7 @@ vector is the only type that can be directly graphed.
 _Notes about the experimental native histograms:_
 
 * Ingesting native histograms has to be enabled via a [feature
-  flag](../feature_flags/#native-histograms).
+  flag](../../feature_flags.md#native-histograms).
 * Once native histograms have been ingested into the TSDB (and even after
   disabling the feature flag again), both instant vectors and range vectors may
   now contain samples that aren't simple floating point numbers (float samples)
docs/querying/functions.md

@@ -14,7 +14,7 @@ vector, which if not provided it will default to the value of the expression
 _Notes about the experimental native histograms:_
 
 * Ingesting native histograms has to be enabled via a [feature
-  flag](../feature_flags/#native-histograms). As long as no native histograms
+  flag](../../feature_flags.md#native-histograms). As long as no native histograms
   have been ingested into the TSDB, all functions will behave as usual.
 * Functions that do not explicitly mention native histograms in their
   documentation (see below) will ignore histogram samples.

@@ -145,7 +145,7 @@ delta(cpu_temp_celsius{host="zeus"}[2h])
 ```
 
 `delta` acts on native histograms by calculating a new histogram where each
-compononent (sum and count of observations, buckets) is the difference between
+component (sum and count of observations, buckets) is the difference between
 the respective component in the first and last native histogram in
 `v`. However, each element in `v` that contains a mix of float and native
 histogram samples within the range, will be missing from the result vector.

@@ -323,6 +323,19 @@ a histogram.
 You can use `histogram_quantile(1, v instant-vector)` to get the estimated maximum value stored in
 a histogram.
 
+## `histogram_stddev()` and `histogram_stdvar()`
+
+_Both functions only act on native histograms, which are an experimental
+feature. The behavior of these functions may change in future versions of
+Prometheus, including their removal from PromQL._
+
+`histogram_stddev(v instant-vector)` returns the estimated standard deviation
+of observations in a native histogram, based on the geometric mean of the buckets
+where the observations lie. Samples that are not native histograms are ignored and
+do not show up in the returned vector.
+
+Similarly, `histogram_stdvar(v instant-vector)` returns the estimated standard
+variance of observations in a native histogram.
+
 ## `holt_winters()`
 

@@ -495,7 +508,7 @@ rate(http_requests_total{job="api-server"}[5m])
 ```
 
 `rate` acts on native histograms by calculating a new histogram where each
-compononent (sum and count of observations, buckets) is the rate of increase
+component (sum and count of observations, buckets) is the rate of increase
 between the respective component in the first and last native histogram in
 `v`. However, each element in `v` that contains a mix of float and native
 histogram samples within the range, will be missing from the result vector.
docs/querying/operators.md

@@ -310,7 +310,7 @@ so `2 ^ 3 ^ 2` is equivalent to `2 ^ (3 ^ 2)`.
 ## Operators for native histograms
 
 Native histograms are an experimental feature. Ingesting native histograms has
-to be enabled via a [feature flag](../feature_flags/#native-histograms). Once
+to be enabled via a [feature flag](../../feature_flags.md#native-histograms). Once
 native histograms have been ingested, they can be queried (even after the
 feature flag has been disabled again). However, the operator support for native
 histograms is still very limited.
documentation/examples/prometheus-kubernetes.yml

@@ -8,6 +8,11 @@
 # If you are using Kubernetes 1.7.2 or earlier, please take note of the comments
 # for the kubernetes-cadvisor job; you will need to edit or remove this job.
 
+# Keep at most 100 sets of details of targets dropped by relabeling.
+# This information is used to display in the UI for troubleshooting.
+global:
+  keep_dropped_targets: 100
+
 # Scrape config for API servers.
 #
 # Kubernetes exposes API servers as endpoints to the default/kubernetes
go.mod (12 changes)

@@ -1,6 +1,6 @@
 module github.com/prometheus/prometheus
 
-go 1.19
+go 1.20
 
 require (
     github.com/Azure/azure-sdk-for-go v65.0.0+incompatible

@@ -11,7 +11,7 @@ require (
     github.com/DmitriyVTitov/size v1.5.0
     github.com/alecthomas/kingpin/v2 v2.3.2
     github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
-    github.com/aws/aws-sdk-go v1.44.302
+    github.com/aws/aws-sdk-go v1.44.317
     github.com/cespare/xxhash/v2 v2.2.0
     github.com/dennwc/varint v1.0.0
     github.com/dgraph-io/ristretto v0.1.1

@@ -45,18 +45,18 @@ require (
     github.com/oklog/ulid v1.3.1
     github.com/ovh/go-ovh v1.4.1
     github.com/pkg/errors v0.9.1
-    github.com/prometheus/alertmanager v0.25.0
+    github.com/prometheus/alertmanager v0.26.0
     github.com/prometheus/client_golang v1.16.0
     github.com/prometheus/client_model v0.4.0
     github.com/prometheus/common v0.44.0
     github.com/prometheus/common/assets v0.2.0
     github.com/prometheus/common/sigv4 v0.1.0
     github.com/prometheus/exporter-toolkit v0.10.0
-    github.com/scaleway/scaleway-sdk-go v1.0.0-beta.19
+    github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20
     github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
     github.com/stretchr/testify v1.8.4
     github.com/vultr/govultr/v2 v2.17.2
-    go.opentelemetry.io/collector/pdata v0.66.0
+    go.opentelemetry.io/collector/pdata v1.0.0-rcv0014
     go.opentelemetry.io/collector/semconv v0.81.0
     go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0
     go.opentelemetry.io/otel v1.16.0

@@ -68,7 +68,7 @@ require (
     go.uber.org/atomic v1.11.0
     go.uber.org/automaxprocs v1.5.2
     go.uber.org/goleak v1.2.1
-    go.uber.org/multierr v1.8.0
+    go.uber.org/multierr v1.11.0
     golang.org/x/net v0.12.0
     golang.org/x/oauth2 v0.10.0
     golang.org/x/sync v0.3.0
go.sum (21 changes)

@@ -106,8 +106,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.302 h1:ST3ko6GrJKn3Xi+nAvxjG3uk/V1pW8KC52WLeIxqqNk=
-github.com/aws/aws-sdk-go v1.44.302/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.317 h1:+8XWrLmGMwPPXSRSLPzhgcGnzJ2mYkgkrcB9C/GnSOU=
+github.com/aws/aws-sdk-go v1.44.317/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=

@@ -662,8 +662,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
 github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
-github.com/prometheus/alertmanager v0.25.0 h1:vbXKUR6PYRiZPRIKfmXaG+dmCKG52RtPL4Btl8hQGvg=
-github.com/prometheus/alertmanager v0.25.0/go.mod h1:MEZ3rFVHqKZsw7IcNS/m4AWZeXThmJhumpiWR4eHU/w=
+github.com/prometheus/alertmanager v0.26.0 h1:uOMJWfIwJguc3NaM3appWNbbrh6G/OjvaHMk22aBBYc=
+github.com/prometheus/alertmanager v0.26.0/go.mod h1:rVcnARltVjavgVaNnmevxK7kOn7IZavyf0KNgHkbEpU=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=

@@ -714,8 +714,8 @@ github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDN
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.19 h1:+1H+N9QFl2Sfvia0FBYfMrHYHYhmpZxhSE0wpPL2lYs=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.19/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU=

@@ -805,8 +805,8 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector/pdata v0.66.0 h1:UdE5U6MsDNzuiWaXdjGx2lC3ElVqWmN/hiUE8vyvSuM=
-go.opentelemetry.io/collector/pdata v0.66.0/go.mod h1:pqyaznLzk21m+1KL6fwOsRryRELL+zNM0qiVSn0MbVc=
+go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY=
+go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4=
 go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw=
 go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM=

@@ -832,7 -832,6 @@ go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lI
 go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
 go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=

@@ -841,8 +840,8 @@ go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
 go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
-go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
model/histogram/float_histogram.go

@@ -15,6 +15,7 @@ package histogram
 
 import (
     "fmt"
+    "math"
     "strings"
 )
 

@@ -130,6 +131,55 @@ func (h *FloatHistogram) String() string {
     return sb.String()
 }
 
+// TestExpression returns the string representation of this histogram as it is used in the internal PromQL testing
+// framework as well as in promtool rules unit tests.
+// The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series
+func (h *FloatHistogram) TestExpression() string {
+    var res []string
+    m := h.Copy()
+
+    m.Compact(math.MaxInt) // Compact to reduce the number of positive and negative spans to 1.
+
+    if m.Schema != 0 {
+        res = append(res, fmt.Sprintf("schema:%d", m.Schema))
+    }
+    if m.Count != 0 {
+        res = append(res, fmt.Sprintf("count:%g", m.Count))
+    }
+    if m.Sum != 0 {
+        res = append(res, fmt.Sprintf("sum:%g", m.Sum))
+    }
+    if m.ZeroCount != 0 {
+        res = append(res, fmt.Sprintf("z_bucket:%g", m.ZeroCount))
+    }
+    if m.ZeroThreshold != 0 {
+        res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold))
+    }
+
+    addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string {
+        if len(spans) > 1 {
+            panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind))
+        }
+        for _, span := range spans {
+            if span.Offset != 0 {
+                res = append(res, fmt.Sprintf("%s:%d", offsetKey, span.Offset))
+            }
+        }
+
+        var bucketStr []string
+        for _, bucket := range buckets {
+            bucketStr = append(bucketStr, fmt.Sprintf("%g", bucket))
+        }
+        if len(bucketStr) > 0 {
+            res = append(res, fmt.Sprintf("%s:[%s]", bucketsKey, strings.Join(bucketStr, " ")))
+        }
+        return res
+    }
+    res = addBuckets("positive", "buckets", "offset", m.PositiveBuckets, m.PositiveSpans)
+    res = addBuckets("negative", "n_buckets", "n_offset", m.NegativeBuckets, m.NegativeSpans)
+    return "{{" + strings.Join(res, " ") + "}}"
+}
+
 // ZeroBucket returns the zero bucket.
 func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
     return Bucket[float64]{
@ -159,7 +209,7 @@ func (h *FloatHistogram) Mul(factor float64) *FloatHistogram {
|
||||||
return h
|
return h
|
||||||
}
|
}
|
||||||
|
|
||||||
// Div works like Scale but divides instead of multiplies.
|
// Div works like Mul but divides instead of multiplies.
|
||||||
// When dividing by 0, everything will be set to Inf.
|
// When dividing by 0, everything will be set to Inf.
|
||||||
func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
|
func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
|
||||||
h.ZeroCount /= scalar
|
h.ZeroCount /= scalar
|
||||||
|
@@ -218,23 +268,17 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
 	h.Count += other.Count
 	h.Sum += other.Sum
 
-	// TODO(beorn7): If needed, this can be optimized by inspecting the
-	// spans in other and create missing buckets in h in batches.
-	var iInSpan, index int32
-	for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); {
-		b := it.At()
-		h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket(
-			b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index,
-		)
-		index = b.Index
-	}
-	for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); {
-		b := it.At()
-		h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket(
-			b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index,
-		)
-		index = b.Index
-	}
+	otherPositiveSpans := other.PositiveSpans
+	otherPositiveBuckets := other.PositiveBuckets
+	otherNegativeSpans := other.NegativeSpans
+	otherNegativeBuckets := other.NegativeBuckets
+	if other.Schema != h.Schema {
+		otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema)
+		otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema)
+	}
 
+	h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
+	h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
 	return h
 }
 
@@ -245,25 +289,17 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
 	h.Count -= other.Count
 	h.Sum -= other.Sum
 
-	// TODO(beorn7): If needed, this can be optimized by inspecting the
-	// spans in other and create missing buckets in h in batches.
-	var iInSpan, index int32
-	for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); {
-		b := it.At()
-		b.Count *= -1
-		h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket(
-			b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index,
-		)
-		index = b.Index
-	}
-	for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); {
-		b := it.At()
-		b.Count *= -1
-		h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket(
-			b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index,
-		)
-		index = b.Index
-	}
+	otherPositiveSpans := other.PositiveSpans
+	otherPositiveBuckets := other.PositiveBuckets
+	otherNegativeSpans := other.NegativeSpans
+	otherNegativeBuckets := other.NegativeBuckets
+	if other.Schema != h.Schema {
+		otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema)
+		otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema)
+	}
 
+	h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
+	h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
 	return h
 }
 
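Note: the two hunks above replace the per-bucket addBucket loop with a two-phase merge: other's buckets are first converted to h's schema via mergeToSchema (only when the schemas differ), then folded into h in a single addBuckets pass, with the negative flag flipping the sign for Sub. A minimal usage sketch, assuming the model/histogram package as it stands in this tree (the histogram values and the printed result are illustrative only):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        a := &histogram.FloatHistogram{
            Schema: 0, Count: 3, Sum: 5,
            PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
            PositiveBuckets: []float64{2, 1},
        }
        b := &histogram.FloatHistogram{
            Schema: 0, Count: 1, Sum: 2,
            PositiveSpans:   []histogram.Span{{Offset: 1, Length: 1}},
            PositiveBuckets: []float64{1},
        }
        // Add mutates its receiver, so copy first if the input must survive.
        sum := a.Copy().Add(b)
        fmt.Println(sum.Count, sum.Sum, sum.PositiveBuckets) // 4 7 [2 2]
    }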
@@ -298,103 +334,6 @@ func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
 	return true
 }
 
-// addBucket takes the "coordinates" of the last bucket that was handled and
-// adds the provided bucket after it. If a corresponding bucket exists, the
-// count is added. If not, the bucket is inserted. The updated slices and the
-// coordinates of the inserted or added-to bucket are returned.
-func addBucket(
-	b Bucket[float64],
-	spans []Span, buckets []float64,
-	iSpan, iBucket int,
-	iInSpan, index int32,
-) (
-	newSpans []Span, newBuckets []float64,
-	newISpan, newIBucket int, newIInSpan int32,
-) {
-	if iSpan == -1 {
-		// First add, check if it is before all spans.
-		if len(spans) == 0 || spans[0].Offset > b.Index {
-			// Add bucket before all others.
-			buckets = append(buckets, 0)
-			copy(buckets[1:], buckets)
-			buckets[0] = b.Count
-			if len(spans) > 0 && spans[0].Offset == b.Index+1 {
-				spans[0].Length++
-				spans[0].Offset--
-				return spans, buckets, 0, 0, 0
-			}
-			spans = append(spans, Span{})
-			copy(spans[1:], spans)
-			spans[0] = Span{Offset: b.Index, Length: 1}
-			if len(spans) > 1 {
-				// Convert the absolute offset in the formerly
-				// first span to a relative offset.
-				spans[1].Offset -= b.Index + 1
-			}
-			return spans, buckets, 0, 0, 0
-		}
-		if spans[0].Offset == b.Index {
-			// Just add to first bucket.
-			buckets[0] += b.Count
-			return spans, buckets, 0, 0, 0
-		}
-		// We are behind the first bucket, so set everything to the
-		// first bucket and continue normally.
-		iSpan, iBucket, iInSpan = 0, 0, 0
-		index = spans[0].Offset
-	}
-	deltaIndex := b.Index - index
-	for {
-		remainingInSpan := int32(spans[iSpan].Length) - iInSpan
-		if deltaIndex < remainingInSpan {
-			// Bucket is in current span.
-			iBucket += int(deltaIndex)
-			iInSpan += deltaIndex
-			buckets[iBucket] += b.Count
-			return spans, buckets, iSpan, iBucket, iInSpan
-		}
-		deltaIndex -= remainingInSpan
-		iBucket += int(remainingInSpan)
-		iSpan++
-		if iSpan == len(spans) || deltaIndex < spans[iSpan].Offset {
-			// Bucket is in gap behind previous span (or there are no further spans).
-			buckets = append(buckets, 0)
-			copy(buckets[iBucket+1:], buckets[iBucket:])
-			buckets[iBucket] = b.Count
-			if deltaIndex == 0 {
-				// Directly after previous span, extend previous span.
-				if iSpan < len(spans) {
-					spans[iSpan].Offset--
-				}
-				iSpan--
-				iInSpan = int32(spans[iSpan].Length)
-				spans[iSpan].Length++
-				return spans, buckets, iSpan, iBucket, iInSpan
-			}
-			if iSpan < len(spans) && deltaIndex == spans[iSpan].Offset-1 {
-				// Directly before next span, extend next span.
-				iInSpan = 0
-				spans[iSpan].Offset--
-				spans[iSpan].Length++
-				return spans, buckets, iSpan, iBucket, iInSpan
-			}
-			// No next span, or next span is not directly adjacent to new bucket.
-			// Add new span.
-			iInSpan = 0
-			if iSpan < len(spans) {
-				spans[iSpan].Offset -= deltaIndex + 1
-			}
-			spans = append(spans, Span{})
-			copy(spans[iSpan+1:], spans[iSpan:])
-			spans[iSpan] = Span{Length: 1, Offset: deltaIndex}
-			return spans, buckets, iSpan, iBucket, iInSpan
-		}
-		// Try start of next span.
-		deltaIndex -= spans[iSpan].Offset
-		iInSpan = 0
-	}
-}
-
 // Compact eliminates empty buckets at the beginning and end of each span, then
 // merges spans that are consecutive or at most maxEmptyBuckets apart, and
 // finally splits spans that contain more consecutive empty buckets than
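Note: mergeToSchema, which the new Add/Sub path relies on, maps buckets of a finer schema onto a coarser one. For positive bucket indexes the mapping is ((idx-1) >> delta) + 1 with delta = schema - targetSchema, i.e. ceiling division by 2^delta, since each coarser bucket covers 2^delta finer ones. A small standalone sketch of just that arithmetic (the helper name is invented for illustration):

    package main

    import "fmt"

    // coarserIdx maps a positive bucket index from a finer schema onto a
    // coarser target schema: 2^(schema-targetSchema) source buckets merge
    // into one target bucket (ceiling division).
    func coarserIdx(idx, schema, targetSchema int32) int32 {
        return ((idx - 1) >> uint(schema-targetSchema)) + 1
    }

    func main() {
        // Going from schema 2 down to schema 0: indexes 1..4 collapse to 1,
        // indexes 5..8 collapse to 2, and so on.
        for idx := int32(1); idx <= 8; idx++ {
            fmt.Println(idx, "->", coarserIdx(idx, 2, 0))
        }
    }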
@@ -1033,3 +972,133 @@ func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, ta
 	return targetSpans, targetBuckets
 }
 
+// addBuckets adds the buckets described by spansB/bucketsB to the buckets described by spansA/bucketsA,
+// creating missing buckets in spansA/bucketsA as needed.
+// It returns the resulting spans/buckets (which must be used instead of the original spansA/bucketsA,
+// although spansA/bucketsA might get modified by this function).
+// All buckets must use the same provided schema.
+// Buckets in spansB/bucketsB with an absolute upper limit ≤ threshold are ignored.
+// If negative is true, the buckets in spansB/bucketsB are subtracted rather than added.
+func addBuckets(
+	schema int32, threshold float64, negative bool,
+	spansA []Span, bucketsA []float64,
+	spansB []Span, bucketsB []float64,
+) ([]Span, []float64) {
+	var (
+		iSpan              int = -1
+		iBucket            int = -1
+		iInSpan            int32
+		indexA             int32
+		indexB             int32
+		bIdxB              int
+		bucketB            float64
+		deltaIndex         int32
+		lowerThanThreshold = true
+	)
+
+	for _, spanB := range spansB {
+		indexB += spanB.Offset
+		for j := 0; j < int(spanB.Length); j++ {
+			if lowerThanThreshold && getBound(indexB, schema) <= threshold {
+				goto nextLoop
+			}
+			lowerThanThreshold = false
+
+			bucketB = bucketsB[bIdxB]
+			if negative {
+				bucketB *= -1
+			}
+
+			if iSpan == -1 {
+				if len(spansA) == 0 || spansA[0].Offset > indexB {
+					// Add bucket before all others.
+					bucketsA = append(bucketsA, 0)
+					copy(bucketsA[1:], bucketsA)
+					bucketsA[0] = bucketB
+					if len(spansA) > 0 && spansA[0].Offset == indexB+1 {
+						spansA[0].Length++
+						spansA[0].Offset--
+						goto nextLoop
+					} else {
+						spansA = append(spansA, Span{})
+						copy(spansA[1:], spansA)
+						spansA[0] = Span{Offset: indexB, Length: 1}
+						if len(spansA) > 1 {
+							// Convert the absolute offset in the formerly
+							// first span to a relative offset.
+							spansA[1].Offset -= indexB + 1
+						}
+						goto nextLoop
+					}
+				} else if spansA[0].Offset == indexB {
+					// Just add to first bucket.
+					bucketsA[0] += bucketB
+					goto nextLoop
+				}
+				iSpan, iBucket, iInSpan = 0, 0, 0
+				indexA = spansA[0].Offset
+			}
+			deltaIndex = indexB - indexA
+			for {
+				remainingInSpan := int32(spansA[iSpan].Length) - iInSpan
+				if deltaIndex < remainingInSpan {
+					// Bucket is in current span.
+					iBucket += int(deltaIndex)
+					iInSpan += deltaIndex
+					bucketsA[iBucket] += bucketB
+					break
+				} else {
+					deltaIndex -= remainingInSpan
+					iBucket += int(remainingInSpan)
+					iSpan++
+					if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset {
+						// Bucket is in gap behind previous span (or there are no further spans).
+						bucketsA = append(bucketsA, 0)
+						copy(bucketsA[iBucket+1:], bucketsA[iBucket:])
+						bucketsA[iBucket] = bucketB
+						switch {
+						case deltaIndex == 0:
+							// Directly after previous span, extend previous span.
+							if iSpan < len(spansA) {
+								spansA[iSpan].Offset--
+							}
+							iSpan--
+							iInSpan = int32(spansA[iSpan].Length)
+							spansA[iSpan].Length++
+							goto nextLoop
+						case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1:
+							// Directly before next span, extend next span.
+							iInSpan = 0
+							spansA[iSpan].Offset--
+							spansA[iSpan].Length++
+							goto nextLoop
+						default:
+							// No next span, or next span is not directly adjacent to new bucket.
+							// Add new span.
+							iInSpan = 0
+							if iSpan < len(spansA) {
+								spansA[iSpan].Offset -= deltaIndex + 1
+							}
+							spansA = append(spansA, Span{})
+							copy(spansA[iSpan+1:], spansA[iSpan:])
+							spansA[iSpan] = Span{Length: 1, Offset: deltaIndex}
+							goto nextLoop
+						}
+					} else {
+						// Try start of next span.
+						deltaIndex -= spansA[iSpan].Offset
+						iInSpan = 0
+					}
+				}
+			}
+
+		nextLoop:
+			indexA = indexB
+			indexB++
+			bIdxB++
+		}
+	}
+
+	return spansA, bucketsA
+}
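Note: both the removed addBucket and the new addBuckets walk the sparse span encoding, where each Span carries a gap (Offset, relative to the end of the previous span) and a run Length of consecutive buckets. A sketch of how that encoding expands to absolute bucket indexes (the expand helper is illustrative, not part of this change):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    // expand maps a span/bucket encoding to absolute index -> count.
    func expand(spans []histogram.Span, buckets []float64) map[int32]float64 {
        out := make(map[int32]float64, len(buckets))
        var idx int32
        i := 0
        for _, s := range spans {
            idx += s.Offset // gap to the previous span (or start, for the first)
            for j := uint32(0); j < s.Length; j++ {
                out[idx] = buckets[i]
                idx++
                i++
            }
        }
        return out
    }

    func main() {
        spans := []histogram.Span{{Offset: 5, Length: 2}, {Offset: 1, Length: 1}}
        fmt.Println(expand(spans, []float64{1, 3, 2})) // map[5:1 6:3 8:2]
    }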
@@ -938,6 +938,21 @@ func TestFloatHistogramCompact(t *testing.T) {
 			NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4},
 		},
 	},
+	{
+		"cut empty buckets in the middle",
+		&FloatHistogram{
+			PositiveSpans:   []Span{{5, 4}},
+			PositiveBuckets: []float64{1, 3, 0, 2},
+		},
+		0,
+		&FloatHistogram{
+			PositiveSpans: []Span{
+				{Offset: 5, Length: 2},
+				{Offset: 1, Length: 1},
+			},
+			PositiveBuckets: []float64{1, 3, 2},
+		},
+	},
 	{
 		"cut empty buckets at start or end of spans, even in the middle",
 		&FloatHistogram{
@@ -955,7 +970,7 @@ func TestFloatHistogramCompact(t *testing.T) {
 		},
 	},
 	{
-		"cut empty buckets at start or end but merge spans due to maxEmptyBuckets",
+		"cut empty buckets at start and end - also merge spans due to maxEmptyBuckets",
 		&FloatHistogram{
 			PositiveSpans:   []Span{{-4, 4}, {5, 3}},
 			PositiveBuckets: []float64{0, 0, 1, 3.3, 4.2, 0.1, 3.3},
@@ -998,18 +1013,42 @@ func TestFloatHistogramCompact(t *testing.T) {
 			PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3},
 		},
 	},
+	{
+		"cut empty buckets from the middle of a span, avoiding none due to maxEmptyBuckets",
+		&FloatHistogram{
+			PositiveSpans:   []Span{{-2, 4}},
+			PositiveBuckets: []float64{1, 0, 0, 3.3},
+		},
+		1,
+		&FloatHistogram{
+			PositiveSpans:   []Span{{-2, 1}, {2, 1}},
+			PositiveBuckets: []float64{1, 3.3},
+		},
+	},
+	{
+		"cut empty buckets and merge spans due to maxEmptyBuckets",
+		&FloatHistogram{
+			PositiveSpans:   []Span{{-2, 4}, {3, 1}},
+			PositiveBuckets: []float64{1, 0, 0, 3.3, 4.2},
+		},
+		1,
+		&FloatHistogram{
+			PositiveSpans:   []Span{{-2, 1}, {2, 1}, {3, 1}},
+			PositiveBuckets: []float64{1, 3.3, 4.2},
+		},
+	},
 	{
 		"cut empty buckets from the middle of a span, avoiding some due to maxEmptyBuckets",
 		&FloatHistogram{
-			PositiveSpans:   []Span{{-4, 6}, {3, 3}},
-			PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3},
+			PositiveSpans:   []Span{{-4, 6}, {3, 3}, {10, 2}},
+			PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3, 2, 3},
 			NegativeSpans:   []Span{{0, 2}, {3, 5}},
 			NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4},
 		},
 		1,
 		&FloatHistogram{
-			PositiveSpans:   []Span{{-2, 1}, {2, 1}, {3, 3}},
-			PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3},
+			PositiveSpans:   []Span{{-2, 1}, {2, 1}, {3, 3}, {10, 2}},
+			PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3, 2, 3},
 			NegativeSpans:   []Span{{0, 2}, {3, 5}},
 			NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4},
 		},
 	},
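Note: Compact(maxEmptyBuckets) only keeps a run of empty buckets inline when the run is no longer than maxEmptyBuckets; longer runs split the span, exactly as in the first new case above. A minimal sketch using the same data:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        h := &histogram.FloatHistogram{
            PositiveSpans:   []histogram.Span{{Offset: -2, Length: 4}},
            PositiveBuckets: []float64{1, 0, 0, 3.3},
        }
        // Two consecutive empty buckets exceed maxEmptyBuckets=1, so the
        // span is split instead of keeping the zeros inline.
        h = h.Compact(1)
        fmt.Println(h.PositiveSpans, h.PositiveBuckets) // [{-2 1} {2 1}] [1 3.3]
    }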
@@ -49,12 +49,6 @@ type Labels struct {
 	data string
 }
 
-type labelSlice []Label
-
-func (ls labelSlice) Len() int           { return len(ls) }
-func (ls labelSlice) Swap(i, j int)      { ls[i], ls[j] = ls[j], ls[i] }
-func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
-
 func decodeSize(data string, index int) (int, int) {
 	// Fast-path for common case of a single byte, value 0..127.
 	b := data[index]
@@ -300,13 +294,26 @@ func (ls Labels) Get(name string) string {
 
 // Has returns true if the label with the given name is present.
 func (ls Labels) Has(name string) bool {
+	if name == "" { // Avoid crash in loop if someone asks for "".
+		return false // Prometheus does not store blank label names.
+	}
 	for i := 0; i < len(ls.data); {
-		var lName string
-		lName, i = decodeString(ls.data, i)
-		_, i = decodeString(ls.data, i)
-		if lName == name {
-			return true
+		var size int
+		size, i = decodeSize(ls.data, i)
+		if ls.data[i] == name[0] {
+			lName := ls.data[i : i+size]
+			i += size
+			if lName == name {
+				return true
+			}
+		} else {
+			if ls.data[i] > name[0] { // Stop looking if we've gone past.
+				break
+			}
+			i += size
 		}
+		size, i = decodeSize(ls.data, i)
+		i += size
 	}
 	return false
 }
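Note: in the stringlabels representation, ls.data packs length-prefixed name/value strings sorted by name, which is what lets the new Has compare a single byte before materializing the name and bail out once past the target. A simplified sketch of that walk (single-byte sizes only, well-formed data and a non-empty name assumed; the has helper is illustrative):

    package main

    import "fmt"

    // has walks a packed label string of the form
    // len(name) name len(value) value ... with entries sorted by name.
    func has(data, name string) bool {
        for i := 0; i < len(data); {
            nameLen := int(data[i]) // the real decodeSize also handles multi-byte sizes
            i++
            switch {
            case data[i] == name[0] && data[i:i+nameLen] == name:
                return true
            case data[i] > name[0]:
                return false // sorted by name: we have gone past it
            }
            i += nameLen
            valueLen := int(data[i])
            i++
            i += valueLen
        }
        return false
    }

    func main() {
        data := "\x08instance\x09demo:9100\x03job\x04node"
        fmt.Println(has(data, "job"), has(data, "zone")) // true false
    }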
@@ -472,16 +472,22 @@ func BenchmarkLabels_Get(b *testing.B) {
 	for _, scenario := range []struct {
 		desc, label string
 	}{
-		{"get first label", allLabels[0].Name},
-		{"get middle label", allLabels[size/2].Name},
-		{"get last label", allLabels[size-1].Name},
-		{"get not-found label", "benchmark"},
+		{"first label", allLabels[0].Name},
+		{"middle label", allLabels[size/2].Name},
+		{"last label", allLabels[size-1].Name},
+		{"not-found label", "benchmark"},
 	} {
 		b.Run(scenario.desc, func(b *testing.B) {
-			b.ResetTimer()
+			b.Run("get", func(b *testing.B) {
 				for i := 0; i < b.N; i++ {
 					_ = labels.Get(scenario.label)
 				}
+			})
+			b.Run("has", func(b *testing.B) {
+				for i := 0; i < b.N; i++ {
+					_ = labels.Has(scenario.label)
+				}
+			})
 		})
 	}
 })
@@ -56,6 +56,10 @@ type ProtobufParser struct {
 	fieldsDone  bool // true if no more fields of a Summary or (legacy) Histogram to be processed.
 	redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram.
 
+	// exemplarReturned is set to true each time an exemplar has been
+	// returned, and set back to false upon each Next() call.
+	exemplarReturned bool
+
 	// state is marked by the entry we are processing. EntryInvalid implies
 	// that we have to decode the next MetricFamily.
 	state Entry
@@ -293,8 +297,12 @@ func (p *ProtobufParser) Metric(l *labels.Labels) string {
 // Exemplar writes the exemplar of the current sample into the passed
 // exemplar. It returns if an exemplar exists or not. In case of a native
 // histogram, the legacy bucket section is still used for exemplars. To ingest
-// all examplars, call the Exemplar method repeatedly until it returns false.
+// all exemplars, call the Exemplar method repeatedly until it returns false.
 func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
+	if p.exemplarReturned && p.state == EntrySeries {
+		// We only ever return one exemplar per (non-native-histogram) series.
+		return false
+	}
 	m := p.mf.GetMetric()[p.metricPos]
 	var exProto *dto.Exemplar
 	switch p.mf.GetType() {
@@ -335,6 +343,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
 	}
 	p.builder.Sort()
 	ex.Labels = p.builder.Labels()
+	p.exemplarReturned = true
 	return true
 }
 
@@ -342,6 +351,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
 // text format parser). It returns (EntryInvalid, io.EOF) if no samples were
 // read.
 func (p *ProtobufParser) Next() (Entry, error) {
+	p.exemplarReturned = false
 	switch p.state {
 	case EntryInvalid:
 		p.metricPos = 0
@@ -1779,6 +1779,7 @@ func TestProtobufParse(t *testing.T) {
 				} else {
 					require.Equal(t, true, found, "i: %d", i)
 					require.Equal(t, exp[i].e[0], e, "i: %d", i)
+					require.False(t, p.Exemplar(&e), "too many exemplars returned, i: %d", i)
 				}
 
 			case EntryHistogram:
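Note: the parser hunks above implement a simple latch: Exemplar sets exemplarReturned when it hands one out, Next clears it, and the new guard keeps a caller that polls Exemplar in a loop from receiving the same exemplar forever on non-native-histogram series. A standalone sketch of the pattern (the iterator type is made up purely for illustration):

    package main

    import "fmt"

    // latchingIter mimics the exemplarReturned latch: Next resets the latch,
    // and At refuses to hand out the same item twice.
    type latchingIter struct {
        items    []string
        pos      int
        returned bool
    }

    func (it *latchingIter) Next() bool {
        it.returned = false
        it.pos++
        return it.pos < len(it.items)
    }

    func (it *latchingIter) At(dst *string) bool {
        if it.returned {
            return false // only one value per position
        }
        *dst = it.items[it.pos]
        it.returned = true
        return true
    }

    func main() {
        it := &latchingIter{items: []string{"a", "b"}, pos: -1}
        for it.Next() {
            var v string
            for it.At(&v) { // terminates: the second call returns false
                fmt.Println(v)
            }
        }
    }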
@@ -32,6 +32,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/sigv4"
 	"github.com/prometheus/common/version"
 	"go.uber.org/atomic"
 
@@ -640,6 +641,17 @@ func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metri
 	if err != nil {
 		return nil, err
 	}
+	t := client.Transport
+
+	if cfg.SigV4Config != nil {
+		t, err = sigv4.NewSigV4RoundTripper(cfg.SigV4Config, client.Transport)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	client.Transport = t
+
 	s := &alertmanagerSet{
 		client: client,
 		cfg:    cfg,
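Note: the SigV4 support above is plain http.RoundTripper decoration: the signer wraps the client's existing transport and is only swapped in when sigv4 is configured. A self-contained sketch of that shape (the header and type below are invented for illustration; the real signer lives in github.com/prometheus/common/sigv4):

    package main

    import (
        "fmt"
        "net/http"
    )

    // headerRoundTripper stands in for the SigV4 signer: it decorates an
    // existing transport, mirroring how newAlertmanagerSet swaps
    // client.Transport.
    type headerRoundTripper struct {
        next http.RoundTripper
    }

    func (h headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
        req = req.Clone(req.Context())
        req.Header.Set("X-Example-Signature", "signed") // a real signer computes this
        return h.next.RoundTrip(req)
    }

    func main() {
        client := &http.Client{Transport: http.DefaultTransport}
        client.Transport = headerRoundTripper{next: client.Transport}
        fmt.Printf("%T\n", client.Transport)
    }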
@@ -66,6 +66,8 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval,
 			return err
 		}
 	}
+	stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
+	stor.DB.Compact()
 	return nil
 }
 
@@ -186,7 +188,7 @@ func rangeQueryCases() []benchCase {
 			expr:  "count({__name__!=\"\",l=\"\"})",
 			steps: 1,
 		},
-		// timestamp() function
+		// Functions which have special handling inside eval()
 		{
 			expr: "timestamp(a_X)",
 		},
@@ -222,6 +224,7 @@ func rangeQueryCases() []benchCase {
 
 func BenchmarkRangeQuery(b *testing.B) {
 	stor := teststorage.New(b)
+	stor.DB.DisableCompactions() // Don't want auto-compaction disrupting timings.
 	defer stor.Close()
 	opts := EngineOpts{
 		Logger: nil,
@@ -1144,7 +1144,11 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
 		}
 	}
 	enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)}
-	seriess := make(map[uint64]Series, biggestLen) // Output series by series hash.
+	type seriesAndTimestamp struct {
+		Series
+		ts int64
+	}
+	seriess := make(map[uint64]seriesAndTimestamp, biggestLen) // Output series by series hash.
 	tempNumSamples := ev.currentSamples
 
 	var (
@@ -1229,9 +1233,6 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
 		// Make the function call.
 		enh.Ts = ts
 		result, ws := funcCall(args, bufHelpers, enh)
-		if result.ContainsSameLabelset() {
-			ev.errorf("vector cannot contain metrics with the same labelset")
-		}
 		enh.Out = result[:0] // Reuse result vector.
 		warnings = append(warnings, ws...)
 
@@ -1248,6 +1249,9 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
 
 		// If this could be an instant query, shortcut so as not to change sort order.
 		if ev.endTimestamp == ev.startTimestamp {
+			if result.ContainsSameLabelset() {
+				ev.errorf("vector cannot contain metrics with the same labelset")
+			}
 			mat := make(Matrix, len(result))
 			for i, s := range result {
 				if s.H == nil {
@@ -1265,8 +1269,13 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
 		for _, sample := range result {
 			h := sample.Metric.Hash()
 			ss, ok := seriess[h]
-			if !ok {
-				ss = Series{Metric: sample.Metric}
+			if ok {
+				if ss.ts == ts { // If we've seen this output series before at this timestamp, it's a duplicate.
+					ev.errorf("vector cannot contain metrics with the same labelset")
+				}
+				ss.ts = ts
+			} else {
+				ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts}
 			}
 			if sample.H == nil {
 				if ss.Floats == nil {
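Note: moving the ContainsSameLabelset check out of the hot per-step path works because the output map already keys series by labelset hash; remembering the last timestamp each hash was written at makes duplicates at one step detectable essentially for free. The shape of the check, as a standalone sketch:

    package main

    import "fmt"

    func main() {
        type seen struct{ ts int64 }
        series := map[uint64]seen{}
        samples := []struct {
            hash uint64
            ts   int64
        }{{1, 0}, {2, 0}, {1, 60}, {1, 60}} // last entry repeats hash 1 at t=60
        for _, s := range samples {
            if prev, ok := series[s.hash]; ok && prev.ts == s.ts {
                fmt.Println("vector cannot contain metrics with the same labelset")
                return
            }
            series[s.hash] = seen{ts: s.ts}
        }
    }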
@@ -1293,7 +1302,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
 	// Assemble the output matrix. By the time we get here we know we don't have too many samples.
 	mat := make(Matrix, 0, len(seriess))
 	for _, ss := range seriess {
-		mat = append(mat, ss)
+		mat = append(mat, ss.Series)
 	}
 	ev.currentSamples = originalNumSamples + mat.TotalSamples()
 	ev.samplesStats.UpdatePeak(ev.currentSamples)
@@ -1388,7 +1397,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
 			unwrapParenExpr(&arg)
 			vs, ok := arg.(*parser.VectorSelector)
 			if ok {
-				return ev.evalTimestampFunctionOverVectorSelector(vs, call, e)
+				return ev.rangeEvalTimestampFunctionOverVectorSelector(vs, call, e)
 			}
 		}
 
@@ -1826,7 +1835,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
 	panic(fmt.Errorf("unhandled expression of type: %T", expr))
 }
 
-func (ev *evaluator) evalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, storage.Warnings) {
+func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, storage.Warnings) {
 	ws, err := checkAndExpandSeriesSet(ev.ctx, vs)
 	if err != nil {
 		ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
@@ -1840,8 +1849,9 @@ func (ev *evaluator) evalTimestampFunctionOverVectorSe
 
 	return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
 		if vs.Timestamp != nil {
-			// This is a special case only for "timestamp" since the offset
-			// needs to be adjusted for every point.
+			// This is a special case for "timestamp()" when the @ modifier is used, to ensure that
+			// we return a point for each time step in this case.
+			// See https://github.com/prometheus/prometheus/issues/8433.
 			vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond
 		}
 
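Note: the rewritten comment is easiest to see with the @ modifier in a range query: the selector always reads the sample at the pinned time, yet timestamp() must still emit a value at every resolution step, so the offset is re-derived from enh.Ts on each step. A hypothetical test in the style of this file (name and values illustrative; the helpers are the ones introduced further down in this diff):

    func TestTimestampWithAtModifier(t *testing.T) {
        engine := newTestEngine()
        storage := LoadedStorage(t, `
    load 1m
      metric 0+1x1000
    `)
        t.Cleanup(func() { storage.Close() })

        // "metric @ 60.000" pins the sample lookup to t=60s, but each of the
        // 121 steps between 0s and 120s must still yield a point.
        qry, err := engine.NewRangeQuery(context.Background(), storage, nil,
            "timestamp(metric @ 60.000)", time.Unix(0, 0), time.Unix(120, 0), time.Second)
        require.NoError(t, err)
        res := qry.Exec(context.Background())
        require.NoError(t, res.Err)
    }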
@@ -25,7 +25,6 @@ import (
 
 	"github.com/go-kit/log"
 
-	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 	"github.com/prometheus/prometheus/util/testutil"
 
 	"github.com/stretchr/testify/require"
@@ -35,7 +34,9 @@ import (
 	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 	"github.com/prometheus/prometheus/util/stats"
+	"github.com/prometheus/prometheus/util/teststorage"
 )
 
 func TestMain(m *testing.M) {
@@ -566,6 +567,7 @@ func TestSelectHintsSetCorrectly(t *testing.T) {
 				err   error
 			)
+			ctx := context.Background()
 
 			if tc.end == 0 {
 				query, err = engine.NewInstantQuery(ctx, hintsRecorder, nil, tc.query, timestamp.Time(tc.start))
 			} else {
@@ -573,7 +575,7 @@ func TestSelectHintsSetCorrectly(t *testing.T) {
 			}
 			require.NoError(t, err)
 
-			res := query.Exec(ctx)
+			res := query.Exec(context.Background())
 			require.NoError(t, res.Err)
 
 			require.Equal(t, tc.expected, hintsRecorder.hints)
@@ -636,15 +638,11 @@ func TestEngineShutdown(t *testing.T) {
 }
 
 func TestEngineEvalStmtTimestamps(t *testing.T) {
-	test, err := NewTest(t, `
+	storage := LoadedStorage(t, `
 load 10s
   metric 1 2
 `)
-	require.NoError(t, err)
-	defer test.Close()
-
-	err = test.Run()
-	require.NoError(t, err)
+	t.Cleanup(func() { storage.Close() })
 
 	cases := []struct {
 		Query       string
@@ -728,14 +726,15 @@ load 10s
 		t.Run(fmt.Sprintf("%d query=%s", i, c.Query), func(t *testing.T) {
 			var err error
 			var qry Query
+			engine := newTestEngine()
 			if c.Interval == 0 {
-				qry, err = test.QueryEngine().NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start)
+				qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start)
 			} else {
-				qry, err = test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval)
+				qry, err = engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval)
 			}
 			require.NoError(t, err)
 
-			res := qry.Exec(test.Context())
+			res := qry.Exec(context.Background())
 			if c.ShouldError {
 				require.Error(t, res.Err, "expected error for the query %q", c.Query)
 				return
@@ -748,18 +747,14 @@ load 10s
 }
 
 func TestQueryStatistics(t *testing.T) {
-	test, err := NewTest(t, `
+	storage := LoadedStorage(t, `
 load 10s
   metricWith1SampleEvery10Seconds 1+1x100
   metricWith3SampleEvery10Seconds{a="1",b="1"} 1+1x100
   metricWith3SampleEvery10Seconds{a="2",b="2"} 1+1x100
   metricWith3SampleEvery10Seconds{a="3",b="2"} 1+1x100
 `)
-	require.NoError(t, err)
-	defer test.Close()
-
-	err = test.Run()
-	require.NoError(t, err)
+	t.Cleanup(func() { storage.Close() })
 
 	cases := []struct {
 		Query string
@@ -1194,7 +1189,7 @@ load 10s
 		},
 	}
 
-	engine := test.QueryEngine()
+	engine := newTestEngine()
 	engine.enablePerStepStats = true
 	origMaxSamples := engine.maxSamplesPerQuery
 	for _, c := range cases {
@@ -1206,13 +1201,13 @@ load 10s
 			var err error
 			var qry Query
 			if c.Interval == 0 {
-				qry, err = engine.NewInstantQuery(test.context, test.Queryable(), opts, c.Query, c.Start)
+				qry, err = engine.NewInstantQuery(context.Background(), storage, opts, c.Query, c.Start)
 			} else {
-				qry, err = engine.NewRangeQuery(test.context, test.Queryable(), opts, c.Query, c.Start, c.End, c.Interval)
+				qry, err = engine.NewRangeQuery(context.Background(), storage, opts, c.Query, c.Start, c.End, c.Interval)
 			}
 			require.NoError(t, err)
 
-			res := qry.Exec(test.Context())
+			res := qry.Exec(context.Background())
 			require.Equal(t, expErr, res.Err)
 
 			return qry.Stats()
@@ -1234,17 +1229,13 @@ load 10s
 }
 
 func TestMaxQuerySamples(t *testing.T) {
-	test, err := NewTest(t, `
+	storage := LoadedStorage(t, `
 load 10s
   metric 1+1x100
   bigmetric{a="1"} 1+1x100
   bigmetric{a="2"} 1+1x100
 `)
-	require.NoError(t, err)
-	defer test.Close()
-
-	err = test.Run()
-	require.NoError(t, err)
+	t.Cleanup(func() { storage.Close() })
 
 	// These test cases should be touching the limit exactly (hence no exceeding).
 	// Exceeding the limit will be tested by doing -1 to the MaxSamples.
@@ -1382,20 +1373,20 @@ load 10s
 		},
 	}
 
-	engine := test.QueryEngine()
 	for _, c := range cases {
 		t.Run(c.Query, func(t *testing.T) {
+			engine := newTestEngine()
 			testFunc := func(expError error) {
 				var err error
 				var qry Query
 				if c.Interval == 0 {
-					qry, err = engine.NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start)
+					qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start)
 				} else {
-					qry, err = engine.NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval)
+					qry, err = engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval)
 				}
 				require.NoError(t, err)
 
-				res := qry.Exec(test.Context())
+				res := qry.Exec(context.Background())
 				stats := qry.Stats()
 				require.Equal(t, expError, res.Err)
 				require.NotNil(t, stats)
@@ -1416,7 +1407,8 @@ load 10s
 }
 
 func TestAtModifier(t *testing.T) {
-	test, err := NewTest(t, `
+	engine := newTestEngine()
+	storage := LoadedStorage(t, `
 load 10s
   metric{job="1"} 0+1x1000
   metric{job="2"} 0+2x1000
@@ -1427,11 +1419,7 @@ load 10s
 load 1ms
   metric_ms 0+1x10000
 `)
-	require.NoError(t, err)
-	defer test.Close()
-
-	err = test.Run()
-	require.NoError(t, err)
+	t.Cleanup(func() { storage.Close() })
 
 	lbls1 := labels.FromStrings("__name__", "metric", "job", "1")
 	lbls2 := labels.FromStrings("__name__", "metric", "job", "2")
@@ -1441,7 +1429,7 @@ load 1ms
 	lblsneg := labels.FromStrings("__name__", "metric_neg")
 
 	// Add some samples with negative timestamp.
-	db := test.TSDB()
+	db := storage.DB
 	app := db.Appender(context.Background())
 	ref, err := app.Append(0, lblsneg, -1000000, 1000)
 	require.NoError(t, err)
@@ -1630,13 +1618,13 @@ load 1ms
 			var err error
 			var qry Query
 			if c.end == 0 {
-				qry, err = test.QueryEngine().NewInstantQuery(test.context, test.Queryable(), nil, c.query, start)
+				qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.query, start)
 			} else {
-				qry, err = test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.query, start, end, interval)
+				qry, err = engine.NewRangeQuery(context.Background(), storage, nil, c.query, start, end, interval)
 			}
 			require.NoError(t, err)
 
-			res := qry.Exec(test.Context())
+			res := qry.Exec(context.Background())
 			require.NoError(t, res.Err)
 			if expMat, ok := c.result.(Matrix); ok {
 				sort.Sort(expMat)
@@ -1955,18 +1943,16 @@ func TestSubquerySelector(t *testing.T) {
 		},
 	} {
 		t.Run("", func(t *testing.T) {
-			test, err := NewTest(t, tst.loadString)
-			require.NoError(t, err)
-			defer test.Close()
-
-			require.NoError(t, test.Run())
-			engine := test.QueryEngine()
+			engine := newTestEngine()
+			storage := LoadedStorage(t, tst.loadString)
+			t.Cleanup(func() { storage.Close() })
+
 			for _, c := range tst.cases {
 				t.Run(c.Query, func(t *testing.T) {
-					qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start)
+					qry, err := engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start)
 					require.NoError(t, err)
 
-					res := qry.Exec(test.Context())
+					res := qry.Exec(context.Background())
 					require.Equal(t, c.Result.Err, res.Err)
 					mat := res.Value.(Matrix)
 					sort.Sort(mat)
|
@ -1978,95 +1964,42 @@ func TestSubquerySelector(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTimestampFunction_StepsMoreOftenThanSamples(t *testing.T) {
|
func TestTimestampFunction_StepsMoreOftenThanSamples(t *testing.T) {
|
||||||
test, err := NewTest(t, `
|
engine := newTestEngine()
|
||||||
|
storage := LoadedStorage(t, `
|
||||||
load 1m
|
load 1m
|
||||||
metric 0+1x1000
|
metric 0+1x1000
|
||||||
`)
|
`)
|
||||||
require.NoError(t, err)
|
t.Cleanup(func() { storage.Close() })
|
||||||
defer test.Close()
|
|
||||||
|
|
||||||
err = test.Run()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
query := "timestamp(metric)"
|
query := "timestamp(metric)"
|
||||||
start := time.Unix(0, 0)
|
start := time.Unix(0, 0)
|
||||||
end := time.Unix(61, 0)
|
end := time.Unix(61, 0)
|
||||||
interval := time.Second
|
interval := time.Second
|
||||||
|
|
||||||
|
// We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s.
|
||||||
|
expectedPoints := []FPoint{}
|
||||||
|
|
||||||
|
for t := 0; t <= 59; t++ {
|
||||||
|
expectedPoints = append(expectedPoints, FPoint{F: 0, T: int64(t * 1000)})
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedPoints = append(
|
||||||
|
expectedPoints,
|
||||||
|
FPoint{F: 60, T: 60_000},
|
||||||
|
FPoint{F: 60, T: 61_000},
|
||||||
|
)
|
||||||
|
|
||||||
expectedResult := Matrix{
|
expectedResult := Matrix{
|
||||||
Series{
|
Series{
|
||||||
Floats: []FPoint{
|
Floats: expectedPoints,
|
||||||
{F: 0, T: 0},
|
|
||||||
{F: 0, T: 1_000},
|
|
||||||
{F: 0, T: 2_000},
|
|
||||||
{F: 0, T: 3_000},
|
|
||||||
{F: 0, T: 4_000},
|
|
||||||
{F: 0, T: 5_000},
|
|
||||||
{F: 0, T: 6_000},
|
|
||||||
{F: 0, T: 7_000},
|
|
||||||
{F: 0, T: 8_000},
|
|
||||||
{F: 0, T: 9_000},
|
|
||||||
{F: 0, T: 10_000},
|
|
||||||
{F: 0, T: 11_000},
|
|
||||||
{F: 0, T: 12_000},
|
|
||||||
{F: 0, T: 13_000},
|
|
||||||
{F: 0, T: 14_000},
|
|
||||||
{F: 0, T: 15_000},
|
|
||||||
{F: 0, T: 16_000},
|
|
||||||
{F: 0, T: 17_000},
|
|
||||||
{F: 0, T: 18_000},
|
|
||||||
{F: 0, T: 19_000},
|
|
||||||
{F: 0, T: 20_000},
|
|
||||||
{F: 0, T: 21_000},
|
|
||||||
{F: 0, T: 22_000},
|
|
||||||
{F: 0, T: 23_000},
|
|
||||||
{F: 0, T: 24_000},
|
|
||||||
{F: 0, T: 25_000},
|
|
||||||
{F: 0, T: 26_000},
|
|
||||||
{F: 0, T: 27_000},
|
|
||||||
{F: 0, T: 28_000},
|
|
||||||
{F: 0, T: 29_000},
|
|
||||||
{F: 0, T: 30_000},
|
|
||||||
{F: 0, T: 31_000},
|
|
||||||
{F: 0, T: 32_000},
|
|
||||||
{F: 0, T: 33_000},
|
|
||||||
{F: 0, T: 34_000},
|
|
||||||
{F: 0, T: 35_000},
|
|
||||||
{F: 0, T: 36_000},
|
|
||||||
{F: 0, T: 37_000},
|
|
||||||
{F: 0, T: 38_000},
|
|
||||||
{F: 0, T: 39_000},
|
|
||||||
{F: 0, T: 40_000},
|
|
||||||
{F: 0, T: 41_000},
|
|
||||||
{F: 0, T: 42_000},
|
|
||||||
{F: 0, T: 43_000},
|
|
||||||
{F: 0, T: 44_000},
|
|
||||||
{F: 0, T: 45_000},
|
|
||||||
{F: 0, T: 46_000},
|
|
||||||
{F: 0, T: 47_000},
|
|
||||||
{F: 0, T: 48_000},
|
|
||||||
{F: 0, T: 49_000},
|
|
||||||
{F: 0, T: 50_000},
|
|
||||||
{F: 0, T: 51_000},
|
|
||||||
{F: 0, T: 52_000},
|
|
||||||
{F: 0, T: 53_000},
|
|
||||||
{F: 0, T: 54_000},
|
|
||||||
{F: 0, T: 55_000},
|
|
||||||
{F: 0, T: 56_000},
|
|
||||||
{F: 0, T: 57_000},
|
|
||||||
{F: 0, T: 58_000},
|
|
||||||
{F: 0, T: 59_000},
|
|
||||||
{F: 60, T: 60_000},
|
|
||||||
{F: 60, T: 61_000},
|
|
||||||
},
|
|
||||||
Metric: labels.EmptyLabels(),
|
Metric: labels.EmptyLabels(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
qry, err := test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, query, start, end, interval)
|
qry, err := engine.NewRangeQuery(context.Background(), storage, nil, query, start, end, interval)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
res := qry.Exec(test.Context())
|
res := qry.Exec(context.Background())
|
||||||
require.NoError(t, res.Err)
|
require.NoError(t, res.Err)
|
||||||
require.Equal(t, expectedResult, res.Value)
|
require.Equal(t, expectedResult, res.Value)
|
||||||
}
|
}
|
||||||
|
@ -3005,7 +2938,6 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEngineOptsValidation(t *testing.T) {
|
func TestEngineOptsValidation(t *testing.T) {
|
||||||
ctx := context.Background()
|
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
opts EngineOpts
|
opts EngineOpts
|
||||||
query string
|
query string
|
||||||
|
@ -3065,8 +2997,8 @@ func TestEngineOptsValidation(t *testing.T) {
|
||||||
|
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
eng := NewEngine(c.opts)
|
eng := NewEngine(c.opts)
|
||||||
_, err1 := eng.NewInstantQuery(ctx, nil, nil, c.query, time.Unix(10, 0))
|
_, err1 := eng.NewInstantQuery(context.Background(), nil, nil, c.query, time.Unix(10, 0))
|
||||||
_, err2 := eng.NewRangeQuery(ctx, nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second)
|
_, err2 := eng.NewRangeQuery(context.Background(), nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second)
|
||||||
if c.fail {
|
if c.fail {
|
||||||
require.Equal(t, c.expError, err1)
|
require.Equal(t, c.expError, err1)
|
||||||
require.Equal(t, c.expError, err2)
|
require.Equal(t, c.expError, err2)
|
||||||
|
@@ -3226,17 +3158,14 @@ func TestRangeQuery(t *testing.T) {
 	}
 	for _, c := range cases {
 		t.Run(c.Name, func(t *testing.T) {
-			test, err := NewTest(t, c.Load)
-			require.NoError(t, err)
-			defer test.Close()
-
-			err = test.Run()
-			require.NoError(t, err)
-
-			qry, err := test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval)
-			require.NoError(t, err)
-
-			res := qry.Exec(test.Context())
+			engine := newTestEngine()
+			storage := LoadedStorage(t, c.Load)
+			t.Cleanup(func() { storage.Close() })
+
+			qry, err := engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval)
+			require.NoError(t, err)
+
+			res := qry.Exec(context.Background())
 			require.NoError(t, res.Err)
 			require.Equal(t, c.Result, res.Value)
 		})
|
@ -3246,27 +3175,24 @@ func TestRangeQuery(t *testing.T) {
|
||||||
func TestNativeHistogramRate(t *testing.T) {
|
func TestNativeHistogramRate(t *testing.T) {
|
||||||
// TODO(beorn7): Integrate histograms into the PromQL testing framework
|
// TODO(beorn7): Integrate histograms into the PromQL testing framework
|
||||||
// and write more tests there.
|
// and write more tests there.
|
||||||
test, err := NewTest(t, "")
|
engine := newTestEngine()
|
||||||
require.NoError(t, err)
|
storage := teststorage.New(t)
|
||||||
defer test.Close()
|
t.Cleanup(func() { storage.Close() })
|
||||||
|
|
||||||
seriesName := "sparse_histogram_series"
|
seriesName := "sparse_histogram_series"
|
||||||
lbls := labels.FromStrings("__name__", seriesName)
|
lbls := labels.FromStrings("__name__", seriesName)
|
||||||
|
|
||||||
app := test.Storage().Appender(context.TODO())
|
app := storage.Appender(context.Background())
|
||||||
for i, h := range tsdbutil.GenerateTestHistograms(100) {
|
for i, h := range tsdbutil.GenerateTestHistograms(100) {
|
||||||
_, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), h, nil)
|
_, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), h, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
require.NoError(t, app.Commit())
|
require.NoError(t, app.Commit())
|
||||||
|
|
||||||
require.NoError(t, test.Run())
|
|
||||||
engine := test.QueryEngine()
|
|
||||||
|
|
||||||
queryString := fmt.Sprintf("rate(%s[1m])", seriesName)
|
queryString := fmt.Sprintf("rate(%s[1m])", seriesName)
|
||||||
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond)))
|
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond)))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
res := qry.Exec(test.Context())
|
res := qry.Exec(context.Background())
|
||||||
require.NoError(t, res.Err)
|
require.NoError(t, res.Err)
|
||||||
vector, err := res.Vector()
|
vector, err := res.Vector()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -3277,7 +3203,7 @@ func TestNativeHistogramRate(t *testing.T) {
|
||||||
Schema: 1,
|
Schema: 1,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: 1. / 15.,
|
ZeroCount: 1. / 15.,
|
||||||
Count: 8. / 15.,
|
Count: 9. / 15.,
|
||||||
Sum: 1.226666666666667,
|
Sum: 1.226666666666667,
|
||||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
|
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
|
||||||
PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
|
PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
|
||||||
|
@ -3290,27 +3216,24 @@ func TestNativeHistogramRate(t *testing.T) {
|
||||||
func TestNativeFloatHistogramRate(t *testing.T) {
|
func TestNativeFloatHistogramRate(t *testing.T) {
|
||||||
// TODO(beorn7): Integrate histograms into the PromQL testing framework
|
// TODO(beorn7): Integrate histograms into the PromQL testing framework
|
||||||
// and write more tests there.
|
// and write more tests there.
|
||||||
test, err := NewTest(t, "")
|
engine := newTestEngine()
|
||||||
require.NoError(t, err)
|
storage := teststorage.New(t)
|
||||||
defer test.Close()
|
t.Cleanup(func() { storage.Close() })
|
||||||
|
|
||||||
seriesName := "sparse_histogram_series"
|
seriesName := "sparse_histogram_series"
|
||||||
lbls := labels.FromStrings("__name__", seriesName)
|
lbls := labels.FromStrings("__name__", seriesName)
|
||||||
|
|
||||||
app := test.Storage().Appender(context.TODO())
|
app := storage.Appender(context.Background())
|
||||||
for i, fh := range tsdbutil.GenerateTestFloatHistograms(100) {
|
for i, fh := range tsdbutil.GenerateTestFloatHistograms(100) {
|
||||||
_, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), nil, fh)
|
_, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), nil, fh)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
require.NoError(t, app.Commit())
|
require.NoError(t, app.Commit())
|
||||||
|
|
||||||
require.NoError(t, test.Run())
|
|
||||||
engine := test.QueryEngine()
|
|
||||||
|
|
||||||
queryString := fmt.Sprintf("rate(%s[1m])", seriesName)
|
queryString := fmt.Sprintf("rate(%s[1m])", seriesName)
|
||||||
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond)))
|
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond)))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
res := qry.Exec(test.Context())
|
res := qry.Exec(context.Background())
|
||||||
require.NoError(t, res.Err)
|
require.NoError(t, res.Err)
|
||||||
vector, err := res.Vector()
|
vector, err := res.Vector()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -3321,7 +3244,7 @@ func TestNativeFloatHistogramRate(t *testing.T) {
|
||||||
Schema: 1,
|
Schema: 1,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: 1. / 15.,
|
ZeroCount: 1. / 15.,
|
||||||
Count: 8. / 15.,
|
Count: 9. / 15.,
|
||||||
Sum: 1.226666666666667,
|
Sum: 1.226666666666667,
|
||||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
|
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
|
||||||
PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
|
PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.},
|
||||||
|
@ -3353,16 +3276,16 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) {
|
||||||
}
|
}
|
||||||
for _, floatHisto := range []bool{true, false} {
|
for _, floatHisto := range []bool{true, false} {
|
||||||
t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
|
t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
|
||||||
test, err := NewTest(t, "")
|
engine := newTestEngine()
|
||||||
require.NoError(t, err)
|
storage := teststorage.New(t)
|
||||||
t.Cleanup(test.Close)
|
t.Cleanup(func() { storage.Close() })
|
||||||
|
|
||||||
seriesName := "sparse_histogram_series"
|
seriesName := "sparse_histogram_series"
|
||||||
lbls := labels.FromStrings("__name__", seriesName)
|
lbls := labels.FromStrings("__name__", seriesName)
|
||||||
engine := test.QueryEngine()
|
|
||||||
|
|
||||||
ts := int64(10 * time.Minute / time.Millisecond)
|
ts := int64(10 * time.Minute / time.Millisecond)
|
||||||
app := test.Storage().Appender(context.TODO())
|
app := storage.Appender(context.Background())
|
||||||
|
var err error
|
||||||
if floatHisto {
|
if floatHisto {
|
||||||
_, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat())
|
_, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat())
|
||||||
} else {
|
} else {
|
||||||
|
@ -3372,10 +3295,10 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) {
|
||||||
require.NoError(t, app.Commit())
|
require.NoError(t, app.Commit())
|
||||||
|
|
||||||
queryString := fmt.Sprintf("histogram_count(%s)", seriesName)
|
queryString := fmt.Sprintf("histogram_count(%s)", seriesName)
|
||||||
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
|
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
res := qry.Exec(test.Context())
|
res := qry.Exec(context.Background())
|
||||||
require.NoError(t, res.Err)
|
require.NoError(t, res.Err)
|
||||||
|
|
||||||
vector, err := res.Vector()
|
vector, err := res.Vector()
|
||||||
|
@ -3390,10 +3313,10 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
queryString = fmt.Sprintf("histogram_sum(%s)", seriesName)
|
queryString = fmt.Sprintf("histogram_sum(%s)", seriesName)
|
||||||
qry, err = engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
|
qry, err = engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
res = qry.Exec(test.Context())
|
res = qry.Exec(context.Background())
|
||||||
require.NoError(t, res.Err)
|
require.NoError(t, res.Err)
|
||||||
|
|
||||||
vector, err = res.Vector()
|
vector, err = res.Vector()
|
||||||
|
@ -3410,6 +3333,165 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestNativeHistogram_HistogramStdDevVar(t *testing.T) {
|
||||||
|
// TODO(codesome): Integrate histograms into the PromQL testing framework
|
||||||
|
// and write more tests there.
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
h *histogram.Histogram
|
||||||
|
stdVar float64
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "1, 2, 3, 4 low-res",
|
||||||
|
h: &histogram.Histogram{
|
||||||
|
Count: 4,
|
||||||
|
Sum: 10,
|
||||||
|
Schema: 2,
|
||||||
|
PositiveSpans: []histogram.Span{
|
||||||
|
{Offset: 0, Length: 1},
|
||||||
|
{Offset: 3, Length: 1},
|
||||||
|
{Offset: 2, Length: 2},
|
||||||
|
},
|
||||||
|
PositiveBuckets: []int64{1, 0, 0, 0},
|
||||||
|
},
|
||||||
|
stdVar: 1.163807968526718, // actual variance: 1.25
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "1, 2, 3, 4 hi-res",
|
||||||
|
h: &histogram.Histogram{
|
||||||
|
Count: 4,
|
||||||
|
Sum: 10,
|
||||||
|
Schema: 8,
|
||||||
|
PositiveSpans: []histogram.Span{
|
||||||
|
{Offset: 0, Length: 1},
|
||||||
|
{Offset: 255, Length: 1},
|
||||||
|
{Offset: 149, Length: 1},
|
||||||
|
{Offset: 105, Length: 1},
|
||||||
|
},
|
||||||
|
PositiveBuckets: []int64{1, 0, 0, 0},
|
||||||
|
},
|
||||||
|
stdVar: 1.2471347737158793, // actual variance: 1.25
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "-50, -8, 0, 3, 8, 9, 100",
|
||||||
|
h: &histogram.Histogram{
|
||||||
|
Count: 7,
|
||||||
|
ZeroCount: 1,
|
||||||
|
Sum: 62,
|
||||||
|
Schema: 3,
|
||||||
|
PositiveSpans: []histogram.Span{
|
||||||
|
{Offset: 13, Length: 1},
|
||||||
|
{Offset: 10, Length: 1},
|
||||||
|
{Offset: 1, Length: 1},
|
||||||
|
{Offset: 27, Length: 1},
|
||||||
|
},
|
||||||
|
PositiveBuckets: []int64{1, 0, 0, 0},
|
||||||
|
NegativeSpans: []histogram.Span{
|
||||||
|
{Offset: 24, Length: 1},
|
||||||
|
{Offset: 21, Length: 1},
|
||||||
|
},
|
||||||
|
NegativeBuckets: []int64{1, 0},
|
||||||
|
},
|
||||||
|
stdVar: 1544.8582535368798, // actual variance: 1738.4082
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "-50, -8, 0, 3, 8, 9, 100, NaN",
|
||||||
|
h: &histogram.Histogram{
|
||||||
|
Count: 8,
|
||||||
|
ZeroCount: 1,
|
||||||
|
Sum: math.NaN(),
|
||||||
|
Schema: 3,
|
||||||
|
PositiveSpans: []histogram.Span{
|
||||||
|
{Offset: 13, Length: 1},
|
||||||
|
{Offset: 10, Length: 1},
|
||||||
|
{Offset: 1, Length: 1},
|
||||||
|
{Offset: 27, Length: 1},
|
||||||
|
},
|
||||||
|
PositiveBuckets: []int64{1, 0, 0, 0},
|
||||||
|
NegativeSpans: []histogram.Span{
|
||||||
|
{Offset: 24, Length: 1},
|
||||||
|
{Offset: 21, Length: 1},
|
||||||
|
},
|
||||||
|
NegativeBuckets: []int64{1, 0},
|
||||||
|
},
|
||||||
|
stdVar: math.NaN(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "-50, -8, 0, 3, 8, 9, 100, +Inf",
|
||||||
|
h: &histogram.Histogram{
|
||||||
|
Count: 8,
|
||||||
|
ZeroCount: 1,
|
||||||
|
Sum: math.Inf(1),
|
||||||
|
Schema: 3,
|
||||||
|
PositiveSpans: []histogram.Span{
|
||||||
|
{Offset: 13, Length: 1},
|
||||||
|
{Offset: 10, Length: 1},
|
||||||
|
{Offset: 1, Length: 1},
|
||||||
|
{Offset: 27, Length: 1},
|
||||||
|
},
|
||||||
|
PositiveBuckets: []int64{1, 0, 0, 0},
|
||||||
|
NegativeSpans: []histogram.Span{
|
||||||
|
{Offset: 24, Length: 1},
|
||||||
|
{Offset: 21, Length: 1},
|
||||||
|
},
|
||||||
|
NegativeBuckets: []int64{1, 0},
|
||||||
|
},
|
||||||
|
stdVar: math.NaN(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
for _, floatHisto := range []bool{true, false} {
|
||||||
|
t.Run(fmt.Sprintf("%s floatHistogram=%t", tc.name, floatHisto), func(t *testing.T) {
|
||||||
|
engine := newTestEngine()
|
||||||
|
storage := teststorage.New(t)
|
||||||
|
t.Cleanup(func() { storage.Close() })
|
||||||
|
|
||||||
|
seriesName := "sparse_histogram_series"
|
||||||
|
lbls := labels.FromStrings("__name__", seriesName)
|
||||||
|
|
||||||
|
ts := int64(10 * time.Minute / time.Millisecond)
|
||||||
|
app := storage.Appender(context.Background())
|
||||||
|
var err error
|
||||||
|
if floatHisto {
|
||||||
|
_, err = app.AppendHistogram(0, lbls, ts, nil, tc.h.ToFloat())
|
||||||
|
} else {
|
||||||
|
_, err = app.AppendHistogram(0, lbls, ts, tc.h, nil)
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, app.Commit())
|
||||||
|
|
||||||
|
queryString := fmt.Sprintf("histogram_stdvar(%s)", seriesName)
|
||||||
|
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
res := qry.Exec(context.Background())
|
||||||
|
require.NoError(t, res.Err)
|
||||||
|
|
||||||
|
vector, err := res.Vector()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Len(t, vector, 1)
|
||||||
|
require.Nil(t, vector[0].H)
|
||||||
|
require.InEpsilon(t, tc.stdVar, vector[0].F, 1e-12)
|
||||||
|
|
||||||
|
queryString = fmt.Sprintf("histogram_stddev(%s)", seriesName)
|
||||||
|
qry, err = engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
res = qry.Exec(context.Background())
|
||||||
|
require.NoError(t, res.Err)
|
||||||
|
|
||||||
|
vector, err = res.Vector()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Len(t, vector, 1)
|
||||||
|
require.Nil(t, vector[0].H)
|
||||||
|
require.InEpsilon(t, math.Sqrt(tc.stdVar), vector[0].F, 1e-12)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestNativeHistogram_HistogramQuantile(t *testing.T) {
|
func TestNativeHistogram_HistogramQuantile(t *testing.T) {
|
||||||
// TODO(codesome): Integrate histograms into the PromQL testing framework
|
// TODO(codesome): Integrate histograms into the PromQL testing framework
|
||||||
// and write more tests there.
|
// and write more tests there.
|
||||||
|
@ -3603,18 +3685,18 @@ func TestNativeHistogram_HistogramQuantile(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
test, err := NewTest(t, "")
|
engine := newTestEngine()
|
||||||
require.NoError(t, err)
|
storage := teststorage.New(t)
|
||||||
t.Cleanup(test.Close)
|
t.Cleanup(func() { storage.Close() })
|
||||||
idx := int64(0)
|
idx := int64(0)
|
||||||
for _, floatHisto := range []bool{true, false} {
|
for _, floatHisto := range []bool{true, false} {
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) {
|
t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) {
|
||||||
seriesName := "sparse_histogram_series"
|
seriesName := "sparse_histogram_series"
|
||||||
lbls := labels.FromStrings("__name__", seriesName)
|
lbls := labels.FromStrings("__name__", seriesName)
|
||||||
engine := test.QueryEngine()
|
|
||||||
ts := idx * int64(10*time.Minute/time.Millisecond)
|
ts := idx * int64(10*time.Minute/time.Millisecond)
|
||||||
app := test.Storage().Appender(context.TODO())
|
app := storage.Appender(context.Background())
|
||||||
|
var err error
|
||||||
if floatHisto {
|
if floatHisto {
|
||||||
_, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat())
|
_, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat())
|
||||||
} else {
|
} else {
|
||||||
|
@ -3626,10 +3708,10 @@ func TestNativeHistogram_HistogramQuantile(t *testing.T) {
|
||||||
for j, sc := range c.subCases {
|
for j, sc := range c.subCases {
|
||||||
t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) {
|
t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) {
|
||||||
queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName)
|
queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName)
|
||||||
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
|
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
res := qry.Exec(test.Context())
|
res := qry.Exec(context.Background())
|
||||||
require.NoError(t, res.Err)
|
require.NoError(t, res.Err)
|
||||||
|
|
||||||
vector, err := res.Vector()
|
vector, err := res.Vector()
|
||||||
|
@ -4036,16 +4118,16 @@ func TestNativeHistogram_HistogramFraction(t *testing.T) {
|
||||||
for _, floatHisto := range []bool{true, false} {
|
for _, floatHisto := range []bool{true, false} {
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) {
|
t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) {
|
||||||
test, err := NewTest(t, "")
|
engine := newTestEngine()
|
||||||
require.NoError(t, err)
|
storage := teststorage.New(t)
|
||||||
t.Cleanup(test.Close)
|
t.Cleanup(func() { storage.Close() })
|
||||||
|
|
||||||
seriesName := "sparse_histogram_series"
|
seriesName := "sparse_histogram_series"
|
||||||
lbls := labels.FromStrings("__name__", seriesName)
|
lbls := labels.FromStrings("__name__", seriesName)
|
||||||
engine := test.QueryEngine()
|
|
||||||
|
|
||||||
ts := idx * int64(10*time.Minute/time.Millisecond)
|
ts := idx * int64(10*time.Minute/time.Millisecond)
|
||||||
app := test.Storage().Appender(context.TODO())
|
app := storage.Appender(context.Background())
|
||||||
|
var err error
|
||||||
if floatHisto {
|
if floatHisto {
|
||||||
_, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat())
|
_, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat())
|
||||||
} else {
|
} else {
|
||||||
|
@ -4057,10 +4139,10 @@ func TestNativeHistogram_HistogramFraction(t *testing.T) {
|
||||||
for j, sc := range c.subCases {
|
for j, sc := range c.subCases {
|
||||||
t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) {
|
t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) {
|
||||||
queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName)
|
queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName)
|
||||||
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
|
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
res := qry.Exec(test.Context())
|
res := qry.Exec(context.Background())
|
||||||
require.NoError(t, res.Err)
|
require.NoError(t, res.Err)
|
||||||
|
|
||||||
vector, err := res.Vector()
|
vector, err := res.Vector()
|
||||||
|
@ -4094,7 +4176,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
|
||||||
{
|
{
|
||||||
CounterResetHint: histogram.GaugeType,
|
CounterResetHint: histogram.GaugeType,
|
||||||
Schema: 0,
|
Schema: 0,
|
||||||
Count: 21,
|
Count: 25,
|
||||||
Sum: 1234.5,
|
Sum: 1234.5,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: 4,
|
ZeroCount: 4,
|
||||||
|
@ -4112,7 +4194,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
|
||||||
{
|
{
|
||||||
CounterResetHint: histogram.GaugeType,
|
CounterResetHint: histogram.GaugeType,
|
||||||
Schema: 0,
|
Schema: 0,
|
||||||
Count: 36,
|
Count: 41,
|
||||||
Sum: 2345.6,
|
Sum: 2345.6,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: 5,
|
ZeroCount: 5,
|
||||||
|
@ -4132,7 +4214,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
|
||||||
{
|
{
|
||||||
CounterResetHint: histogram.GaugeType,
|
CounterResetHint: histogram.GaugeType,
|
||||||
Schema: 0,
|
Schema: 0,
|
||||||
Count: 36,
|
Count: 41,
|
||||||
Sum: 1111.1,
|
Sum: 1111.1,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: 5,
|
ZeroCount: 5,
|
||||||
|
@ -4159,7 +4241,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
|
||||||
Schema: 0,
|
Schema: 0,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: 14,
|
ZeroCount: 14,
|
||||||
Count: 93,
|
Count: 107,
|
||||||
Sum: 4691.2,
|
Sum: 4691.2,
|
||||||
PositiveSpans: []histogram.Span{
|
PositiveSpans: []histogram.Span{
|
||||||
{Offset: 0, Length: 7},
|
{Offset: 0, Length: 7},
|
||||||
|
@ -4176,7 +4258,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
|
||||||
Schema: 0,
|
Schema: 0,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: 3.5,
|
ZeroCount: 3.5,
|
||||||
Count: 23.25,
|
Count: 26.75,
|
||||||
Sum: 1172.8,
|
Sum: 1172.8,
|
||||||
PositiveSpans: []histogram.Span{
|
PositiveSpans: []histogram.Span{
|
||||||
{Offset: 0, Length: 7},
|
{Offset: 0, Length: 7},
|
||||||
|
@ -4195,20 +4277,20 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
for _, floatHisto := range []bool{true, false} {
|
for _, floatHisto := range []bool{true, false} {
|
||||||
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
|
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
|
||||||
test, err := NewTest(t, "")
|
storage := teststorage.New(t)
|
||||||
require.NoError(t, err)
|
t.Cleanup(func() { storage.Close() })
|
||||||
t.Cleanup(test.Close)
|
|
||||||
|
|
||||||
seriesName := "sparse_histogram_series"
|
seriesName := "sparse_histogram_series"
|
||||||
seriesNameOverTime := "sparse_histogram_series_over_time"
|
seriesNameOverTime := "sparse_histogram_series_over_time"
|
||||||
|
|
||||||
engine := test.QueryEngine()
|
engine := newTestEngine()
|
||||||
|
|
||||||
ts := idx0 * int64(10*time.Minute/time.Millisecond)
|
ts := idx0 * int64(10*time.Minute/time.Millisecond)
|
||||||
app := test.Storage().Appender(context.TODO())
|
app := storage.Appender(context.Background())
|
||||||
for idx1, h := range c.histograms {
|
for idx1, h := range c.histograms {
|
||||||
lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
|
lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
|
||||||
// Since we mutate h later, we need to create a copy here.
|
// Since we mutate h later, we need to create a copy here.
|
||||||
|
var err error
|
||||||
if floatHisto {
|
if floatHisto {
|
||||||
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
|
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
|
||||||
} else {
|
} else {
|
||||||
|
@ -4229,10 +4311,10 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
|
||||||
require.NoError(t, app.Commit())
|
require.NoError(t, app.Commit())
|
||||||
|
|
||||||
queryAndCheck := func(queryString string, ts int64, exp Vector) {
|
queryAndCheck := func(queryString string, ts int64, exp Vector) {
|
||||||
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
|
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
res := qry.Exec(test.Context())
|
res := qry.Exec(context.Background())
|
||||||
require.NoError(t, res.Err)
|
require.NoError(t, res.Err)
|
||||||
|
|
||||||
vector, err := res.Vector()
|
vector, err := res.Vector()
|
||||||
|
@ -4287,7 +4369,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
|
||||||
histograms: []histogram.Histogram{
|
histograms: []histogram.Histogram{
|
||||||
{
|
{
|
||||||
Schema: 0,
|
Schema: 0,
|
||||||
Count: 36,
|
Count: 41,
|
||||||
Sum: 2345.6,
|
Sum: 2345.6,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: 5,
|
ZeroCount: 5,
|
||||||
|
@ -4322,7 +4404,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
|
||||||
},
|
},
|
||||||
expected: histogram.FloatHistogram{
|
expected: histogram.FloatHistogram{
|
||||||
Schema: 0,
|
Schema: 0,
|
||||||
Count: 25,
|
Count: 30,
|
||||||
Sum: 1111.1,
|
Sum: 1111.1,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: 2,
|
ZeroCount: 2,
|
||||||
|
@ -4343,7 +4425,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
|
||||||
histograms: []histogram.Histogram{
|
histograms: []histogram.Histogram{
|
||||||
{
|
{
|
||||||
Schema: 0,
|
Schema: 0,
|
||||||
Count: 36,
|
Count: 41,
|
||||||
Sum: 2345.6,
|
Sum: 2345.6,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: 5,
|
ZeroCount: 5,
|
||||||
|
@ -4378,7 +4460,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
|
||||||
},
|
},
|
||||||
expected: histogram.FloatHistogram{
|
expected: histogram.FloatHistogram{
|
||||||
Schema: 0,
|
Schema: 0,
|
||||||
Count: 25,
|
Count: 30,
|
||||||
Sum: 1111.1,
|
Sum: 1111.1,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: 2,
|
ZeroCount: 2,
|
||||||
|
@ -4413,7 +4495,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Schema: 0,
|
Schema: 0,
|
||||||
Count: 36,
|
Count: 41,
|
||||||
Sum: 2345.6,
|
Sum: 2345.6,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: 5,
|
ZeroCount: 5,
|
||||||
|
@ -4433,7 +4515,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
|
||||||
},
|
},
|
||||||
expected: histogram.FloatHistogram{
|
expected: histogram.FloatHistogram{
|
||||||
Schema: 0,
|
Schema: 0,
|
||||||
Count: -25,
|
Count: -30,
|
||||||
Sum: -1111.1,
|
Sum: -1111.1,
|
||||||
ZeroThreshold: 0.001,
|
ZeroThreshold: 0.001,
|
||||||
ZeroCount: -2,
|
ZeroCount: -2,
|
||||||
|
@ -4455,19 +4537,18 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
for _, floatHisto := range []bool{true, false} {
|
for _, floatHisto := range []bool{true, false} {
|
||||||
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
|
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
|
||||||
test, err := NewTest(t, "")
|
engine := newTestEngine()
|
||||||
require.NoError(t, err)
|
storage := teststorage.New(t)
|
||||||
t.Cleanup(test.Close)
|
t.Cleanup(func() { storage.Close() })
|
||||||
|
|
||||||
seriesName := "sparse_histogram_series"
|
seriesName := "sparse_histogram_series"
|
||||||
|
|
||||||
engine := test.QueryEngine()
|
|
||||||
|
|
||||||
ts := idx0 * int64(10*time.Minute/time.Millisecond)
|
ts := idx0 * int64(10*time.Minute/time.Millisecond)
|
||||||
app := test.Storage().Appender(context.TODO())
|
app := storage.Appender(context.Background())
|
||||||
for idx1, h := range c.histograms {
|
for idx1, h := range c.histograms {
|
||||||
lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
|
lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
|
||||||
// Since we mutate h later, we need to create a copy here.
|
// Since we mutate h later, we need to create a copy here.
|
||||||
|
var err error
|
||||||
if floatHisto {
|
if floatHisto {
|
||||||
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
|
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
|
||||||
} else {
|
} else {
|
||||||
|
@ -4478,15 +4559,25 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
|
||||||
require.NoError(t, app.Commit())
|
require.NoError(t, app.Commit())
|
||||||
|
|
||||||
queryAndCheck := func(queryString string, exp Vector) {
|
queryAndCheck := func(queryString string, exp Vector) {
|
||||||
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
|
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
res := qry.Exec(test.Context())
|
res := qry.Exec(context.Background())
|
||||||
require.NoError(t, res.Err)
|
require.NoError(t, res.Err)
|
||||||
|
|
||||||
vector, err := res.Vector()
|
vector, err := res.Vector()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
if len(vector) == len(exp) {
|
||||||
|
for i, e := range exp {
|
||||||
|
got := vector[i].H
|
||||||
|
if got != e.H {
|
||||||
|
// Error messages are better if we compare structs, not pointers.
|
||||||
|
require.Equal(t, *e.H, *got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
require.Equal(t, exp, vector)
|
require.Equal(t, exp, vector)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4497,8 +4588,8 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
|
||||||
}
|
}
|
||||||
queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
|
queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
|
||||||
})
|
})
|
||||||
idx0++
|
|
||||||
}
|
}
|
||||||
|
idx0++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4601,20 +4692,20 @@ func TestNativeHistogram_MulDivOperator(t *testing.T) {
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
for _, floatHisto := range []bool{true, false} {
|
for _, floatHisto := range []bool{true, false} {
|
||||||
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
|
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
|
||||||
test, err := NewTest(t, "")
|
storage := teststorage.New(t)
|
||||||
require.NoError(t, err)
|
t.Cleanup(func() { storage.Close() })
|
||||||
t.Cleanup(test.Close)
|
|
||||||
|
|
||||||
seriesName := "sparse_histogram_series"
|
seriesName := "sparse_histogram_series"
|
||||||
floatSeriesName := "float_series"
|
floatSeriesName := "float_series"
|
||||||
|
|
||||||
engine := test.QueryEngine()
|
engine := newTestEngine()
|
||||||
|
|
||||||
ts := idx0 * int64(10*time.Minute/time.Millisecond)
|
ts := idx0 * int64(10*time.Minute/time.Millisecond)
|
||||||
app := test.Storage().Appender(context.TODO())
|
app := storage.Appender(context.Background())
|
||||||
h := c.histogram
|
h := c.histogram
|
||||||
lbls := labels.FromStrings("__name__", seriesName)
|
lbls := labels.FromStrings("__name__", seriesName)
|
||||||
// Since we mutate h later, we need to create a copy here.
|
// Since we mutate h later, we need to create a copy here.
|
||||||
|
var err error
|
||||||
if floatHisto {
|
if floatHisto {
|
||||||
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
|
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
|
||||||
} else {
|
} else {
|
||||||
|
@ -4626,10 +4717,10 @@ func TestNativeHistogram_MulDivOperator(t *testing.T) {
|
||||||
require.NoError(t, app.Commit())
|
require.NoError(t, app.Commit())
|
||||||
|
|
||||||
queryAndCheck := func(queryString string, exp Vector) {
|
queryAndCheck := func(queryString string, exp Vector) {
|
||||||
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
|
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
res := qry.Exec(test.Context())
|
res := qry.Exec(context.Background())
|
||||||
require.NoError(t, res.Err)
|
require.NoError(t, res.Err)
|
||||||
|
|
||||||
vector, err := res.Vector()
|
vector, err := res.Vector()
|
||||||
|
@ -4730,22 +4821,18 @@ metric 0 1 2
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
c := c
|
c := c
|
||||||
t.Run(c.name, func(t *testing.T) {
|
t.Run(c.name, func(t *testing.T) {
|
||||||
test, err := NewTest(t, load)
|
engine := newTestEngine()
|
||||||
require.NoError(t, err)
|
storage := LoadedStorage(t, load)
|
||||||
defer test.Close()
|
t.Cleanup(func() { storage.Close() })
|
||||||
|
|
||||||
err = test.Run()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
eng := test.QueryEngine()
|
|
||||||
if c.engineLookback != 0 {
|
if c.engineLookback != 0 {
|
||||||
eng.lookbackDelta = c.engineLookback
|
engine.lookbackDelta = c.engineLookback
|
||||||
}
|
}
|
||||||
opts := NewPrometheusQueryOpts(false, c.queryLookback)
|
opts := NewPrometheusQueryOpts(false, c.queryLookback)
|
||||||
qry, err := eng.NewInstantQuery(test.context, test.Queryable(), opts, query, c.ts)
|
qry, err := engine.NewInstantQuery(context.Background(), storage, opts, query, c.ts)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
res := qry.Exec(test.Context())
|
res := qry.Exec(context.Background())
|
||||||
require.NoError(t, res.Err)
|
require.NoError(t, res.Err)
|
||||||
vec, ok := res.Value.(Vector)
|
vec, ok := res.Value.(Vector)
|
||||||
require.True(t, ok)
|
require.True(t, ok)
|
||||||
|
|
|
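The engine-test changes above apply one mechanical substitution throughout: the old promql.Test harness is replaced by an explicit engine plus a separate test storage. A minimal sketch of the resulting pattern as it appears inside a test in this package (illustrative only; newTestEngine is a package-local helper and teststorage is the util/teststorage package visible in this diff):

    engine := newTestEngine()             // replaces test.QueryEngine()
    storage := teststorage.New(t)         // replaces test.Storage()
    t.Cleanup(func() { storage.Close() }) // replaces t.Cleanup(test.Close)

    app := storage.Appender(context.Background())
    // ... append samples or native histograms here ...
    require.NoError(t, app.Commit())

    // Queries now take an explicit context and storage instead of
    // test.context and test.Queryable().
    qry, err := engine.NewInstantQuery(context.Background(), storage, nil,
        "histogram_count(sparse_histogram_series)", timestamp.Time(ts))
    require.NoError(t, err)
    res := qry.Exec(context.Background())
    require.NoError(t, res.Err)

Because the substitution is identical everywhere, the hunks below repeat the same five-line setup in every test.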
@@ -996,6 +996,72 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod
  return enh.Out
  }

+ // === histogram_stddev(Vector parser.ValueTypeVector) Vector ===
+ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+ inVec := vals[0].(Vector)
+
+ for _, sample := range inVec {
+ // Skip non-histogram samples.
+ if sample.H == nil {
+ continue
+ }
+ mean := sample.H.Sum / sample.H.Count
+ var variance, cVariance float64
+ it := sample.H.AllBucketIterator()
+ for it.Next() {
+ bucket := it.At()
+ var val float64
+ if bucket.Lower <= 0 && 0 <= bucket.Upper {
+ val = 0
+ } else {
+ val = math.Sqrt(bucket.Upper * bucket.Lower)
+ }
+ delta := val - mean
+ variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
+ }
+ variance += cVariance
+ variance /= sample.H.Count
+ enh.Out = append(enh.Out, Sample{
+ Metric: enh.DropMetricName(sample.Metric),
+ F: math.Sqrt(variance),
+ })
+ }
+ return enh.Out
+ }
+
+ // === histogram_stdvar(Vector parser.ValueTypeVector) Vector ===
+ func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+ inVec := vals[0].(Vector)
+
+ for _, sample := range inVec {
+ // Skip non-histogram samples.
+ if sample.H == nil {
+ continue
+ }
+ mean := sample.H.Sum / sample.H.Count
+ var variance, cVariance float64
+ it := sample.H.AllBucketIterator()
+ for it.Next() {
+ bucket := it.At()
+ var val float64
+ if bucket.Lower <= 0 && 0 <= bucket.Upper {
+ val = 0
+ } else {
+ val = math.Sqrt(bucket.Upper * bucket.Lower)
+ }
+ delta := val - mean
+ variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
+ }
+ variance += cVariance
+ variance /= sample.H.Count
+ enh.Out = append(enh.Out, Sample{
+ Metric: enh.DropMetricName(sample.Metric),
+ F: variance,
+ })
+ }
+ return enh.Out
+ }
+
  // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector ===
  func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
  lower := vals[0].(Vector)[0].F

@@ -1377,6 +1443,8 @@ var FunctionCalls = map[string]FunctionCall{
  "histogram_fraction": funcHistogramFraction,
  "histogram_quantile": funcHistogramQuantile,
  "histogram_sum": funcHistogramSum,
+ "histogram_stddev": funcHistogramStdDev,
+ "histogram_stdvar": funcHistogramStdVar,
  "holt_winters": funcHoltWinters,
  "hour": funcHour,
  "idelta": funcIdelta,
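Both new functions share one estimator: each bucket is represented by a single point (zero for the bucket that straddles zero, otherwise the geometric mean sqrt(upper*lower) of its boundaries), and the squared deviations of those points from the histogram mean are accumulated with Kahan-compensated summation before dividing by the total count. A standalone sketch of that estimate, with a hypothetical Bucket type standing in for the bucket iterator used in the real code:

    package main

    import (
        "fmt"
        "math"
    )

    // Bucket is a hypothetical stand-in for one native-histogram bucket; the
    // real functions iterate sample.H.AllBucketIterator() instead.
    type Bucket struct{ Lower, Upper, Count float64 }

    // kahanSumInc mirrors the compensated-summation helper the diff relies on
    // (Neumaier variant of Kahan summation).
    func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
        t := sum + inc
        if math.Abs(sum) >= math.Abs(inc) {
            c += (sum - t) + inc
        } else {
            c += (inc - t) + sum
        }
        return t, c
    }

    // stdvar estimates the variance of the observations in the buckets. Each
    // bucket is represented by zero if it straddles zero, otherwise by the
    // geometric mean of its boundaries.
    func stdvar(buckets []Bucket, sum, count float64) float64 {
        mean := sum / count
        var variance, cVariance float64
        for _, b := range buckets {
            val := 0.0
            if !(b.Lower <= 0 && 0 <= b.Upper) {
                val = math.Sqrt(b.Upper * b.Lower)
            }
            delta := val - mean
            variance, cVariance = kahanSumInc(b.Count*delta*delta, variance, cVariance)
        }
        return (variance + cVariance) / count
    }

    func main() {
        // Toy histogram: four observations with sum 10 (true variance 1.25).
        buckets := []Bucket{{Lower: 1, Upper: 2, Count: 2}, {Lower: 2, Upper: 4, Count: 2}}
        fmt.Println(stdvar(buckets, 10, 4))            // bucket-based estimate
        fmt.Println(math.Sqrt(stdvar(buckets, 10, 4))) // corresponding stddev
    }

As the test cases above show, the estimate approaches the true variance as the schema (bucket resolution) grows.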
@@ -173,6 +173,16 @@ var Functions = map[string]*Function{
  ArgTypes: []ValueType{ValueTypeVector},
  ReturnType: ValueTypeVector,
  },
+ "histogram_stddev": {
+ Name: "histogram_stddev",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
+ "histogram_stdvar": {
+ Name: "histogram_stdvar",
+ ArgTypes: []ValueType{ValueTypeVector},
+ ReturnType: ValueTypeVector,
+ },
  "histogram_fraction": {
  Name: "histogram_fraction",
  ArgTypes: []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector},
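A PromQL function is wired up in two places: the promql package's FunctionCalls map (previous hunk) supplies the implementation, while this exported Functions map declares the signature the parser's type checker enforces. A quick probe of the declared signature, assuming a Prometheus version that includes this change:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/promql/parser"
    )

    func main() {
        // histogram_stddev takes one instant vector and returns an instant vector.
        fn, ok := parser.Functions["histogram_stddev"]
        if !ok {
            panic("histogram_stddev not registered")
        }
        fmt.Println(fn.Name, fn.ArgTypes, fn.ReturnType)
    }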
@@ -21,23 +21,28 @@ import (

  "github.com/prometheus/prometheus/model/labels"
  "github.com/prometheus/prometheus/model/value"
+ "github.com/prometheus/prometheus/model/histogram"
  )

  %}

  %union {
  node Node
  item Item
  matchers []*labels.Matcher
  matcher *labels.Matcher
  label labels.Label
  labels labels.Labels
  lblList []labels.Label
  strings []string
  series []SequenceValue
- uint uint64
- float float64
- duration time.Duration
+ histogram *histogram.FloatHistogram
+ descriptors map[string]interface{}
+ bucket_set []float64
+ int int64
+ uint uint64
+ float float64
+ duration time.Duration
  }


@@ -54,6 +59,8 @@ IDENTIFIER
  LEFT_BRACE
  LEFT_BRACKET
  LEFT_PAREN
+ OPEN_HIST
+ CLOSE_HIST
  METRIC_IDENTIFIER
  NUMBER
  RIGHT_BRACE

@@ -64,6 +71,20 @@ SPACE
  STRING
  TIMES

+ // Histogram Descriptors.
+ %token histogramDescStart
+ %token <item>
+ SUM_DESC
+ COUNT_DESC
+ SCHEMA_DESC
+ OFFSET_DESC
+ NEGATIVE_OFFSET_DESC
+ BUCKETS_DESC
+ NEGATIVE_BUCKETS_DESC
+ ZERO_BUCKET_DESC
+ ZERO_BUCKET_WIDTH_DESC
+ %token histogramDescEnd
+
  // Operators.
  %token operatorsStart
  %token <item>

@@ -145,6 +166,10 @@ START_METRIC_SELECTOR
  %type <label> label_set_item
  %type <strings> grouping_label_list grouping_labels maybe_grouping_labels
  %type <series> series_item series_values
+ %type <histogram> histogram_series_value
+ %type <descriptors> histogram_desc_map histogram_desc_item
+ %type <bucket_set> bucket_set bucket_set_list
+ %type <int> int
  %type <uint> uint
  %type <float> number series_value signed_number signed_or_unsigned_number
  %type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector

@@ -256,7 +281,7 @@ binary_expr : expr ADD bin_modifier expr { $$ = yylex.(*parser).newBinar
  ;

  // Using left recursion for the modifier rules, helps to keep the parser stack small and
- // reduces allocations
+ // reduces allocations.
  bin_modifier : group_modifiers;

  bool_modifier : /* empty */

@@ -470,7 +495,7 @@ subquery_expr : expr LEFT_BRACKET duration COLON maybe_duration RIGHT_BRACKET
  */

  unary_expr :
- /* gives the rule the same precedence as MUL. This aligns with mathematical conventions */
+ /* Gives the rule the same precedence as MUL. This aligns with mathematical conventions. */
  unary_op expr %prec MUL
  {
  if nl, ok := $2.(*NumberLiteral); ok {

@@ -605,7 +630,10 @@ label_set_item : IDENTIFIER EQL STRING
  ;

  /*
- * Series descriptions (only used by unit tests).
+ * Series descriptions:
+ * A separate language that is used to generate series values in promtool.
+ * It is included in the PromQL parser because it shares common functionality, such as parsing a metric.
+ * The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series
  */

  series_description: metric series_values

@@ -641,6 +669,7 @@ series_item : BLANK
  | series_value TIMES uint
  {
  $$ = []SequenceValue{}
+ // Add an additional value for time 0, which we ignore in tests.
  for i:=uint64(0); i <= $3; i++{
  $$ = append($$, SequenceValue{Value: $1})
  }

@@ -648,11 +677,42 @@ series_item : BLANK
  | series_value signed_number TIMES uint
  {
  $$ = []SequenceValue{}
+ // Add an additional value for time 0, which we ignore in tests.
  for i:=uint64(0); i <= $4; i++{
  $$ = append($$, SequenceValue{Value: $1})
  $1 += $2
  }
  }
+ // Histogram descriptions (part of unit testing).
+ | histogram_series_value
+ {
+ $$ = []SequenceValue{{Histogram:$1}}
+ }
+ | histogram_series_value TIMES uint
+ {
+ $$ = []SequenceValue{}
+ // Add an additional value for time 0, which we ignore in tests.
+ for i:=uint64(0); i <= $3; i++{
+ $$ = append($$, SequenceValue{Histogram:$1})
+ //$1 += $2
+ }
+ }
+ | histogram_series_value ADD histogram_series_value TIMES uint
+ {
+ val, err := yylex.(*parser).histogramsIncreaseSeries($1,$3,$5)
+ if err != nil {
+ yylex.(*parser).addSemanticError(err)
+ }
+ $$ = val
+ }
+ | histogram_series_value SUB histogram_series_value TIMES uint
+ {
+ val, err := yylex.(*parser).histogramsDecreaseSeries($1,$3,$5)
+ if err != nil {
+ yylex.(*parser).addSemanticError(err)
+ }
+ $$ = val
+ }
  ;

  series_value : IDENTIFIER

@@ -666,7 +726,109 @@ series_value : IDENTIFIER
  | signed_number
  ;

+ histogram_series_value
+ : OPEN_HIST histogram_desc_map SPACE CLOSE_HIST
+ {
+ $$ = yylex.(*parser).buildHistogramFromMap(&$2)
+ }
+ | OPEN_HIST histogram_desc_map CLOSE_HIST
+ {
+ $$ = yylex.(*parser).buildHistogramFromMap(&$2)
+ }
+ | OPEN_HIST SPACE CLOSE_HIST
+ {
+ m := yylex.(*parser).newMap()
+ $$ = yylex.(*parser).buildHistogramFromMap(&m)
+ }
+ | OPEN_HIST CLOSE_HIST
+ {
+ m := yylex.(*parser).newMap()
+ $$ = yylex.(*parser).buildHistogramFromMap(&m)
+ }
+ ;
+
+ histogram_desc_map
+ : histogram_desc_map SPACE histogram_desc_item
+ {
+ $$ = *(yylex.(*parser).mergeMaps(&$1,&$3))
+ }
+ | histogram_desc_item
+ {
+ $$ = $1
+ }
+ | histogram_desc_map error {
+ yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. buckets:[5 10 7]")
+ }
+ ;
+
+ histogram_desc_item
+ : SCHEMA_DESC COLON int
+ {
+ $$ = yylex.(*parser).newMap()
+ $$["schema"] = $3
+ }
+ | SUM_DESC COLON signed_or_unsigned_number
+ {
+ $$ = yylex.(*parser).newMap()
+ $$["sum"] = $3
+ }
+ | COUNT_DESC COLON number
+ {
+ $$ = yylex.(*parser).newMap()
+ $$["count"] = $3
+ }
+ | ZERO_BUCKET_DESC COLON number
+ {
+ $$ = yylex.(*parser).newMap()
+ $$["z_bucket"] = $3
+ }
+ | ZERO_BUCKET_WIDTH_DESC COLON number
+ {
+ $$ = yylex.(*parser).newMap()
+ $$["z_bucket_w"] = $3
+ }
+ | BUCKETS_DESC COLON bucket_set
+ {
+ $$ = yylex.(*parser).newMap()
+ $$["buckets"] = $3
+ }
+ | OFFSET_DESC COLON int
+ {
+ $$ = yylex.(*parser).newMap()
+ $$["offset"] = $3
+ }
+ | NEGATIVE_BUCKETS_DESC COLON bucket_set
+ {
+ $$ = yylex.(*parser).newMap()
+ $$["n_buckets"] = $3
+ }
+ | NEGATIVE_OFFSET_DESC COLON int
+ {
+ $$ = yylex.(*parser).newMap()
+ $$["n_offset"] = $3
+ }
+ ;
+
+ bucket_set : LEFT_BRACKET bucket_set_list SPACE RIGHT_BRACKET
+ {
+ $$ = $2
+ }
+ | LEFT_BRACKET bucket_set_list RIGHT_BRACKET
+ {
+ $$ = $2
+ }
+ ;
+
+ bucket_set_list : bucket_set_list SPACE number
+ {
+ $$ = append($1, $3)
+ }
+ | number
+ {
+ $$ = []float64{$1}
+ }
+ | bucket_set_list error
+ ;
+
+
  /*

@@ -675,7 +837,7 @@ series_value : IDENTIFIER

  aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK ;

- // inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
+ // Inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
  maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2;

  unary_op : ADD | SUB;

@@ -713,6 +875,10 @@ uint : NUMBER
  }
  ;

+ int : SUB uint { $$ = -int64($2) }
+ | uint { $$ = int64($1) }
+ ;
+
  duration : DURATION
  {
  var err error
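Taken together, the new tokens and rules let series descriptions carry histogram literals such as {{schema:0 sum:5 count:4 buckets:[1 2 1]}}, optionally repeated with x<n> or stepped with + and - per the series_item alternatives above. A sketch of how such a description parses, assuming the exported ParseSeriesDesc entry point of this package:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/promql/parser"
    )

    func main() {
        // One base histogram at t0 plus two repetitions, per the
        // `histogram_series_value TIMES uint` rule.
        lbls, vals, err := parser.ParseSeriesDesc(
            `my_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}x2`)
        if err != nil {
            panic(err)
        }
        fmt.Println(lbls)
        for _, v := range vals {
            fmt.Println(v) // SequenceValue.Histogram is set instead of Value
        }
    }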
File diff suppressed because it is too large.
@@ -133,9 +133,23 @@ var key = map[string]ItemType{
  "end": END,
  }

+ var histogramDesc = map[string]ItemType{
+ "sum": SUM_DESC,
+ "count": COUNT_DESC,
+ "schema": SCHEMA_DESC,
+ "offset": OFFSET_DESC,
+ "n_offset": NEGATIVE_OFFSET_DESC,
+ "buckets": BUCKETS_DESC,
+ "n_buckets": NEGATIVE_BUCKETS_DESC,
+ "z_bucket": ZERO_BUCKET_DESC,
+ "z_bucket_w": ZERO_BUCKET_WIDTH_DESC,
+ }
+
  // ItemTypeStr is the default string representations for common Items. It does not
  // imply that those are the only character sequences that can be lexed to such an Item.
  var ItemTypeStr = map[ItemType]string{
+ OPEN_HIST: "{{",
+ CLOSE_HIST: "}}",
  LEFT_PAREN: "(",
  RIGHT_PAREN: ")",
  LEFT_BRACE: "{",

@@ -224,6 +238,16 @@ type stateFn func(*Lexer) stateFn
  // Negative numbers indicate undefined positions.
  type Pos int

+ type histogramState int
+
+ const (
+ histogramStateNone histogramState = iota
+ histogramStateOpen
+ histogramStateMul
+ histogramStateAdd
+ histogramStateSub
+ )
+
  // Lexer holds the state of the scanner.
  type Lexer struct {
  input string // The string being scanned.

@@ -241,9 +265,10 @@ type Lexer struct {
  gotColon bool // Whether we got a ':' after [ was opened.
  stringOpen rune // Quote rune of the string currently being read.

- // seriesDesc is set when a series description for the testing
- // language is lexed.
- seriesDesc bool
+ // Series-description state, used by the internal PromQL testing framework as well as by promtool rules unit tests.
+ // See https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series
+ seriesDesc bool // Whether we are lexing a series description.
+ histogramState histogramState // Determines whether we are inside of a histogram description.
  }

  // next returns the next rune in the input.

@@ -338,6 +363,9 @@ const lineComment = "#"

  // lexStatements is the top-level state for lexing.
  func lexStatements(l *Lexer) stateFn {
+ if l.histogramState != histogramStateNone {
+ return lexHistogram
+ }
  if l.braceOpen {
  return lexInsideBraces
  }

@@ -460,6 +488,117 @@ func lexStatements(l *Lexer) stateFn {
  return lexStatements
  }

+ func lexHistogram(l *Lexer) stateFn {
+ switch l.histogramState {
+ case histogramStateMul:
+ l.histogramState = histogramStateNone
+ l.next()
+ l.emit(TIMES)
+ return lexNumber
+ case histogramStateAdd:
+ l.histogramState = histogramStateNone
+ l.next()
+ l.emit(ADD)
+ return lexValueSequence
+ case histogramStateSub:
+ l.histogramState = histogramStateNone
+ l.next()
+ l.emit(SUB)
+ return lexValueSequence
+ }
+
+ if l.bracketOpen {
+ return lexBuckets
+ }
+ switch r := l.next(); {
+ case isSpace(r):
+ l.emit(SPACE)
+ return lexSpace
+ case isAlpha(r):
+ l.backup()
+ return lexHistogramDescriptor
+ case r == ':':
+ l.emit(COLON)
+ return lexHistogram
+ case r == '-':
+ l.emit(SUB)
+ return lexNumber
+ case r == 'x':
+ l.emit(TIMES)
+ return lexNumber
+ case isDigit(r):
+ l.backup()
+ return lexNumber
+ case r == '[':
+ l.bracketOpen = true
+ l.emit(LEFT_BRACKET)
+ return lexBuckets
+ case r == '}' && l.peek() == '}':
+ l.next()
+ l.emit(CLOSE_HIST)
+ switch l.peek() {
+ case 'x':
+ l.histogramState = histogramStateMul
+ return lexHistogram
+ case '+':
+ l.histogramState = histogramStateAdd
+ return lexHistogram
+ case '-':
+ l.histogramState = histogramStateSub
+ return lexHistogram
+ default:
+ l.histogramState = histogramStateNone
+ return lexValueSequence
+ }
+ default:
+ return l.errorf("histogram description incomplete unexpected: %q", r)
+ }
+ }
+
+ func lexHistogramDescriptor(l *Lexer) stateFn {
+ Loop:
+ for {
+ switch r := l.next(); {
+ case isAlpha(r):
+ // absorb.
+ default:
+ l.backup()
+
+ word := l.input[l.start:l.pos]
+ if desc, ok := histogramDesc[strings.ToLower(word)]; ok {
+ if l.peek() == ':' {
+ l.emit(desc)
+ return lexHistogram
+ } else {
+ l.errorf("missing `:` for histogram descriptor")
+ }
+ } else {
+ l.errorf("bad histogram descriptor found: %q", word)
+ }
+
+ break Loop
+ }
+ }
+ return lexStatements
+ }
+
+ func lexBuckets(l *Lexer) stateFn {
+ switch r := l.next(); {
+ case isSpace(r):
+ l.emit(SPACE)
+ return lexSpace
+ case isDigit(r):
+ l.backup()
+ return lexNumber
+ case r == ']':
+ l.bracketOpen = false
+ l.emit(RIGHT_BRACKET)
+ return lexHistogram
+ default:
+ return l.errorf("invalid character in buckets description: %q", r)
+ }
+ }
+
  // lexInsideBraces scans the inside of a vector selector. Keywords are ignored and
  // scanned as identifiers.
  func lexInsideBraces(l *Lexer) stateFn {

@@ -517,9 +656,20 @@ func lexInsideBraces(l *Lexer) stateFn {

  // lexValueSequence scans a value sequence of a series description.
  func lexValueSequence(l *Lexer) stateFn {
+ if l.histogramState != histogramStateNone {
+ return lexHistogram
+ }
  switch r := l.next(); {
  case r == eof:
  return lexStatements
+ case r == '{' && l.peek() == '{':
+ if l.histogramState != histogramStateNone {
+ return l.errorf("unexpected histogram opening {{")
+ }
+ l.histogramState = histogramStateOpen
+ l.next()
+ l.emit(OPEN_HIST)
+ return lexHistogram
  case isSpace(r):
  l.emit(SPACE)
  lexSpace(l)
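lexHistogram, lexHistogramDescriptor and lexBuckets slot into the lexer's existing stateFn design: each state function consumes input, emits items, and returns the next state, while histogramState carries the pending x / + / - context across state changes. A minimal self-contained illustration of that design (illustrative only, not the Prometheus lexer):

    package main

    import "fmt"

    // stateFn mirrors the lexer design above: a state consumes input,
    // may emit items, and returns the next state (nil terminates).
    type stateFn func(*lexer) stateFn

    type lexer struct {
        input string
        pos   int
    }

    // lexText scans ordinary input and hands off when it sees '{', the way
    // lexStatements hands off to lexHistogram via the histogramState flag.
    func lexText(l *lexer) stateFn {
        for l.pos < len(l.input) {
            if l.input[l.pos] == '{' {
                return lexBrace
            }
            l.pos++
        }
        return nil
    }

    // lexBrace handles the special region, then returns control to lexText.
    func lexBrace(l *lexer) stateFn {
        fmt.Println("brace at position", l.pos)
        l.pos++
        return lexText
    }

    func main() {
        l := &lexer{input: "a{b}c"}
        for state := stateFn(lexText); state != nil; {
            state = state(l)
        }
    }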
@@ -494,6 +494,73 @@ var tests = []struct {
  },
  },
  },
+ {
+ name: "histogram series descriptions",
+ tests: []testCase{
+ {
+ input: `{} {{buckets:[5]}}`,
+ expected: []Item{
+ {LEFT_BRACE, 0, `{`},
+ {RIGHT_BRACE, 1, `}`},
+ {SPACE, 2, ` `},
+ {OPEN_HIST, 3, `{{`},
+ {BUCKETS_DESC, 5, `buckets`},
+ {COLON, 12, `:`},
+ {LEFT_BRACKET, 13, `[`},
+ {NUMBER, 14, `5`},
+ {RIGHT_BRACKET, 15, `]`},
+ {CLOSE_HIST, 16, `}}`},
+ },
+ seriesDesc: true,
+ },
+ {
+ input: `{} {{buckets: [5 10 7]}}`,
+ expected: []Item{
+ {LEFT_BRACE, 0, `{`},
+ {RIGHT_BRACE, 1, `}`},
+ {SPACE, 2, ` `},
+ {OPEN_HIST, 3, `{{`},
+ {BUCKETS_DESC, 5, `buckets`},
+ {COLON, 12, `:`},
+ {SPACE, 13, ` `},
+ {LEFT_BRACKET, 14, `[`},
+ {NUMBER, 15, `5`},
+ {SPACE, 16, ` `},
+ {NUMBER, 17, `10`},
+ {SPACE, 19, ` `},
+ {NUMBER, 20, `7`},
+ {RIGHT_BRACKET, 21, `]`},
+ {CLOSE_HIST, 22, `}}`},
+ },
+ seriesDesc: true,
+ },
+ {
+ input: `{} {{buckets: [5 10 7] schema:1}}`,
+ expected: []Item{
+ {LEFT_BRACE, 0, `{`},
+ {RIGHT_BRACE, 1, `}`},
+ {SPACE, 2, ` `},
+ {OPEN_HIST, 3, `{{`},
+ {BUCKETS_DESC, 5, `buckets`},
+ {COLON, 12, `:`},
+ {SPACE, 13, ` `},
+ {LEFT_BRACKET, 14, `[`},
+ {NUMBER, 15, `5`},
+ {SPACE, 16, ` `},
+ {NUMBER, 17, `10`},
+ {SPACE, 19, ` `},
+ {NUMBER, 20, `7`},
+ {RIGHT_BRACKET, 21, `]`},
+ {SPACE, 22, ` `},
+ {SCHEMA_DESC, 23, `schema`},
+ {COLON, 29, `:`},
+ {NUMBER, 30, `1`},
+ {CLOSE_HIST, 31, `}}`},
+ },
+ seriesDesc: true,
+ },
+ },
+ },
  {
  name: "series descriptions",
  tests: []testCase{

@@ -735,7 +802,6 @@ func TestLexer(t *testing.T) {

  for l.state = lexStatements; l.state != nil; {
  out = append(out, Item{})
-
  l.NextItem(&out[len(out)-1])
  }

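The parse.go hunks below introduce EnrichParseError so that the unit-test framework and promtool can decorate parse errors with extra context, for example the position of a series inside a larger test file, and they thread the new FloatHistogram field through SequenceValue. A usage sketch for the error enrichment, assuming ParseErr's exported LineOffset and Query fields:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/promql/parser"
    )

    func main() {
        input := `my_series{ 1 2 3` // malformed on purpose
        _, _, err := parser.ParseSeriesDesc(input)
        if err != nil {
            // Decorate every ParseErr in the (possibly multi-) error with the
            // position of this snippet inside a larger test file.
            parser.EnrichParseError(err, func(pe *parser.ParseErr) {
                pe.LineOffset = 41 // hypothetical: line of the series in the file
                pe.Query = input
            })
            fmt.Println(err)
        }
    }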
@ -26,6 +26,7 @@ import (
|
||||||
|
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
|
||||||
|
"github.com/prometheus/prometheus/model/histogram"
|
||||||
"github.com/prometheus/prometheus/model/labels"
|
"github.com/prometheus/prometheus/model/labels"
|
||||||
"github.com/prometheus/prometheus/model/timestamp"
|
"github.com/prometheus/prometheus/model/timestamp"
|
||||||
"github.com/prometheus/prometheus/util/strutil"
|
"github.com/prometheus/prometheus/util/strutil"
|
||||||
|
@ -168,6 +169,21 @@ func (errs ParseErrors) Error() string {
|
||||||
return "error contains no error message"
|
return "error contains no error message"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EnrichParseError enriches a single or list of parse errors (used for unit tests and promtool).
|
||||||
|
func EnrichParseError(err error, enrich func(parseErr *ParseErr)) {
|
||||||
|
var parseErr *ParseErr
|
||||||
|
if errors.As(err, &parseErr) {
|
||||||
|
enrich(parseErr)
|
||||||
|
}
|
||||||
|
var parseErrors ParseErrors
|
||||||
|
if errors.As(err, &parseErrors) {
|
||||||
|
for i, e := range parseErrors {
|
||||||
|
enrich(&e)
|
||||||
|
parseErrors[i] = e
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// ParseExpr returns the expression parsed from the input.
|
// ParseExpr returns the expression parsed from the input.
|
||||||
func ParseExpr(input string) (expr Expr, err error) {
|
func ParseExpr(input string) (expr Expr, err error) {
|
||||||
p := NewParser(input)
|
p := NewParser(input)
|
||||||
@@ -214,14 +230,18 @@ func ParseMetricSelector(input string) (m []*labels.Matcher, err error) {

 // SequenceValue is an omittable value in a sequence of time series values.
 type SequenceValue struct {
 	Value   float64
 	Omitted bool
+	Histogram *histogram.FloatHistogram
 }

 func (v SequenceValue) String() string {
 	if v.Omitted {
 		return "_"
 	}
+	if v.Histogram != nil {
+		return v.Histogram.String()
+	}
 	return fmt.Sprintf("%f", v.Value)
 }

@@ -270,6 +290,10 @@ func (p *parser) addParseErr(positionRange PositionRange, err error) {
 	p.parseErrors = append(p.parseErrors, perr)
 }

+func (p *parser) addSemanticError(err error) {
+	p.addParseErr(p.yyParser.lval.item.PositionRange(), err)
+}
+
 // unexpected creates a parser error complaining about an unexpected lexer item.
 // The item that is presented as unexpected is always the last item produced
 // by the lexer.

@@ -443,6 +467,147 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateExpr) {
 	return ret
 }

+// newMap is used when building the FloatHistogram from a map.
+func (p *parser) newMap() (ret map[string]interface{}) {
+	return map[string]interface{}{}
+}
+
+// mergeMaps is used to combine maps as they're used to later build the FloatHistogram.
+// This will merge the right map into the left map.
+func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string]interface{}) {
+	for key, value := range *right {
+		if _, ok := (*left)[key]; ok {
+			p.addParseErrf(PositionRange{}, "duplicate key \"%s\" in histogram", key)
+			continue
+		}
+		(*left)[key] = value
+	}
+	return left
+}
+
+func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
+	return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram {
+		return a.Add(b)
+	})
+}
+
+func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
+	return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram {
+		return a.Sub(b)
+	})
+}
+
+func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64,
+	combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) *histogram.FloatHistogram,
+) ([]SequenceValue, error) {
+	ret := make([]SequenceValue, times+1)
+	// Add an additional value (the base) for time 0, which we ignore in tests.
+	ret[0] = SequenceValue{Histogram: base}
+	cur := base
+	for i := uint64(1); i <= times; i++ {
+		if cur.Schema > inc.Schema {
+			return nil, fmt.Errorf("error combining histograms: cannot merge from schema %d to %d", inc.Schema, cur.Schema)
+		}
+
+		cur = combine(cur.Copy(), inc)
+		ret[i] = SequenceValue{Histogram: cur}
+	}
+
+	return ret, nil
+}
+
+// buildHistogramFromMap is used in the grammar to take the individual parts of the histogram and complete it.
+func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.FloatHistogram {
+	output := &histogram.FloatHistogram{}
+
+	val, ok := (*desc)["schema"]
+	if ok {
+		schema, ok := val.(int64)
+		if ok {
+			output.Schema = int32(schema)
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing schema number: %v", val)
+		}
+	}
+
+	val, ok = (*desc)["sum"]
+	if ok {
+		sum, ok := val.(float64)
+		if ok {
+			output.Sum = sum
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing sum number: %v", val)
+		}
+	}
+	val, ok = (*desc)["count"]
+	if ok {
+		count, ok := val.(float64)
+		if ok {
+			output.Count = count
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing count number: %v", val)
+		}
+	}
+
+	val, ok = (*desc)["z_bucket"]
+	if ok {
+		bucket, ok := val.(float64)
+		if ok {
+			output.ZeroCount = bucket
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket number: %v", val)
+		}
+	}
+	val, ok = (*desc)["z_bucket_w"]
+	if ok {
+		bucketWidth, ok := val.(float64)
+		if ok {
+			output.ZeroThreshold = bucketWidth
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket_w number: %v", val)
+		}
+	}
+
+	buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset")
+	output.PositiveBuckets = buckets
+	output.PositiveSpans = spans
+
+	buckets, spans = p.buildHistogramBucketsAndSpans(desc, "n_buckets", "n_offset")
+	output.NegativeBuckets = buckets
+	output.NegativeSpans = spans
+
+	return output
+}
+
+func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]interface{}, bucketsKey, offsetKey string,
+) (buckets []float64, spans []histogram.Span) {
+	bucketCount := 0
+	val, ok := (*desc)[bucketsKey]
+	if ok {
+		val, ok := val.([]float64)
+		if ok {
+			buckets = val
+			bucketCount = len(buckets)
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing %s float array: %v", bucketsKey, val)
+		}
+	}
+	offset := int32(0)
+	val, ok = (*desc)[offsetKey]
+	if ok {
+		val, ok := val.(int64)
+		if ok {
+			offset = int32(val)
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing %s number: %v", offsetKey, val)
+		}
+	}
+	if bucketCount > 0 {
+		spans = []histogram.Span{{Offset: offset, Length: uint32(bucketCount)}}
+	}
+	return
+}
+
 // number parses a number.
 func (p *parser) number(val string) float64 {
 	n, err := strconv.ParseInt(val, 0, 64)
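To make the descriptor-to-field mapping above concrete, a small sketch that feeds the new `{{...}}` syntax through the exported entry point and prints the resulting histogram (the metric name is arbitrary):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/promql/parser"
    )

    func main() {
        // Keys map onto FloatHistogram fields as in buildHistogramFromMap:
        // schema, sum, count, z_bucket (ZeroCount), z_bucket_w (ZeroThreshold),
        // buckets/offset (positive side), n_buckets/n_offset (negative side).
        _, vals, err := parser.ParseSeriesDesc(
            `some_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}`)
        if err != nil {
            panic(err)
        }
        // vals[0].Histogram is the parsed *histogram.FloatHistogram.
        fmt.Println(vals[0].Histogram.String())
    }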
promql/parser/parse_test.go

@@ -24,6 +24,7 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"

+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 )

@@ -3654,6 +3655,17 @@ var testSeries = []struct {
 	input:          `my_metric{a="b"} 1 2 3-0x4`,
 	expectedMetric: labels.FromStrings(labels.MetricName, "my_metric", "a", "b"),
 	expectedValues: newSeq(1, 2, 3, 3, 3, 3, 3),
+}, {
+	input: `{} 1+1`,
+	fail:  true,
+}, {
+	input:          `{} 1x0`,
+	expectedMetric: labels.EmptyLabels(),
+	expectedValues: newSeq(1),
+}, {
+	input:          `{} 1+1x0`,
+	expectedMetric: labels.EmptyLabels(),
+	expectedValues: newSeq(1),
 }, {
 	input:          `my_metric{a="b"} 1 3 _ 5 _x4`,
 	expectedMetric: labels.FromStrings(labels.MetricName, "my_metric", "a", "b"),

@@ -3721,6 +3733,305 @@ func newSeq(vals ...float64) (res []SequenceValue) {
 	return res
 }

+func TestParseHistogramSeries(t *testing.T) {
+	for _, test := range []struct {
+		name          string
+		input         string
+		expected      []histogram.FloatHistogram
+		expectedError string
+	}{
+		{
+			name:     "empty histogram",
+			input:    "{} {{}}",
+			expected: []histogram.FloatHistogram{{}},
+		},
+		{
+			name:     "empty histogram with space",
+			input:    "{} {{ }}",
+			expected: []histogram.FloatHistogram{{}},
+		},
+		{
+			name:  "all properties used",
+			input: `{} {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}`,
+			expected: []histogram.FloatHistogram{{
+				Schema:          1,
+				Sum:             -0.3,
+				Count:           3.1,
+				ZeroCount:       7.1,
+				ZeroThreshold:   0.05,
+				PositiveBuckets: []float64{5.1, 10, 7},
+				PositiveSpans:   []histogram.Span{{Offset: -3, Length: 3}},
+				NegativeBuckets: []float64{4.1, 5},
+				NegativeSpans:   []histogram.Span{{Offset: -5, Length: 2}},
+			}},
+		},
+		{
+			name:  "all properties used - with spaces",
+			input: `{} {{schema:1 sum:0.3 count:3 z_bucket:7 z_bucket_w:5 buckets:[5 10 7 ] offset:-3 n_buckets:[4 5] n_offset:5 }}`,
+			expected: []histogram.FloatHistogram{{
+				Schema:          1,
+				Sum:             0.3,
+				Count:           3,
+				ZeroCount:       7,
+				ZeroThreshold:   5,
+				PositiveBuckets: []float64{5, 10, 7},
+				PositiveSpans:   []histogram.Span{{Offset: -3, Length: 3}},
+				NegativeBuckets: []float64{4, 5},
+				NegativeSpans:   []histogram.Span{{Offset: 5, Length: 2}},
+			}},
+		},
+		{
+			name:  "static series",
+			input: `{} {{buckets:[5 10 7] schema:1}}x2`,
+			expected: []histogram.FloatHistogram{
+				{
+					Schema:          1,
+					PositiveBuckets: []float64{5, 10, 7},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
+				},
+				{
+					Schema:          1,
+					PositiveBuckets: []float64{5, 10, 7},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
+				},
+				{
+					Schema:          1,
+					PositiveBuckets: []float64{5, 10, 7},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
+				},
+			},
+		},
+		{
+			name:  "static series - x0",
+			input: `{} {{buckets:[5 10 7] schema:1}}x0`,
+			expected: []histogram.FloatHistogram{
+				{
+					Schema:          1,
+					PositiveBuckets: []float64{5, 10, 7},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
+				},
+			},
+		},
+		{
+			name:  "2 histograms stated explicitly",
+			input: `{} {{buckets:[5 10 7] schema:1}} {{buckets:[1 2 3] schema:1}}`,
+			expected: []histogram.FloatHistogram{
+				{
+					Schema:          1,
+					PositiveBuckets: []float64{5, 10, 7},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
+				},
+				{
+					Schema:          1,
+					PositiveBuckets: []float64{1, 2, 3},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
+				},
+			},
+		},
+		{
+			name:  "series with increment - with different schemas",
+			input: `{} {{buckets:[5] schema:0}}+{{buckets:[1 2] schema:1}}x2`,
+			expected: []histogram.FloatHistogram{
+				{
+					PositiveBuckets: []float64{5},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
+				},
+				{
+					PositiveBuckets: []float64{6, 2},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
+				},
+				{
+					PositiveBuckets: []float64{7, 4},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
+				},
+			},
+		},
+		{
+			name:  "series with decrement",
+			input: `{} {{buckets:[5 10 7] schema:1}}-{{buckets:[1 2 3] schema:1}}x2`,
+			expected: []histogram.FloatHistogram{
+				{
+					Schema:          1,
+					PositiveBuckets: []float64{5, 10, 7},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
+				},
+				{
+					Schema:          1,
+					PositiveBuckets: []float64{4, 8, 4},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
+				},
+				{
+					Schema:          1,
+					PositiveBuckets: []float64{3, 6, 1},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
+				},
+			},
+		},
+		{
+			name:  "series with increment - 0x",
+			input: `{} {{buckets:[5 10 7] schema:1}}+{{buckets:[1 2 3] schema:1}}x0`,
+			expected: []histogram.FloatHistogram{
+				{
+					Schema:          1,
+					PositiveBuckets: []float64{5, 10, 7},
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
+				},
+			},
+		},
+		{
+			name:          "series with different schemas - second one is smaller",
+			input:         `{} {{buckets:[5 10 7] schema:1}}+{{buckets:[1 2 3] schema:0}}x2`,
+			expectedError: `1:63: parse error: error combining histograms: cannot merge from schema 0 to 1`,
+		},
+		{
+			name:  "different order",
+			input: `{} {{buckets:[5 10 7] schema:1}}`,
+			expected: []histogram.FloatHistogram{{
+				Schema:          1,
+				PositiveBuckets: []float64{5, 10, 7},
+				PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
+			}},
+		},
+		{
+			name:          "double property",
+			input:         `{} {{schema:1 schema:1}}`,
+			expectedError: `1:1: parse error: duplicate key "schema" in histogram`,
+		},
+		{
+			name:          "unknown property",
+			input:         `{} {{foo:1}}`,
+			expectedError: `1:6: parse error: bad histogram descriptor found: "foo"`,
+		},
+		{
+			name:          "space before :",
+			input:         `{} {{schema :1}}`,
+			expectedError: "1:6: parse error: missing `:` for histogram descriptor",
+		},
+		{
+			name:          "space after :",
+			input:         `{} {{schema: 1}}`,
+			expectedError: `1:13: parse error: unexpected " " in series values`,
+		},
+		{
+			name:          "space after [",
+			input:         `{} {{buckets:[ 1]}}`,
+			expectedError: `1:15: parse error: unexpected " " in series values`,
+		},
+		{
+			name:          "space after {{",
+			input:         `{} {{ schema:1}}`,
+			expectedError: `1:7: parse error: unexpected "<Item 57372>" "schema" in series values`,
+		},
+	} {
+		t.Run(test.name, func(t *testing.T) {
+			_, vals, err := ParseSeriesDesc(test.input)
+			if test.expectedError != "" {
+				require.EqualError(t, err, test.expectedError)
+				return
+			}
+			require.NoError(t, err)
+			var got []histogram.FloatHistogram
+			for _, v := range vals {
+				got = append(got, *v.Histogram)
+			}
+			require.Equal(t, test.expected, got)
+		})
+	}
+}
+
+func TestHistogramTestExpression(t *testing.T) {
+	for _, test := range []struct {
+		name     string
+		input    histogram.FloatHistogram
+		expected string
+	}{
+		{
+			name: "single positive and negative span",
+			input: histogram.FloatHistogram{
+				Schema:          1,
+				Sum:             -0.3,
+				Count:           3.1,
+				ZeroCount:       7.1,
+				ZeroThreshold:   0.05,
+				PositiveBuckets: []float64{5.1, 10, 7},
+				PositiveSpans:   []histogram.Span{{Offset: -3, Length: 3}},
+				NegativeBuckets: []float64{4.1, 5},
+				NegativeSpans:   []histogram.Span{{Offset: -5, Length: 2}},
+			},
+			expected: `{{schema:1 count:3.1 sum:-0.3 z_bucket:7.1 z_bucket_w:0.05 offset:-3 buckets:[5.1 10 7] n_offset:-5 n_buckets:[4.1 5]}}`,
+		},
+		{
+			name: "multiple positive and negative spans",
+			input: histogram.FloatHistogram{
+				PositiveBuckets: []float64{5.1, 10, 7},
+				PositiveSpans: []histogram.Span{
+					{Offset: -3, Length: 1},
+					{Offset: 4, Length: 2},
+				},
+				NegativeBuckets: []float64{4.1, 5, 7, 8, 9},
+				NegativeSpans: []histogram.Span{
+					{Offset: -1, Length: 2},
+					{Offset: 2, Length: 3},
+				},
+			},
+			expected: `{{offset:-3 buckets:[5.1 0 0 0 0 10 7] n_offset:-1 n_buckets:[4.1 5 0 0 7 8 9]}}`,
+		},
+	} {
+		t.Run(test.name, func(t *testing.T) {
+			expression := test.input.TestExpression()
+			require.Equal(t, test.expected, expression)
+
+			_, vals, err := ParseSeriesDesc("{} " + expression)
+			require.NoError(t, err)
+			require.Len(t, vals, 1)
+			canonical := vals[0].Histogram
+			require.NotNil(t, canonical)
+			require.Equal(t, test.expected, canonical.TestExpression())
+		})
+	}
+}
+
 func TestParseSeries(t *testing.T) {
 	for _, test := range testSeries {
 		metric, vals, err := ParseSeriesDesc(test.input)
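The `x<n>` and `+`/`-` notations exercised above expand at parse time: `a+bxn` yields the base histogram followed by n cumulative additions of the increment (histogramsIncreaseSeries), and `a-bxn` the analogous subtractions. A quick sketch of the expansion (the series name `h` is arbitrary):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/promql/parser"
    )

    func main() {
        // Expands to three samples: the schema:0 base, then two cumulative
        // additions of the schema:1 increment. The reverse schema order
        // would fail with "cannot merge from schema 0 to 1".
        _, vals, err := parser.ParseSeriesDesc(
            `h {{buckets:[5] schema:0}}+{{buckets:[1 2] schema:1}}x2`)
        if err != nil {
            panic(err)
        }
        for _, v := range vals {
            fmt.Println(v.Histogram.TestExpression())
        }
    }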
promql/promql_test.go

@@ -15,7 +15,6 @@ package promql

 import (
 	"context"
-	"path/filepath"
 	"strings"
 	"testing"
 	"time"

@@ -26,19 +25,21 @@ import (
 	"github.com/prometheus/prometheus/util/teststorage"
 )

+func newTestEngine() *Engine {
+	return NewEngine(EngineOpts{
+		Logger:                   nil,
+		Reg:                      nil,
+		MaxSamples:               10000,
+		Timeout:                  100 * time.Second,
+		NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) },
+		EnableAtModifier:         true,
+		EnableNegativeOffset:     true,
+		EnablePerStepStats:       true,
+	})
+}
+
 func TestEvaluations(t *testing.T) {
-	files, err := filepath.Glob("testdata/*.test")
-	require.NoError(t, err)
-
-	for _, fn := range files {
-		t.Run(fn, func(t *testing.T) {
-			test, err := newTestFromFile(t, fn)
-			require.NoError(t, err)
-			require.NoError(t, test.Run())
-
-			test.Close()
-		})
-	}
+	RunBuiltinTests(t, newTestEngine())
 }

 // Run a lot of queries at the same time, to check for race conditions.
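The point of newTestEngine is that the engine is now injected into the suite rather than built inside it, so downstream projects can run the embedded .test files against their own engine. A hedged sketch of an external caller (the package name is hypothetical; the option values mirror newTestEngine and are not mandatory):

    package myengine_test

    import (
        "testing"
        "time"

        "github.com/prometheus/prometheus/promql"
    )

    func TestPromQLAcceptance(t *testing.T) {
        engine := promql.NewEngine(promql.EngineOpts{
            MaxSamples:               10000,
            Timeout:                  100 * time.Second,
            NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 },
            EnableAtModifier:         true,
            EnableNegativeOffset:     true,
        })
        // RunBuiltinTests is introduced in promql/test.go below.
        promql.RunBuiltinTests(t, engine)
    }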
262	promql/test.go

@@ -15,12 +15,14 @@ package promql

 import (
 	"context"
+	"embed"
 	"errors"
 	"fmt"
+	"io/fs"
 	"math"
-	"os"
 	"strconv"
 	"strings"
+	"testing"
 	"time"

 	"github.com/grafana/regexp"

@@ -28,11 +30,11 @@ import (
 	"github.com/stretchr/testify/require"

 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
-	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/util/teststorage"
 	"github.com/prometheus/prometheus/util/testutil"
 )

@@ -51,23 +53,74 @@ const (

 var testStartTime = time.Unix(0, 0).UTC()

-// Test is a sequence of read and write commands that are run
+// LoadedStorage returns storage with generated data using the provided load statements.
+// Non-load statements will cause test errors.
+func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
+	test, err := newTest(t, input)
+	require.NoError(t, err)
+
+	for _, cmd := range test.cmds {
+		switch cmd.(type) {
+		case *loadCmd:
+			require.NoError(t, test.exec(cmd, nil))
+		default:
+			t.Errorf("only 'load' commands accepted, got '%s'", cmd)
+		}
+	}
+	return test.storage
+}
+
+// RunBuiltinTests runs an acceptance test suite against the provided engine.
+func RunBuiltinTests(t *testing.T, engine engineQuerier) {
+	files, err := fs.Glob(testsFs, "*/*.test")
+	require.NoError(t, err)
+
+	for _, fn := range files {
+		t.Run(fn, func(t *testing.T) {
+			content, err := fs.ReadFile(testsFs, fn)
+			require.NoError(t, err)
+			RunTest(t, string(content), engine)
+		})
+	}
+}
+
+// RunTest parses and runs the test against the provided engine.
+func RunTest(t testutil.T, input string, engine engineQuerier) {
+	test, err := newTest(t, input)
+	require.NoError(t, err)
+
+	defer func() {
+		if test.storage != nil {
+			test.storage.Close()
+		}
+		if test.cancelCtx != nil {
+			test.cancelCtx()
+		}
+	}()
+
+	for _, cmd := range test.cmds {
+		// TODO(fabxc): aggregate command errors, yield diffs for result
+		// comparison errors.
+		require.NoError(t, test.exec(cmd, engine))
+	}
+}
+
+// test is a sequence of read and write commands that are run
 // against a test storage.
-type Test struct {
+type test struct {
 	testutil.T

 	cmds []testCommand

 	storage *teststorage.TestStorage

-	queryEngine *Engine
-	context     context.Context
-	cancelCtx   context.CancelFunc
+	context   context.Context
+	cancelCtx context.CancelFunc
 }

-// NewTest returns an initialized empty Test.
-func NewTest(t testutil.T, input string) (*Test, error) {
-	test := &Test{
+// newTest returns an initialized empty Test.
+func newTest(t testutil.T, input string) (*test, error) {
+	test := &test{
 		T:    t,
 		cmds: []testCommand{},
 	}

@@ -77,46 +130,12 @@ func NewTest(t testutil.T, input string) (*Test, error) {
 	return test, err
 }

-func newTestFromFile(t testutil.T, filename string) (*Test, error) {
-	content, err := os.ReadFile(filename)
-	if err != nil {
-		return nil, err
-	}
-	return NewTest(t, string(content))
-}
-
-// QueryEngine returns the test's query engine.
-func (t *Test) QueryEngine() *Engine {
-	return t.queryEngine
-}
-
-// Queryable allows querying the test data.
-func (t *Test) Queryable() storage.Queryable {
-	return t.storage
-}
-
-// Context returns the test's context.
-func (t *Test) Context() context.Context {
-	return t.context
-}
-
-// Storage returns the test's storage.
-func (t *Test) Storage() storage.Storage {
-	return t.storage
-}
-
-// TSDB returns test's TSDB.
-func (t *Test) TSDB() *tsdb.DB {
-	return t.storage.DB
-}
-
-// ExemplarStorage returns the test's exemplar storage.
-func (t *Test) ExemplarStorage() storage.ExemplarStorage {
-	return t.storage
-}
-
-func (t *Test) ExemplarQueryable() storage.ExemplarQueryable {
-	return t.storage.ExemplarQueryable()
+//go:embed testdata
+var testsFs embed.FS
+
+type engineQuerier interface {
+	NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error)
+	NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error)
 }

 func raise(line int, format string, v ...interface{}) error {

@@ -144,12 +163,8 @@ func parseLoad(lines []string, i int) (int, *loadCmd, error) {
 			i--
 			break
 		}
-		metric, vals, err := parser.ParseSeriesDesc(defLine)
+		metric, vals, err := parseSeries(defLine, i)
 		if err != nil {
-			var perr *parser.ParseErr
-			if errors.As(err, &perr) {
-				perr.LineOffset = i
-			}
 			return i, nil, err
 		}
 		cmd.set(metric, vals...)

@@ -157,7 +172,18 @@ func parseLoad(lines []string, i int) (int, *loadCmd, error) {
 	return i, cmd, nil
 }

-func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) {
+func parseSeries(defLine string, line int) (labels.Labels, []parser.SequenceValue, error) {
+	metric, vals, err := parser.ParseSeriesDesc(defLine)
+	if err != nil {
+		parser.EnrichParseError(err, func(parseErr *parser.ParseErr) {
+			parseErr.LineOffset = line
+		})
+		return labels.Labels{}, nil, err
+	}
+	return metric, vals, nil
+}
+
+func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
 	if !patEvalInstant.MatchString(lines[i]) {
 		return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>")
 	}

@@ -169,14 +195,13 @@ func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) {
 	)
 	_, err := parser.ParseExpr(expr)
 	if err != nil {
-		var perr *parser.ParseErr
-		if errors.As(err, &perr) {
-			perr.LineOffset = i
+		parser.EnrichParseError(err, func(parseErr *parser.ParseErr) {
+			parseErr.LineOffset = i
 			posOffset := parser.Pos(strings.Index(lines[i], expr))
-			perr.PositionRange.Start += posOffset
-			perr.PositionRange.End += posOffset
-			perr.Query = lines[i]
-		}
+			parseErr.PositionRange.Start += posOffset
+			parseErr.PositionRange.End += posOffset
+			parseErr.Query = lines[i]
+		})
 		return i, nil, err
 	}

@@ -205,12 +230,8 @@ func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) {
 			cmd.expect(0, parser.SequenceValue{Value: f})
 			break
 		}
-		metric, vals, err := parser.ParseSeriesDesc(defLine)
+		metric, vals, err := parseSeries(defLine, i)
 		if err != nil {
-			var perr *parser.ParseErr
-			if errors.As(err, &perr) {
-				perr.LineOffset = i
-			}
 			return i, nil, err
 		}

@@ -237,7 +258,7 @@ func getLines(input string) []string {
 }

 // parse the given command sequence and appends it to the test.
-func (t *Test) parse(input string) error {
+func (t *test) parse(input string) error {
 	lines := getLines(input)
 	var err error
 	// Scan for steps line by line.

@@ -281,7 +302,7 @@ func (*evalCmd) testCmd() {}
 type loadCmd struct {
 	gap       time.Duration
 	metrics   map[uint64]labels.Labels
-	defs      map[uint64][]FPoint
+	defs      map[uint64][]Sample
 	exemplars map[uint64][]exemplar.Exemplar
 }

@@ -289,7 +310,7 @@ func newLoadCmd(gap time.Duration) *loadCmd {
 	return &loadCmd{
 		gap:       gap,
 		metrics:   map[uint64]labels.Labels{},
-		defs:      map[uint64][]FPoint{},
+		defs:      map[uint64][]Sample{},
 		exemplars: map[uint64][]exemplar.Exemplar{},
 	}
 }

@@ -302,13 +323,14 @@ func (cmd loadCmd) String() string {
 func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) {
 	h := m.Hash()

-	samples := make([]FPoint, 0, len(vals))
+	samples := make([]Sample, 0, len(vals))
 	ts := testStartTime
 	for _, v := range vals {
 		if !v.Omitted {
-			samples = append(samples, FPoint{
+			samples = append(samples, Sample{
 				T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond),
 				F: v.Value,
+				H: v.Histogram,
 			})
 		}
 		ts = ts.Add(cmd.gap)

@@ -323,7 +345,7 @@ func (cmd *loadCmd) append(a storage.Appender) error {
 		m := cmd.metrics[h]

 		for _, s := range smpls {
-			if _, err := a.Append(0, m, s.T, s.F); err != nil {
+			if err := appendSample(a, s, m); err != nil {
 				return err
 			}
 		}

@@ -331,6 +353,19 @@ func (cmd *loadCmd) append(a storage.Appender) error {
 	return nil
 }

+func appendSample(a storage.Appender, s Sample, m labels.Labels) error {
+	if s.H != nil {
+		if _, err := a.AppendHistogram(0, m, s.T, nil, s.H); err != nil {
+			return err
+		}
+	} else {
+		if _, err := a.Append(0, m, s.T, s.F); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // evalCmd is a command that evaluates an expression for the given time (range)
 // and expects a specific result.
 type evalCmd struct {

@@ -399,8 +434,13 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
 			if ev.ordered && exp.pos != pos+1 {
 				return fmt.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1)
 			}
-			if !almostEqual(exp.vals[0].Value, v.F) {
-				return fmt.Errorf("expected %v for %s but got %v", exp.vals[0].Value, v.Metric, v.F)
+			exp0 := exp.vals[0]
+			expH := exp0.Histogram
+			if (expH == nil) != (v.H == nil) || (expH != nil && !expH.Equals(v.H)) {
+				return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H))
+			}
+			if !almostEqual(exp0.Value, v.F) {
+				return fmt.Errorf("expected %v for %s but got %v", exp0.Value, v.Metric, v.F)
 			}

 			seen[fp] = true

@@ -416,8 +456,15 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
 		}

 	case Scalar:
-		if !almostEqual(ev.expected[0].vals[0].Value, val.V) {
-			return fmt.Errorf("expected Scalar %v but got %v", val.V, ev.expected[0].vals[0].Value)
+		if len(ev.expected) != 1 {
+			return fmt.Errorf("expected vector result, but got scalar %s", val.String())
+		}
+		exp0 := ev.expected[0].vals[0]
+		if exp0.Histogram != nil {
+			return fmt.Errorf("expected Histogram %v but got scalar %s", exp0.Histogram.TestExpression(), val.String())
+		}
+		if !almostEqual(exp0.Value, val.V) {
+			return fmt.Errorf("expected Scalar %v but got %v", val.V, exp0.Value)
 		}

 	default:

@@ -426,6 +473,14 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
 	return nil
 }

+// HistogramTestExpression returns TestExpression() for the given histogram or "" if the histogram is nil.
+func HistogramTestExpression(h *histogram.FloatHistogram) string {
+	if h != nil {
+		return h.TestExpression()
+	}
+	return ""
+}
+
 // clearCmd is a command that wipes the test's storage state.
 type clearCmd struct{}

@@ -433,19 +488,6 @@ func (cmd clearCmd) String() string {
 	return "clear"
 }

-// Run executes the command sequence of the test. Until the maximum error number
-// is reached, evaluation errors do not terminate execution.
-func (t *Test) Run() error {
-	for _, cmd := range t.cmds {
-		// TODO(fabxc): aggregate command errors, yield diffs for result
-		// comparison errors.
-		if err := t.exec(cmd); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 type atModifierTestCase struct {
 	expr     string
 	evalTime time.Time

@@ -515,7 +557,7 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCase, error) {
 }

 // exec processes a single step of the test.
-func (t *Test) exec(tc testCommand) error {
+func (t *test) exec(tc testCommand, engine engineQuerier) error {
 	switch cmd := tc.(type) {
 	case *clearCmd:
 		t.clear()

@@ -538,7 +580,7 @@ func (t *Test) exec(tc testCommand) error {
 		}
 		queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
 		for _, iq := range queries {
-			q, err := t.QueryEngine().NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
+			q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
 			if err != nil {
 				return err
 			}

@@ -555,12 +597,12 @@ func (t *Test) exec(tc testCommand) error {
 			}
 			err = cmd.compareResult(res.Value)
 			if err != nil {
-				return fmt.Errorf("error in %s %s: %w", cmd, iq.expr, err)
+				return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
 			}

 			// Check query returns same result in range mode,
 			// by checking against the middle step.
-			q, err = t.queryEngine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
+			q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
 			if err != nil {
 				return err
 			}

@@ -576,12 +618,19 @@ func (t *Test) exec(tc testCommand) error {
 			mat := rangeRes.Value.(Matrix)
 			vec := make(Vector, 0, len(mat))
 			for _, series := range mat {
+				// We expect either Floats or Histograms.
 				for _, point := range series.Floats {
 					if point.T == timeMilliseconds(iq.evalTime) {
 						vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F})
 						break
 					}
 				}
+				for _, point := range series.Histograms {
+					if point.T == timeMilliseconds(iq.evalTime) {
+						vec = append(vec, Sample{Metric: series.Metric, T: point.T, H: point.H})
+						break
+					}
+				}
 			}
 			if _, ok := res.Value.(Scalar); ok {
 				err = cmd.compareResult(Scalar{V: vec[0].F})

@@ -601,7 +650,7 @@ func (t *Test) exec(tc testCommand) error {
 }

 // clear the current test storage of all inserted samples.
-func (t *Test) clear() {
+func (t *test) clear() {
 	if t.storage != nil {
 		err := t.storage.Close()
 		require.NoError(t.T, err, "Unexpected error while closing test storage.")

@@ -610,30 +659,9 @@ func (t *Test) clear() {
 		t.cancelCtx()
 	}
 	t.storage = teststorage.New(t)
-
-	opts := EngineOpts{
-		Logger:                   nil,
-		Reg:                      nil,
-		MaxSamples:               10000,
-		Timeout:                  100 * time.Second,
-		NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) },
-		EnableAtModifier:         true,
-		EnableNegativeOffset:     true,
-		EnablePerStepStats:       true,
-	}
-
-	t.queryEngine = NewEngine(opts)
 	t.context, t.cancelCtx = context.WithCancel(context.Background())
 }

-// Close closes resources associated with the Test.
-func (t *Test) Close() {
-	t.cancelCtx()
-
-	err := t.storage.Close()
-	require.NoError(t.T, err, "Unexpected error while closing test storage.")
-}
-
 // samplesAlmostEqual returns true if the two sample lines only differ by a
 // small relative error in their sample value.
 func almostEqual(a, b float64) bool {

@@ -763,7 +791,7 @@ func (ll *LazyLoader) appendTill(ts int64) error {
 				ll.loadCmd.defs[h] = smpls[i:]
 				break
 			}
-			if _, err := app.Append(0, m, s.T, s.F); err != nil {
+			if err := appendSample(app, s, m); err != nil {
 				return err
 			}
 			if i == len(smpls)-1 {
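For downstream users of the previously exported Test type, the replacement pattern looks roughly like the sketch below (package name and script contents are illustrative; the engine options are a minimal subset of what newTestEngine uses):

    package myrules_test

    import (
        "testing"
        "time"

        "github.com/prometheus/prometheus/promql"
    )

    func TestWithTestScript(t *testing.T) {
        // Option one: only load data, then query the storage yourself.
        storage := promql.LoadedStorage(t, `
    load 1m
        http_requests{job="app-server"} 75 85 70 70
    `)
        t.Cleanup(func() { storage.Close() })

        // Option two: run a full load/eval script against an engine of your choice.
        engine := promql.NewEngine(promql.EngineOpts{
            MaxSamples:           10000,
            Timeout:              10 * time.Second,
            EnableAtModifier:     true,
            EnableNegativeOffset: true,
        })
        promql.RunTest(t, `
    load 5m
        some_metric 0+1x4

    eval instant at 10m some_metric
        some_metric 2
    `, engine)
    }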
226	promql/testdata/native_histograms.test (vendored, new file)

@@ -0,0 +1,226 @@
+# Minimal valid case: an empty histogram.
+load 5m
+	empty_histogram	{{}}
+
+eval instant at 5m empty_histogram
+	{__name__="empty_histogram"} {{}}
+
+eval instant at 5m histogram_count(empty_histogram)
+	{} 0
+
+eval instant at 5m histogram_sum(empty_histogram)
+	{} 0
+
+eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram)
+	{} NaN
+
+eval instant at 5m histogram_fraction(0, 8, empty_histogram)
+	{} NaN
+
+
+
+# buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4).
+load 5m
+	single_histogram	{{schema:0 sum:5 count:4 buckets:[1 2 1]}}
+
+# histogram_count extracts the count property from the histogram.
+eval instant at 5m histogram_count(single_histogram)
+	{} 4
+
+# histogram_sum extracts the sum property from the histogram.
+eval instant at 5m histogram_sum(single_histogram)
+	{} 5
+
+# We expect half of the values to fall in the range 1 < x <= 2.
+eval instant at 5m histogram_fraction(1, 2, single_histogram)
+	{} 0.5
+
+# We expect all values to fall in the range 0 < x <= 8.
+eval instant at 5m histogram_fraction(0, 8, single_histogram)
+	{} 1
+
+# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2.
+eval instant at 5m histogram_quantile(0.5, single_histogram)
+	{} 1.5
+
+
+
+# Repeat the same histogram 10 times.
+load 5m
+	multi_histogram	{{schema:0 sum:5 count:4 buckets:[1 2 1]}}x10
+
+eval instant at 5m histogram_count(multi_histogram)
+	{} 4
+
+eval instant at 5m histogram_sum(multi_histogram)
+	{} 5
+
+eval instant at 5m histogram_fraction(1, 2, multi_histogram)
+	{} 0.5
+
+eval instant at 5m histogram_quantile(0.5, multi_histogram)
+	{} 1.5
+
+
+# Each entry should look the same as the first.
+eval instant at 50m histogram_count(multi_histogram)
+	{} 4
+
+eval instant at 50m histogram_sum(multi_histogram)
+	{} 5
+
+eval instant at 50m histogram_fraction(1, 2, multi_histogram)
+	{} 0.5
+
+eval instant at 50m histogram_quantile(0.5, multi_histogram)
+	{} 1.5
+
+
+
+# Accumulate the histogram addition for 10 iterations, offset is a bucket position where offset:0 is always the bucket
+# with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket
+# positions for upper limits <1 (tending toward zero), where offset:-1 is the bucket to the left of offset:0.
+load 5m
+	incr_histogram	{{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:1 buckets:[1] offset:1}}x10
+
+eval instant at 5m histogram_count(incr_histogram)
+	{} 5
+
+eval instant at 5m histogram_sum(incr_histogram)
+	{} 6
+
+# We expect 3/5ths of the values to fall in the range 1 < x <= 2.
+eval instant at 5m histogram_fraction(1, 2, incr_histogram)
+	{} 0.6
+
+eval instant at 5m histogram_quantile(0.5, incr_histogram)
+	{} 1.5
+
+
+eval instant at 50m incr_histogram
+	{__name__="incr_histogram"} {{count:14 sum:24 buckets:[1 12 1]}}
+
+eval instant at 50m histogram_count(incr_histogram)
+	{} 14
+
+eval instant at 50m histogram_sum(incr_histogram)
+	{} 24
+
+# We expect 12/14ths of the values to fall in the range 1 < x <= 2.
+eval instant at 50m histogram_fraction(1, 2, incr_histogram)
+	{} 0.8571428571428571
+
+eval instant at 50m histogram_quantile(0.5, incr_histogram)
+	{} 1.5
+
+# Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum.
+eval instant at 50m rate(incr_histogram[5m])
+	{} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}
+
+# Calculate the 50th percentile of observations over the last 10m.
+eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m]))
+	{} 1.5
+
+
+
+# Schema represents the histogram resolution, different schema have compatible bucket boundaries, e.g.:
+# 0: 1 2 4 8 16 32 64 (higher resolution)
+# -1: 1 4 16 64 (lower resolution)
+#
+# Histograms can be merged as long as the histogram to the right is same resolution or higher.
+load 5m
+	low_res_histogram	{{schema:-1 sum:4 count:1 buckets:[1] offset:1}}+{{schema:0 sum:4 count:4 buckets:[2 2] offset:1}}x1
+
+eval instant at 5m low_res_histogram
+	{__name__="low_res_histogram"} {{schema:-1 count:5 sum:8 offset:1 buckets:[5]}}
+
+eval instant at 5m histogram_count(low_res_histogram)
+	{} 5
+
+eval instant at 5m histogram_sum(low_res_histogram)
+	{} 8
+
+# We expect all values to fall into the lower-resolution bucket with the range 1 < x <= 4.
+eval instant at 5m histogram_fraction(1, 4, low_res_histogram)
+	{} 1
+
+
+
+# z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range
+# 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket.
+load 5m
+	single_zero_histogram	{{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}}
+
+eval instant at 5m histogram_count(single_zero_histogram)
+	{} 1
+
+eval instant at 5m histogram_sum(single_zero_histogram)
+	{} 0.25
+
+# When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be equally
+# distributed around zero; i.e. that there are an equal number of positive and negative observations. Therefore the
+# entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5.
+eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram)
+	{} 1
+
+# Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5.
+eval instant at 5m histogram_quantile(0.5, single_zero_histogram)
+	{} 0
+
+
+
+# Let's turn single_histogram upside-down.
+load 5m
+	negative_histogram	{{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}}
+
+eval instant at 5m histogram_count(negative_histogram)
+	{} 4
+
+eval instant at 5m histogram_sum(negative_histogram)
+	{} -5
+
+# We expect half of the values to fall in the range -2 < x <= -1.
+eval instant at 5m histogram_fraction(-2, -1, negative_histogram)
+	{} 0.5
+
+eval instant at 5m histogram_quantile(0.5, negative_histogram)
+	{} -1.5
+
+
+
+# Two histogram samples.
+load 5m
+	two_samples_histogram	{{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}
+
+# We expect to see the newest sample.
+eval instant at 10m histogram_count(two_samples_histogram)
+	{} 4
+
+eval instant at 10m histogram_sum(two_samples_histogram)
+	{} -4
+
+eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram)
+	{} 0.5
+
+eval instant at 10m histogram_quantile(0.5, two_samples_histogram)
+	{} -1.5
+
+
+
+# Add two histograms with negated data.
+load 5m
+	balanced_histogram	{{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}x1
+
+eval instant at 5m histogram_count(balanced_histogram)
+	{} 8
+
+eval instant at 5m histogram_sum(balanced_histogram)
+	{} 0
+
+eval instant at 5m histogram_fraction(0, 4, balanced_histogram)
+	{} 0.5
+
+# If the quantile happens to be located in a span of empty buckets, the actually returned value is the lower bound of
+# the first populated bucket after the span of empty buckets.
+eval instant at 5m histogram_quantile(0.5, balanced_histogram)
+	{} 0.5
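A worked check of the single_histogram expectations above, as a sketch in Go (the fields mirror the test file; this only recomputes the count-based fraction by hand):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        // schema:0 buckets have boundaries ..., (0.5,1], (1,2], (2,4], ...
        // buckets:[1 2 1] at offset 0 puts 1 observation in (0.5,1],
        // 2 in (1,2] and 1 in (2,4], for count:4.
        h := &histogram.FloatHistogram{
            Schema:          0,
            Sum:             5,
            Count:           4,
            PositiveBuckets: []float64{1, 2, 1},
            PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
        }
        // histogram_fraction(1, 2, ...) = 2/4 = 0.5: only the middle
        // bucket lies fully inside (1, 2].
        fmt.Println(h.PositiveBuckets[1] / h.Count) // 0.5
        fmt.Println(h.TestExpression())
    }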
rules/alerting_test.go

@@ -33,6 +33,17 @@ import (
 	"github.com/prometheus/prometheus/util/teststorage"
 )

+var testEngine = promql.NewEngine(promql.EngineOpts{
+	Logger:                   nil,
+	Reg:                      nil,
+	MaxSamples:               10000,
+	Timeout:                  100 * time.Second,
+	NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 },
+	EnableAtModifier:         true,
+	EnableNegativeOffset:     true,
+	EnablePerStepStats:       true,
+})
+
 func TestAlertingRuleState(t *testing.T) {
 	tests := []struct {
 		name string
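The hunks that follow all apply the same mechanical migration. A hedged, condensed sketch of its shape (the helper and rule variable are hypothetical; the Eval argument order is taken from the hunks below):

    // Hypothetical condensed version of the migration applied throughout
    // this file: a package-level engine plus LoadedStorage replace the old
    // promql.Test suite. someRule stands in for an *AlertingRule built as
    // in the real tests.
    func evalOnce(t *testing.T, someRule *AlertingRule, evalTime time.Time) (promql.Vector, error) {
        storage := promql.LoadedStorage(t, `
    load 1m
        http_requests{job="app-server", instance="0"} 75 85 70 70
    `)
        t.Cleanup(func() { storage.Close() })

        // Before: rule.Eval(suite.Context(), 0, evalTime,
        //     EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
        return someRule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
    }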
@@ -74,14 +85,11 @@ func TestAlertingRuleState(t *testing.T) {
 }

 func TestAlertingRuleLabelsUpdate(t *testing.T) {
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 1m
 			http_requests{job="app-server", instance="0"}	75 85 70 70 stale
 	`)
-	require.NoError(t, err)
-	defer suite.Close()
-
-	require.NoError(t, suite.Run())
+	t.Cleanup(func() { storage.Close() })

 	expr, err := parser.ParseExpr(`http_requests < 100`)
 	require.NoError(t, err)

@@ -158,8 +166,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
 		t.Logf("case %d", i)
 		evalTime := baseTime.Add(time.Duration(i) * time.Minute)
 		result[0].T = timestamp.FromTime(evalTime)
-		res, err := rule.Eval(suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
-
+		res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
 		require.NoError(t, err)

 		var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.

@@ -176,20 +183,17 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
 		require.Equal(t, result, filteredRes)
 	}
 	evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
-	res, err := rule.Eval(suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
+	res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
 	require.NoError(t, err)
 	require.Equal(t, 0, len(res))
 }

 func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 1m
 			http_requests{job="app-server", instance="0"}	75 85 70 70
 	`)
-	require.NoError(t, err)
-	defer suite.Close()
-
-	require.NoError(t, suite.Run())
+	t.Cleanup(func() { storage.Close() })

 	expr, err := parser.ParseExpr(`http_requests < 100`)
 	require.NoError(t, err)

@@ -247,7 +251,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {

 	var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
 	res, err := ruleWithoutExternalLabels.Eval(
-		suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0,
+		context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
 	)
 	require.NoError(t, err)
 	for _, smpl := range res {

@@ -261,7 +265,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
 	}

 	res, err = ruleWithExternalLabels.Eval(
-		suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0,
+		context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
 	)
 	require.NoError(t, err)
 	for _, smpl := range res {

@@ -278,14 +282,11 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
 }

 func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 1m
 			http_requests{job="app-server", instance="0"}	75 85 70 70
 	`)
-	require.NoError(t, err)
-	defer suite.Close()
-
-	require.NoError(t, suite.Run())
+	t.Cleanup(func() { storage.Close() })

 	expr, err := parser.ParseExpr(`http_requests < 100`)
 	require.NoError(t, err)

@@ -343,7 +344,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {

 	var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
 	res, err := ruleWithoutExternalURL.Eval(
-		suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0,
+		context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
 	)
 	require.NoError(t, err)
 	for _, smpl := range res {

@@ -357,7 +358,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
 	}

 	res, err = ruleWithExternalURL.Eval(
-		suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0,
+		context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
 	)
 	require.NoError(t, err)
 	for _, smpl := range res {

@@ -374,14 +375,11 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
 }

 func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 1m
 			http_requests{job="app-server", instance="0"}	75 85 70 70
 	`)
-	require.NoError(t, err)
-	defer suite.Close()
-
-	require.NoError(t, suite.Run())
+	t.Cleanup(func() { storage.Close() })

 	expr, err := parser.ParseExpr(`http_requests < 100`)
 	require.NoError(t, err)

@@ -415,7 +413,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {

 	var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
 	res, err := rule.Eval(
-		suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0,
+		context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
 	)
 	require.NoError(t, err)
 	for _, smpl := range res {

@@ -431,14 +429,11 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
 }

 func TestAlertingRuleQueryInTemplate(t *testing.T) {
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 1m
 			http_requests{job="app-server", instance="0"}	70 85 70 70
 	`)
-	require.NoError(t, err)
-	defer suite.Close()
-
-	require.NoError(t, suite.Run())
+	t.Cleanup(func() { storage.Close() })

 	expr, err := parser.ParseExpr(`sum(http_requests) < 100`)
 	require.NoError(t, err)

@@ -474,7 +469,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }};
 				require.Fail(t, "unexpected blocking when template expanding.")
 			}
 		}
-		return EngineQueryFunc(suite.QueryEngine(), suite.Storage())(ctx, q, ts)
+		return EngineQueryFunc(testEngine, storage)(ctx, q, ts)
 	}
 	go func() {
 		<-startQueryCh

@@ -485,7 +480,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }};
 		close(getDoneCh)
 	}()
 	_, err = ruleWithQueryInTemplate.Eval(
-		suite.Context(), 0, evalTime, slowQueryFunc, nil, 0,
+		context.TODO(), 0, evalTime, slowQueryFunc, nil, 0,
 	)
 	require.NoError(t, err)
 }

@@ -543,15 +538,12 @@ func TestAlertingRuleDuplicate(t *testing.T) {
 }

 func TestAlertingRuleLimit(t *testing.T) {
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 1m
 			metric{label="1"} 1
|
||||||
metric{label="2"} 1
|
metric{label="2"} 1
|
||||||
`)
|
`)
|
||||||
require.NoError(t, err)
|
t.Cleanup(func() { storage.Close() })
|
||||||
defer suite.Close()
|
|
||||||
|
|
||||||
require.NoError(t, suite.Run())
|
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
limit int
|
limit int
|
||||||
|
@ -588,7 +580,7 @@ func TestAlertingRuleLimit(t *testing.T) {
|
||||||
evalTime := time.Unix(0, 0)
|
evalTime := time.Unix(0, 0)
|
||||||
|
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
switch _, err := rule.Eval(suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); {
|
switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); {
|
||||||
case err != nil:
|
case err != nil:
|
||||||
require.EqualError(t, err, test.err)
|
require.EqualError(t, err, test.err)
|
||||||
case test.err != "":
|
case test.err != "":
|
||||||
|
@ -730,14 +722,11 @@ func TestSendAlertsDontAffectActiveAlerts(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKeepFiringFor(t *testing.T) {
|
func TestKeepFiringFor(t *testing.T) {
|
||||||
suite, err := promql.NewTest(t, `
|
storage := promql.LoadedStorage(t, `
|
||||||
load 1m
|
load 1m
|
||||||
http_requests{job="app-server", instance="0"} 75 85 70 70 10x5
|
http_requests{job="app-server", instance="0"} 75 85 70 70 10x5
|
||||||
`)
|
`)
|
||||||
require.NoError(t, err)
|
t.Cleanup(func() { storage.Close() })
|
||||||
defer suite.Close()
|
|
||||||
|
|
||||||
require.NoError(t, suite.Run())
|
|
||||||
|
|
||||||
expr, err := parser.ParseExpr(`http_requests > 50`)
|
expr, err := parser.ParseExpr(`http_requests > 50`)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -820,7 +809,7 @@ func TestKeepFiringFor(t *testing.T) {
|
||||||
t.Logf("case %d", i)
|
t.Logf("case %d", i)
|
||||||
evalTime := baseTime.Add(time.Duration(i) * time.Minute)
|
evalTime := baseTime.Add(time.Duration(i) * time.Minute)
|
||||||
result[0].T = timestamp.FromTime(evalTime)
|
result[0].T = timestamp.FromTime(evalTime)
|
||||||
res, err := rule.Eval(suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
|
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
|
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
|
||||||
|
@ -837,20 +826,17 @@ func TestKeepFiringFor(t *testing.T) {
|
||||||
require.Equal(t, result, filteredRes)
|
require.Equal(t, result, filteredRes)
|
||||||
}
|
}
|
||||||
evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
|
evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
|
||||||
res, err := rule.Eval(suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
|
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 0, len(res))
|
require.Equal(t, 0, len(res))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPendingAndKeepFiringFor(t *testing.T) {
|
func TestPendingAndKeepFiringFor(t *testing.T) {
|
||||||
suite, err := promql.NewTest(t, `
|
storage := promql.LoadedStorage(t, `
|
||||||
load 1m
|
load 1m
|
||||||
http_requests{job="app-server", instance="0"} 75 10x10
|
http_requests{job="app-server", instance="0"} 75 10x10
|
||||||
`)
|
`)
|
||||||
require.NoError(t, err)
|
t.Cleanup(func() { storage.Close() })
|
||||||
defer suite.Close()
|
|
||||||
|
|
||||||
require.NoError(t, suite.Run())
|
|
||||||
|
|
||||||
expr, err := parser.ParseExpr(`http_requests > 50`)
|
expr, err := parser.ParseExpr(`http_requests > 50`)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -877,7 +863,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) {
|
||||||
|
|
||||||
baseTime := time.Unix(0, 0)
|
baseTime := time.Unix(0, 0)
|
||||||
result.T = timestamp.FromTime(baseTime)
|
result.T = timestamp.FromTime(baseTime)
|
||||||
res, err := rule.Eval(suite.Context(), 0, baseTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
|
res, err := rule.Eval(context.TODO(), 0, baseTime, EngineQueryFunc(testEngine, storage), nil, 0)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Len(t, res, 2)
|
require.Len(t, res, 2)
|
||||||
|
@ -892,7 +878,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
evalTime := baseTime.Add(time.Minute)
|
evalTime := baseTime.Add(time.Minute)
|
||||||
res, err = rule.Eval(suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
|
res, err = rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 0, len(res))
|
require.Equal(t, 0, len(res))
|
||||||
}
|
}
|
||||||
|
|
|
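Note: every hunk above applies the same mechanical conversion, replacing the old promql.NewTest suite with a pre-loaded test storage. A minimal sketch of the pattern these tests converge on (the package-level testEngine comes from TestMain in the next file; the sketch itself is illustrative, not part of this change):

package rules

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/promql"
)

// promql.LoadedStorage parses the load block, ingests the samples into a
// teststorage.TestStorage, and the old NewTest/Run()/Close() lifecycle
// collapses into a single t.Cleanup.
func TestLoadedStoragePattern(t *testing.T) {
	storage := promql.LoadedStorage(t, `
		load 1m
			http_requests{job="app-server", instance="0"} 75 85 70 70
	`)
	t.Cleanup(func() { storage.Close() })

	// Queries go through an explicit engine plus storage instead of the suite.
	vector, err := EngineQueryFunc(testEngine, storage)(context.TODO(), "http_requests < 100", time.Unix(60, 0))
	require.NoError(t, err)
	require.Len(t, vector, 1) // the sample at t=60s is 85, which is below 100
}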
@@ -49,16 +49,12 @@ func TestMain(m *testing.M) {
 }
 
 func TestAlertingRule(t *testing.T) {
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 5m
 			http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"}	75 85 95 105 105 95 85
 			http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"}	80 90 100 110 120 130 140
 	`)
-	require.NoError(t, err)
-	defer suite.Close()
-
-	err = suite.Run()
-	require.NoError(t, err)
+	t.Cleanup(func() { storage.Close() })
 
 	expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
 	require.NoError(t, err)
@@ -163,7 +159,7 @@ func TestAlertingRule(t *testing.T) {
 
 		evalTime := baseTime.Add(test.time)
 
-		res, err := rule.Eval(suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
+		res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
 		require.NoError(t, err)
 
 		var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
@@ -195,16 +191,14 @@ func TestAlertingRule(t *testing.T) {
 func TestForStateAddSamples(t *testing.T) {
 	for _, evalDelay := range []time.Duration{0, time.Minute} {
 		t.Run(fmt.Sprintf("evalDelay %s", evalDelay.String()), func(t *testing.T) {
-			suite, err := promql.NewTest(t, `
+			storage := promql.LoadedStorage(t, `
 		load 5m
 			http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"}	75 85 95 105 105 95 85
 			http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"}	80 90 100 110 120 130 140
 	`)
-			require.NoError(t, err)
-			defer suite.Close()
+			t.Cleanup(func() { storage.Close()
 
-			err = suite.Run()
-			require.NoError(t, err)
+			})
 
 			expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
 			require.NoError(t, err)
@@ -315,7 +309,7 @@ func TestForStateAddSamples(t *testing.T) {
 					forState = float64(value.StaleNaN)
 				}
 
-				res, err := rule.Eval(suite.Context(), evalDelay, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
+				res, err := rule.Eval(context.TODO(), evalDelay, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
 				require.NoError(t, err)
 
 				var filteredRes promql.Vector // After removing 'ALERTS' samples.
@@ -359,24 +353,20 @@ func sortAlerts(items []*Alert) {
 }
 
 func TestForStateRestore(t *testing.T) {
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 5m
 		http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"}	75 85 50 0 0 25 0 0 40 0 120
 		http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"}	125 90 60 0 0 25 0 0 40 0 130
 	`)
-	require.NoError(t, err)
-	defer suite.Close()
-
-	err = suite.Run()
-	require.NoError(t, err)
+	t.Cleanup(func() { storage.Close() })
 
 	expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
 	require.NoError(t, err)
 
 	opts := &ManagerOptions{
-		QueryFunc:       EngineQueryFunc(suite.QueryEngine(), suite.Storage()),
-		Appendable:      suite.Storage(),
-		Queryable:       suite.Storage(),
+		QueryFunc:       EngineQueryFunc(testEngine, storage),
+		Appendable:      storage,
+		Queryable:       storage,
 		Context:         context.Background(),
 		Logger:          log.NewNopLogger(),
 		NotifyFunc:      func(ctx context.Context, expr string, alerts ...*Alert) {},
@@ -410,7 +400,7 @@ func TestForStateRestore(t *testing.T) {
 	baseTime := time.Unix(0, 0)
 	for _, duration := range initialRuns {
 		evalTime := baseTime.Add(duration)
-		group.Eval(suite.Context(), evalTime)
+		group.Eval(context.TODO(), evalTime)
 	}
 
 	exp := rule.ActiveAlerts()
@@ -475,7 +465,7 @@ func TestForStateRestore(t *testing.T) {
 
 			restoreTime := baseTime.Add(tst.restoreDuration).Add(evalDelay)
 			// First eval before restoration.
-			newGroup.Eval(suite.Context(), restoreTime)
+			newGroup.Eval(context.TODO(), restoreTime)
 			// Restore happens here.
 			newGroup.RestoreForState(restoreTime)
 
@@ -524,7 +514,7 @@ func TestForStateRestore(t *testing.T) {
 	// Testing the grace period.
 	for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} {
 		evalTime := baseTime.Add(duration)
-		group.Eval(suite.Context(), evalTime)
+		group.Eval(context.TODO(), evalTime)
 	}
 
 	for _, evalDelay := range []time.Duration{0, time.Minute} {
@@ -1706,16 +1696,11 @@ groups:
 }
 
 func TestRuleGroupEvalIterationFunc(t *testing.T) {
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 5m
 		http_requests{instance="0"} 75 85 50 0 0 25 0 0 40 0 120
 	`)
-	require.NoError(t, err)
-	defer suite.Close()
-
-	err = suite.Run()
-	require.NoError(t, err)
+	t.Cleanup(func() { storage.Close() })
 
 	expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
 	require.NoError(t, err)
@@ -1761,9 +1746,9 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) {
 
 	testFunc := func(tst testInput) {
 		opts := &ManagerOptions{
-			QueryFunc:       EngineQueryFunc(suite.QueryEngine(), suite.Storage()),
-			Appendable:      suite.Storage(),
-			Queryable:       suite.Storage(),
+			QueryFunc:       EngineQueryFunc(testEngine, storage),
+			Appendable:      storage,
+			Queryable:       storage,
 			Context:         context.Background(),
 			Logger:          log.NewNopLogger(),
 			NotifyFunc:      func(ctx context.Context, expr string, alerts ...*Alert) {},
@@ -1828,15 +1813,11 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) {
 }
 
 func TestNativeHistogramsInRecordingRules(t *testing.T) {
-	suite, err := promql.NewTest(t, "")
-	require.NoError(t, err)
-	t.Cleanup(suite.Close)
-
-	err = suite.Run()
-	require.NoError(t, err)
+	storage := teststorage.New(t)
+	t.Cleanup(func() { storage.Close() })
 
 	// Add some histograms.
-	db := suite.TSDB()
+	db := storage.DB
 	hists := tsdbutil.GenerateTestHistograms(5)
 	ts := time.Now()
 	app := db.Appender(context.Background())
@@ -1848,9 +1829,9 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
 	require.NoError(t, app.Commit())
 
 	opts := &ManagerOptions{
-		QueryFunc:  EngineQueryFunc(suite.QueryEngine(), suite.Storage()),
-		Appendable: suite.Storage(),
-		Queryable:  suite.Storage(),
+		QueryFunc:  EngineQueryFunc(testEngine, storage),
+		Appendable: storage,
+		Queryable:  storage,
 		Context:    context.Background(),
 		Logger:     log.NewNopLogger(),
 	}
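The native-histogram test above swaps the empty promql suite for a raw teststorage.TestStorage so it can append histograms straight into the TSDB. A sketch of that ingestion path, reusing the helpers visible in the hunk (the function and metric names here are hypothetical; AppendHistogram is the standard appender method):

import (
	"context"
	"strconv"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
	"github.com/prometheus/prometheus/util/teststorage"
)

// appendTestHistograms is a hypothetical helper mirroring the test setup:
// teststorage exposes the underlying *tsdb.DB, and native histograms go in
// via the appender rather than via a promql load block.
func appendTestHistograms(t *testing.T) *teststorage.TestStorage {
	storage := teststorage.New(t)
	t.Cleanup(func() { storage.Close() })

	db := storage.DB
	hists := tsdbutil.GenerateTestHistograms(5)
	ts := time.Now()

	app := db.Appender(context.Background())
	for i, h := range hists {
		l := labels.FromStrings("__name__", "histogram_metric", "idx", strconv.Itoa(i))
		_, err := app.AppendHistogram(0, l, ts.UnixMilli(), h, nil)
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())
	return storage
}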
@@ -109,27 +109,22 @@ var ruleEvalTestScenarios = []struct {
 	},
 }
 
-func setUpRuleEvalTest(t require.TestingT) *promql.Test {
-	suite, err := promql.NewTest(t, `
+func setUpRuleEvalTest(t require.TestingT) *teststorage.TestStorage {
+	return promql.LoadedStorage(t, `
 		load 1m
 			metric{label_a="1",label_b="3"} 1
 			metric{label_a="2",label_b="4"} 10
 	`)
-	require.NoError(t, err)
-
-	return suite
 }
 
 func TestRuleEval(t *testing.T) {
-	suite := setUpRuleEvalTest(t)
-	defer suite.Close()
-
-	require.NoError(t, suite.Run())
+	storage := setUpRuleEvalTest(t)
+	t.Cleanup(func() { storage.Close() })
 
 	for _, scenario := range ruleEvalTestScenarios {
 		t.Run(scenario.name, func(t *testing.T) {
 			rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels)
-			result, err := rule.Eval(suite.Context(), 0, ruleEvaluationTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
+			result, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0)
 			require.NoError(t, err)
 			require.Equal(t, scenario.expected, result)
 		})
@@ -137,10 +132,8 @@ func TestRuleEval(t *testing.T) {
 }
 
 func BenchmarkRuleEval(b *testing.B) {
-	suite := setUpRuleEvalTest(b)
-	defer suite.Close()
-
-	require.NoError(b, suite.Run())
+	storage := setUpRuleEvalTest(b)
+	b.Cleanup(func() { storage.Close() })
 
 	for _, scenario := range ruleEvalTestScenarios {
 		b.Run(scenario.name, func(b *testing.B) {
@@ -149,7 +142,7 @@ func BenchmarkRuleEval(b *testing.B) {
 			b.ResetTimer()
 
 			for i := 0; i < b.N; i++ {
-				_, err := rule.Eval(suite.Context(), 0, ruleEvaluationTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
+				_, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0)
 				if err != nil {
 					require.NoError(b, err)
 				}
@@ -184,15 +177,12 @@ func TestRuleEvalDuplicate(t *testing.T) {
 }
 
 func TestRecordingRuleLimit(t *testing.T) {
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 1m
 			metric{label="1"} 1
 			metric{label="2"} 1
 	`)
-	require.NoError(t, err)
-	defer suite.Close()
-
-	require.NoError(t, suite.Run())
+	t.Cleanup(func() { storage.Close() })
 
 	tests := []struct {
 		limit int
@@ -223,7 +213,7 @@ func TestRecordingRuleLimit(t *testing.T) {
 	evalTime := time.Unix(0, 0)
 
 	for _, test := range tests {
-		switch _, err := rule.Eval(suite.Context(), 0, evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); {
+		switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); {
 		case err != nil:
 			require.EqualError(t, err, test.err)
 		case test.err != "":
@@ -357,7 +357,7 @@ func (m *Manager) TargetsActive() map[string][]*Target {
 	return targets
 }
 
-// TargetsDropped returns the dropped targets during relabelling.
+// TargetsDropped returns the dropped targets during relabelling, subject to KeepDroppedTargets limit.
 func (m *Manager) TargetsDropped() map[string][]*Target {
 	m.mtxScrape.Lock()
 	defer m.mtxScrape.Unlock()
@@ -368,3 +368,14 @@ func (m *Manager) TargetsDropped() map[string][]*Target {
 	}
 	return targets
 }
+
+func (m *Manager) TargetsDroppedCounts() map[string]int {
+	m.mtxScrape.Lock()
+	defer m.mtxScrape.Unlock()
+
+	counts := make(map[string]int, len(m.scrapePools))
+	for tset, sp := range m.scrapePools {
+		counts[tset] = sp.droppedTargetsCount
+	}
+	return counts
+}
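The new TargetsDroppedCounts accessor lets callers report per-pool drop totals without materializing the target objects, which the next file caps via KeepDroppedTargets. A hypothetical caller, for illustration only:

import (
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"

	"github.com/prometheus/prometheus/scrape"
)

// logDroppedCounts is not part of this diff; it shows the intended read
// path: the count keeps growing even when the retained slice is truncated.
func logDroppedCounts(logger log.Logger, m *scrape.Manager) {
	for pool, n := range m.TargetsDroppedCounts() {
		level.Info(logger).Log("msg", "dropped targets", "scrape_pool", pool, "count", n)
	}
}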
@@ -242,8 +242,9 @@ type scrapePool struct {
 	targetMtx sync.Mutex
 	// activeTargets and loops must always be synchronized to have the same
 	// set of hashes.
 	activeTargets map[uint64]*Target
-	droppedTargets []*Target
+	droppedTargets      []*Target // Subject to KeepDroppedTargets limit.
+	droppedTargetsCount int       // Count of all dropped targets.
 
 	// Constructor for new scrape loops. This is settable for testing convenience.
 	newLoop func(scrapeLoopOptions) loop
@@ -354,12 +355,19 @@ func (sp *scrapePool) ActiveTargets() []*Target {
 	return tActive
 }
 
+// Return dropped targets, subject to KeepDroppedTargets limit.
 func (sp *scrapePool) DroppedTargets() []*Target {
 	sp.targetMtx.Lock()
 	defer sp.targetMtx.Unlock()
 	return sp.droppedTargets
 }
 
+func (sp *scrapePool) DroppedTargetsCount() int {
+	sp.targetMtx.Lock()
+	defer sp.targetMtx.Unlock()
+	return sp.droppedTargetsCount
+}
+
 // stop terminates all scrape loops and returns after they all terminated.
 func (sp *scrapePool) stop() {
 	sp.mtx.Lock()
@@ -506,6 +514,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
 	var targets []*Target
 	lb := labels.NewBuilder(labels.EmptyLabels())
 	sp.droppedTargets = []*Target{}
+	sp.droppedTargetsCount = 0
 	for _, tg := range tgs {
 		targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort, targets, lb)
 		for _, err := range failures {
@@ -520,7 +529,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
 			case nonEmpty:
 				all = append(all, t)
 			case !t.discoveredLabels.IsEmpty():
-				sp.droppedTargets = append(sp.droppedTargets, t)
+				if sp.config.KeepDroppedTargets == 0 || uint(len(sp.droppedTargets)) < sp.config.KeepDroppedTargets {
+					sp.droppedTargets = append(sp.droppedTargets, t)
+				}
+				sp.droppedTargetsCount++
 			}
 		}
 	}
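The Sync hunk above implements a simple bounded-retention rule: every dropped target is counted, but at most KeepDroppedTargets of them are kept (zero means keep all). The same pattern in isolation, as a hypothetical generic helper:

// keepBounded appends item only while the retained slice is under limit
// (limit == 0 keeps everything) and always increments the total count, so
// *count >= len(kept) holds afterwards; that is the invariant the updated
// TestDroppedTargetsList below checks.
func keepBounded[T any](kept []T, count *int, limit uint, item T) []T {
	if limit == 0 || uint(len(kept)) < limit {
		kept = append(kept, item)
	}
	*count++
	return kept
}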
@@ -93,21 +93,25 @@ func TestDroppedTargetsList(t *testing.T) {
 			{
 				Targets: []model.LabelSet{
 					{model.AddressLabel: "127.0.0.1:9090"},
+					{model.AddressLabel: "127.0.0.1:9091"},
 				},
 			},
 		}
 		sp, _                  = newScrapePool(cfg, app, 0, nil, &Options{})
 		expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}"
-		expectedLength         = 1
+		expectedLength         = 2
 	)
 	sp.Sync(tgs)
 	sp.Sync(tgs)
-	if len(sp.droppedTargets) != expectedLength {
-		t.Fatalf("Length of dropped targets exceeded expected length, expected %v, got %v", expectedLength, len(sp.droppedTargets))
-	}
-	if sp.droppedTargets[0].DiscoveredLabels().String() != expectedLabelSetString {
-		t.Fatalf("Got %v, expected %v", sp.droppedTargets[0].DiscoveredLabels().String(), expectedLabelSetString)
-	}
+	require.Equal(t, expectedLength, len(sp.droppedTargets))
+	require.Equal(t, expectedLength, sp.droppedTargetsCount)
+	require.Equal(t, expectedLabelSetString, sp.droppedTargets[0].DiscoveredLabels().String())
+
+	// Check that count is still correct when we don't retain all dropped targets.
+	sp.config.KeepDroppedTargets = 1
+	sp.Sync(tgs)
+	require.Equal(t, 1, len(sp.droppedTargets))
+	require.Equal(t, expectedLength, sp.droppedTargetsCount)
 }
 
 // TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated
@@ -1981,13 +1985,14 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
 
 func TestScrapeLoopAppendExemplar(t *testing.T) {
 	tests := []struct {
-		title           string
-		scrapeText      string
-		contentType     string
-		discoveryLabels []string
-		floats          []floatSample
-		histograms      []histogramSample
-		exemplars       []exemplar.Exemplar
+		title                   string
+		scrapeClassicHistograms bool
+		scrapeText              string
+		contentType             string
+		discoveryLabels         []string
+		floats                  []floatSample
+		histograms              []histogramSample
+		exemplars               []exemplar.Exemplar
 	}{
 		{
 			title: "Metric without exemplars",
@@ -2141,6 +2146,115 @@ metric: <
 				{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false},
 			},
 		},
+		{
+			title: "Native histogram with two exemplars scraped as classic histogram",
+			scrapeText: `name: "test_histogram"
+help: "Test histogram with many buckets removed to keep it manageable in size."
+type: HISTOGRAM
+metric: <
+  histogram: <
+    sample_count: 175
+    sample_sum: 0.0008280461746287094
+    bucket: <
+      cumulative_count: 2
+      upper_bound: -0.0004899999999999998
+    >
+    bucket: <
+      cumulative_count: 4
+      upper_bound: -0.0003899999999999998
+      exemplar: <
+        label: <
+          name: "dummyID"
+          value: "59727"
+        >
+        value: -0.00039
+        timestamp: <
+          seconds: 1625851155
+          nanos: 146848499
+        >
+      >
+    >
+    bucket: <
+      cumulative_count: 16
+      upper_bound: -0.0002899999999999998
+      exemplar: <
+        label: <
+          name: "dummyID"
+          value: "5617"
+        >
+        value: -0.00029
+      >
+    >
+    schema: 3
+    zero_threshold: 2.938735877055719e-39
+    zero_count: 2
+    negative_span: <
+      offset: -162
+      length: 1
+    >
+    negative_span: <
+      offset: 23
+      length: 4
+    >
+    negative_delta: 1
+    negative_delta: 3
+    negative_delta: -2
+    negative_delta: -1
+    negative_delta: 1
+    positive_span: <
+      offset: -161
+      length: 1
+    >
+    positive_span: <
+      offset: 8
+      length: 3
+    >
+    positive_delta: 1
+    positive_delta: 2
+    positive_delta: -1
+    positive_delta: -1
+  >
+  timestamp_ms: 1234568
+>
+
+`,
+			scrapeClassicHistograms: true,
+			contentType:             "application/vnd.google.protobuf",
+			floats: []floatSample{
+				{metric: labels.FromStrings("__name__", "test_histogram_count"), t: 1234568, f: 175},
+				{metric: labels.FromStrings("__name__", "test_histogram_sum"), t: 1234568, f: 0.0008280461746287094},
+				{metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), t: 1234568, f: 2},
+				{metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), t: 1234568, f: 4},
+				{metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), t: 1234568, f: 16},
+				{metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175},
+			},
+			histograms: []histogramSample{{
+				t: 1234568,
+				h: &histogram.Histogram{
+					Count:         175,
+					ZeroCount:     2,
+					Sum:           0.0008280461746287094,
+					ZeroThreshold: 2.938735877055719e-39,
+					Schema:        3,
+					PositiveSpans: []histogram.Span{
+						{Offset: -161, Length: 1},
+						{Offset: 8, Length: 3},
+					},
+					NegativeSpans: []histogram.Span{
+						{Offset: -162, Length: 1},
+						{Offset: 23, Length: 4},
+					},
+					PositiveBuckets: []int64{1, 2, -1, -1},
+					NegativeBuckets: []int64{1, 3, -2, -1, 1},
+				},
+			}},
+			exemplars: []exemplar.Exemplar{
+				{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
+				{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false},
+				{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
+				{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false},
+			},
+		},
 	}
 
 	for _, test := range tests {
@@ -2167,7 +2281,7 @@ metric: <
 			nil,
 			0,
 			0,
-			false,
+			test.scrapeClassicHistograms,
 			false,
 			false,
 			nil,
@@ -2177,6 +2291,9 @@ metric: <
 		now := time.Now()
 
 		for i := range test.floats {
+			if test.floats[i].t != 0 {
+				continue
+			}
 			test.floats[i].t = timestamp.FromTime(now)
 		}
 
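The scrapeClassicHistograms flag threaded through the test table above makes the scrape loop ingest a protobuf native histogram both natively and as classic _count/_sum/_bucket series, which is exactly what the added case asserts (float bucket samples plus a histogramSample). Per-job configuration presumably drives this; a sketch with the field name assumed from this release line:

import "github.com/prometheus/prometheus/config"

// Hypothetical snippet: enable classic ingestion alongside native
// histograms for one scrape job (field name assumed, not shown in this diff).
var cfg = &config.ScrapeConfig{
	JobName:                 "app",
	ScrapeClassicHistograms: true,
}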
@@ -29,4 +29,4 @@ jobs:
       - name: Lint
         uses: golangci/golangci-lint-action@v3.4.0
         with:
-          version: v1.53.3
+          version: v1.54.2
@@ -85,7 +85,7 @@ function bumpVersion() {
     fi
   done
   # increase the version on all packages
-  npm version "${version}" --workspaces
+  npm version "${version}" --workspaces --include-workspace-root
 }
 
 if [[ "$1" == "--copy" ]]; then
@@ -19,7 +19,7 @@ import (
 
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/prometheus/tsdb/tsdbutil"
+	"github.com/prometheus/prometheus/tsdb/chunks"
 )
 
 // BufferedSeriesIterator wraps an iterator with a look-back buffer.
@@ -69,7 +69,7 @@ func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool {
 
 // PeekBack returns the nth previous element of the iterator. If there is none buffered,
 // ok is false.
-func (b *BufferedSeriesIterator) PeekBack(n int) (sample tsdbutil.Sample, ok bool) {
+func (b *BufferedSeriesIterator) PeekBack(n int) (sample chunks.Sample, ok bool) {
 	return b.buf.nthLast(n)
 }
 
@@ -247,7 +247,7 @@ type sampleRing struct {
 	// allowed to be populated!) This avoids the overhead of the interface
 	// wrapper for the happy (and by far most common) case of homogenous
 	// samples.
-	iBuf     []tsdbutil.Sample
+	iBuf     []chunks.Sample
 	fBuf     []fSample
 	hBuf     []hSample
 	fhBuf    []fhSample
@@ -289,7 +289,7 @@ func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing {
 	case chunkenc.ValFloatHistogram:
 		r.fhBuf = make([]fhSample, size)
 	default:
-		r.iBuf = make([]tsdbutil.Sample, size)
+		r.iBuf = make([]chunks.Sample, size)
 	}
 	return r
 }
@@ -383,7 +383,7 @@ func (it *sampleRingIterator) AtT() int64 {
 	return it.t
 }
 
-func (r *sampleRing) at(i int) tsdbutil.Sample {
+func (r *sampleRing) at(i int) chunks.Sample {
 	j := (r.f + i) % len(r.iBuf)
 	return r.iBuf[j]
 }
@@ -408,7 +408,7 @@ func (r *sampleRing) atFH(i int) fhSample {
 // implementation. If you know you are dealing with one of the implementations
 // from this package (fSample, hSample, fhSample), call one of the specialized
 // methods addF, addH, or addFH for better performance.
-func (r *sampleRing) add(s tsdbutil.Sample) {
+func (r *sampleRing) add(s chunks.Sample) {
 	if r.bufInUse == noBuf {
 		// First sample.
 		switch s := s.(type) {
@@ -519,7 +519,7 @@ func (r *sampleRing) addFH(s fhSample) {
 	}
 }
 
-// genericAdd is a generic implementation of adding a tsdbutil.Sample
+// genericAdd is a generic implementation of adding a chunks.Sample
 // implementation to a buffer of a sample ring. However, the Go compiler
 // currently (go1.20) decides to not expand the code during compile time, but
 // creates dynamic code to handle the different types. That has a significant
@@ -529,7 +529,7 @@ func (r *sampleRing) addFH(s fhSample) {
 // Therefore, genericAdd has been manually implemented for all the types
 // (addSample, addF, addH, addFH) below.
 //
-// func genericAdd[T tsdbutil.Sample](s T, buf []T, r *sampleRing) []T {
+// func genericAdd[T chunks.Sample](s T, buf []T, r *sampleRing) []T {
 //	l := len(buf)
 //	// Grow the ring buffer if it fits no more elements.
 //	if l == 0 {
@@ -568,15 +568,15 @@ func (r *sampleRing) addFH(s fhSample) {
 // }
 
 // addSample is a handcoded specialization of genericAdd (see above).
-func addSample(s tsdbutil.Sample, buf []tsdbutil.Sample, r *sampleRing) []tsdbutil.Sample {
+func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sample {
 	l := len(buf)
 	// Grow the ring buffer if it fits no more elements.
 	if l == 0 {
-		buf = make([]tsdbutil.Sample, 16)
+		buf = make([]chunks.Sample, 16)
 		l = 16
 	}
 	if l == r.l {
-		newBuf := make([]tsdbutil.Sample, 2*l)
+		newBuf := make([]chunks.Sample, 2*l)
 		copy(newBuf[l+r.f:], buf[r.f:])
 		copy(newBuf, buf[:r.f])
 
@@ -748,7 +748,7 @@ func (r *sampleRing) reduceDelta(delta int64) bool {
 	return true
 }
 
-func genericReduceDelta[T tsdbutil.Sample](buf []T, r *sampleRing) {
+func genericReduceDelta[T chunks.Sample](buf []T, r *sampleRing) {
 	// Free head of the buffer of samples that just fell out of the range.
 	l := len(buf)
 	tmin := buf[r.i].T() - r.delta
@@ -762,7 +762,7 @@ func genericReduceDelta[T tsdbutil.Sample](buf []T, r *sampleRing) {
 	}
 
 // nthLast returns the nth most recent element added to the ring.
-func (r *sampleRing) nthLast(n int) (tsdbutil.Sample, bool) {
+func (r *sampleRing) nthLast(n int) (chunks.Sample, bool) {
 	if n > r.l {
 		return fSample{}, false
 	}
@@ -779,8 +779,8 @@ func (r *sampleRing) nthLast(n int) (tsdbutil.Sample, bool) {
 	}
 }
 
-func (r *sampleRing) samples() []tsdbutil.Sample {
-	res := make([]tsdbutil.Sample, r.l)
+func (r *sampleRing) samples() []chunks.Sample {
+	res := make([]chunks.Sample, r.l)
 
 	k := r.f + r.l
 	var j int
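The buffer.go changes above are a mechanical rename: the Sample interface moved from tsdb/tsdbutil to tsdb/chunks, with behaviour unchanged. A small usage sketch of the renamed API (hypothetical example; the nop iterator just keeps it self-contained):

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func peekBackExample() {
	it := storage.NewBufferIterator(chunkenc.NewNopIterator(), 60_000)
	for it.Next() != chunkenc.ValNone {
		// Drain the underlying iterator into the look-back buffer.
	}
	// PeekBack now yields a chunks.Sample (previously tsdbutil.Sample).
	if s, ok := it.PeekBack(1); ok {
		fmt.Println(s.T(), s.F())
	}
}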
@@ -25,6 +25,7 @@ import (
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
 	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 )
 
@@ -62,116 +63,116 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
 		{
 			name: "one querier, two series",
 			querierSeries: [][]Series{{
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
 			}},
 			expected: NewMockSeriesSet(
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
 			),
 		},
 		{
 			name: "two queriers, one different series each",
 			querierSeries: [][]Series{{
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
 			}, {
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
 			}},
 			expected: NewMockSeriesSet(
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
 			),
 		},
 		{
 			name: "two time unsorted queriers, two series each",
 			querierSeries: [][]Series{{
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}, fSample{6, 6}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}, fSample{6, 6}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
 			}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}, fSample{4, 4}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}),
 			}},
 			expected: NewMockSeriesSet(
 				NewListSeries(
 					labels.FromStrings("bar", "baz"),
-					[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}},
+					[]chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}},
 				),
 				NewListSeries(
 					labels.FromStrings("foo", "bar"),
-					[]tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}},
+					[]chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}},
 				),
 			),
 		},
 		{
 			name: "five queriers, only two queriers have two time unsorted series each",
 			querierSeries: [][]Series{{}, {}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}, fSample{6, 6}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}, fSample{6, 6}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
 			}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}, fSample{4, 4}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}),
 			}, {}},
 			expected: NewMockSeriesSet(
 				NewListSeries(
 					labels.FromStrings("bar", "baz"),
-					[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}},
+					[]chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}},
 				),
 				NewListSeries(
 					labels.FromStrings("foo", "bar"),
-					[]tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}},
+					[]chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}},
 				),
 			),
 		},
 		{
 			name: "two queriers, only two queriers have two time unsorted series each, with 3 noop and one nil querier together",
 			querierSeries: [][]Series{{}, {}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}, fSample{6, 6}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}, fSample{6, 6}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
 			}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}, fSample{4, 4}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}),
 			}, {}},
 			extraQueriers: []Querier{NoopQuerier(), NoopQuerier(), nil, NoopQuerier()},
 			expected: NewMockSeriesSet(
 				NewListSeries(
 					labels.FromStrings("bar", "baz"),
-					[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}},
+					[]chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}, fSample{6, 6}},
 				),
 				NewListSeries(
 					labels.FromStrings("foo", "bar"),
-					[]tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}},
+					[]chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}},
 				),
 			),
 		},
 		{
 			name: "two queriers, with two series, one is overlapping",
 			querierSeries: [][]Series{{}, {}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
 			}, {
-				NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 22}, fSample{3, 32}}),
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}, fSample{4, 4}}),
+				NewListSeries(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 22}, fSample{3, 32}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}, fSample{4, 4}}),
 			}, {}},
 			expected: NewMockSeriesSet(
 				NewListSeries(
 					labels.FromStrings("bar", "baz"),
-					[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}},
+					[]chunks.Sample{fSample{1, 1}, fSample{2, 21}, fSample{3, 31}, fSample{5, 5}, fSample{6, 6}},
 				),
 				NewListSeries(
 					labels.FromStrings("foo", "bar"),
-					[]tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}},
+					[]chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}},
 				),
 			),
 		},
 		{
 			name: "two queries, one with NaN samples series",
 			querierSeries: [][]Series{{
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, math.NaN()}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}}),
 			}, {
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{1, 1}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{1, 1}}),
 			}},
 			expected: NewMockSeriesSet(
-				NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, math.NaN()}, fSample{1, 1}}),
+				NewListSeries(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}, fSample{1, 1}}),
 			),
 		},
 	} {
@@ -245,108 +246,108 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
 		{
 			name: "one querier, two series",
 			chkQuerierSeries: [][]ChunkSeries{{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
 			}},
 			expected: NewMockChunkSeriesSet(
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
 			),
 		},
 		{
 			name: "two secondaries, one different series each",
 			chkQuerierSeries: [][]ChunkSeries{{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
 			}, {
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
 			}},
 			expected: NewMockChunkSeriesSet(
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
 			),
 		},
 		{
 			name: "two secondaries, two not in time order series each",
 			chkQuerierSeries: [][]ChunkSeries{{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}}, []tsdbutil.Sample{fSample{6, 6}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
 			}, {
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}}, []tsdbutil.Sample{fSample{4, 4}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}),
 			}},
 			expected: NewMockChunkSeriesSet(
 				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-					[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}},
-					[]tsdbutil.Sample{fSample{3, 3}},
-					[]tsdbutil.Sample{fSample{5, 5}},
-					[]tsdbutil.Sample{fSample{6, 6}},
+					[]chunks.Sample{fSample{1, 1}, fSample{2, 2}},
+					[]chunks.Sample{fSample{3, 3}},
+					[]chunks.Sample{fSample{5, 5}},
+					[]chunks.Sample{fSample{6, 6}},
[]chunks.Sample{fSample{6, 6}},
|
||||||
),
|
),
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"),
|
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"),
|
||||||
[]tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}},
|
[]chunks.Sample{fSample{0, 0}, fSample{1, 1}},
|
||||||
[]tsdbutil.Sample{fSample{2, 2}},
|
[]chunks.Sample{fSample{2, 2}},
|
||||||
[]tsdbutil.Sample{fSample{3, 3}},
|
[]chunks.Sample{fSample{3, 3}},
|
||||||
[]tsdbutil.Sample{fSample{4, 4}},
|
[]chunks.Sample{fSample{4, 4}},
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "five secondaries, only two have two not in time order series each",
|
name: "five secondaries, only two have two not in time order series each",
|
||||||
chkQuerierSeries: [][]ChunkSeries{{}, {}, {
|
chkQuerierSeries: [][]ChunkSeries{{}, {}, {
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}}, []tsdbutil.Sample{fSample{6, 6}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}),
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
|
||||||
}, {
|
}, {
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}}, []tsdbutil.Sample{fSample{4, 4}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}),
|
||||||
}, {}},
|
}, {}},
|
||||||
expected: NewMockChunkSeriesSet(
|
expected: NewMockChunkSeriesSet(
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
|
||||||
[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}},
|
[]chunks.Sample{fSample{1, 1}, fSample{2, 2}},
|
||||||
[]tsdbutil.Sample{fSample{3, 3}},
|
[]chunks.Sample{fSample{3, 3}},
|
||||||
[]tsdbutil.Sample{fSample{5, 5}},
|
[]chunks.Sample{fSample{5, 5}},
|
||||||
[]tsdbutil.Sample{fSample{6, 6}},
|
[]chunks.Sample{fSample{6, 6}},
|
||||||
),
|
),
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"),
|
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"),
|
||||||
[]tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}},
|
[]chunks.Sample{fSample{0, 0}, fSample{1, 1}},
|
||||||
[]tsdbutil.Sample{fSample{2, 2}},
|
[]chunks.Sample{fSample{2, 2}},
|
||||||
[]tsdbutil.Sample{fSample{3, 3}},
|
[]chunks.Sample{fSample{3, 3}},
|
||||||
[]tsdbutil.Sample{fSample{4, 4}},
|
[]chunks.Sample{fSample{4, 4}},
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "two secondaries, with two not in time order series each, with 3 noop queries and one nil together",
|
name: "two secondaries, with two not in time order series each, with 3 noop queries and one nil together",
|
||||||
chkQuerierSeries: [][]ChunkSeries{{
|
chkQuerierSeries: [][]ChunkSeries{{
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{5, 5}}, []tsdbutil.Sample{fSample{6, 6}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{5, 5}}, []chunks.Sample{fSample{6, 6}}),
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}}, []tsdbutil.Sample{fSample{2, 2}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}}, []chunks.Sample{fSample{2, 2}}),
|
||||||
}, {
|
}, {
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{3, 3}}, []tsdbutil.Sample{fSample{4, 4}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{3, 3}}, []chunks.Sample{fSample{4, 4}}),
|
||||||
}},
|
}},
|
||||||
extraQueriers: []ChunkQuerier{NoopChunkedQuerier(), NoopChunkedQuerier(), nil, NoopChunkedQuerier()},
|
extraQueriers: []ChunkQuerier{NoopChunkedQuerier(), NoopChunkedQuerier(), nil, NoopChunkedQuerier()},
|
||||||
expected: NewMockChunkSeriesSet(
|
expected: NewMockChunkSeriesSet(
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
|
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
|
||||||
[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}},
|
[]chunks.Sample{fSample{1, 1}, fSample{2, 2}},
|
||||||
[]tsdbutil.Sample{fSample{3, 3}},
|
[]chunks.Sample{fSample{3, 3}},
|
||||||
[]tsdbutil.Sample{fSample{5, 5}},
|
[]chunks.Sample{fSample{5, 5}},
|
||||||
[]tsdbutil.Sample{fSample{6, 6}},
|
[]chunks.Sample{fSample{6, 6}},
|
||||||
),
|
),
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"),
|
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"),
|
||||||
[]tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}},
|
[]chunks.Sample{fSample{0, 0}, fSample{1, 1}},
|
||||||
[]tsdbutil.Sample{fSample{2, 2}},
|
[]chunks.Sample{fSample{2, 2}},
|
||||||
[]tsdbutil.Sample{fSample{3, 3}},
|
[]chunks.Sample{fSample{3, 3}},
|
||||||
[]tsdbutil.Sample{fSample{4, 4}},
|
[]chunks.Sample{fSample{4, 4}},
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "two queries, one with NaN samples series",
|
name: "two queries, one with NaN samples series",
|
||||||
chkQuerierSeries: [][]ChunkSeries{{
|
chkQuerierSeries: [][]ChunkSeries{{
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, math.NaN()}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}}),
|
||||||
}, {
|
}, {
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{1, 1}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{1, 1}}),
|
||||||
}},
|
}},
|
||||||
expected: NewMockChunkSeriesSet(
|
expected: NewMockChunkSeriesSet(
|
||||||
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{fSample{0, math.NaN()}}, []tsdbutil.Sample{fSample{1, 1}}),
|
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []chunks.Sample{fSample{0, math.NaN()}}, []chunks.Sample{fSample{1, 1}}),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
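In these chunk-merger fixtures, each []chunks.Sample argument passed to NewListChunkSeriesFromSamples is encoded as its own chunk. A hedged sketch of that property (storage package test scope; the expected count assumes the constructor's one-chunk-per-slice encoding shown in the series.go part of this diff):

    cs := NewListChunkSeriesFromSamples(
    	labels.FromStrings("bar", "baz"),
    	[]chunks.Sample{fSample{1, 1}, fSample{2, 2}}, // chunk 1
    	[]chunks.Sample{fSample{3, 3}},                // chunk 2
    )
    n, err := cs.ChunkCount()
    // expect n == 2 and err == nil for well-formed float samples
    _, _ = n, err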

@@ -408,9 +409,9 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
 		{
 			name: "single series",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
 		},
 		{
 			name: "two empty series",

@@ -423,150 +424,150 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
 		{
 			name: "two non overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}, []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}, []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
 		},
 		{
 			name: "two non overlapping in reverse order",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}, []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}, []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
 		},
 		{
 			name: "two overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{8, 8}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{8, 8}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{7, 7}, fSample{8, 8}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{7, 7}, fSample{8, 8}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
 		},
 		{
 			name: "two duplicated",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
 		},
 		{
 			name: "three overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{4, 4}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{4, 4}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 6}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 6}}),
 		},
 		{
 			name: "three in chained overlap",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{4, 4}, fSample{6, 66}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{4, 4}, fSample{6, 66}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{6, 6}, fSample{10, 10}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{6, 6}, fSample{10, 10}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 66}, fSample{10, 10}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}, fSample{6, 66}, fSample{10, 10}}),
 		},
 		{
 			name: "three in chained overlap complex",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{5, 5}}, []tsdbutil.Sample{fSample{10, 10}, fSample{15, 15}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{5, 5}}, []chunks.Sample{fSample{10, 10}, fSample{15, 15}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{20, 20}}, []tsdbutil.Sample{fSample{25, 25}, fSample{30, 30}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{20, 20}}, []chunks.Sample{fSample{25, 25}, fSample{30, 30}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{18, 18}, fSample{26, 26}}, []tsdbutil.Sample{fSample{31, 31}, fSample{35, 35}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{18, 18}, fSample{26, 26}}, []chunks.Sample{fSample{31, 31}, fSample{35, 35}}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{fSample{0, 0}, fSample{2, 2}, fSample{5, 5}, fSample{10, 10}, fSample{15, 15}, fSample{18, 18}, fSample{20, 20}, fSample{25, 25}, fSample{26, 26}, fSample{30, 30}},
+				[]chunks.Sample{fSample{0, 0}, fSample{2, 2}, fSample{5, 5}, fSample{10, 10}, fSample{15, 15}, fSample{18, 18}, fSample{20, 20}, fSample{25, 25}, fSample{26, 26}, fSample{30, 30}},
-				[]tsdbutil.Sample{fSample{31, 31}, fSample{35, 35}},
+				[]chunks.Sample{fSample{31, 31}, fSample{35, 35}},
 			),
 		},
 		{
 			name: "110 overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 110)), // [0 - 110)
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(0, 110)), // [0 - 110)
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(60, 50)), // [60 - 110)
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(60, 50)), // [60 - 110)
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				tsdbutil.GenerateSamples(0, 110),
+				chunks.GenerateSamples(0, 110),
 			),
 		},
 		{
 			name: "150 overlapping samples, split chunk",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 90)), // [0 - 90)
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(0, 90)), // [0 - 90)
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(60, 90)), // [90 - 150)
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(60, 90)), // [90 - 150)
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				tsdbutil.GenerateSamples(0, 120),
+				chunks.GenerateSamples(0, 120),
-				tsdbutil.GenerateSamples(120, 30),
+				chunks.GenerateSamples(120, 30),
 			),
 		},
 		{
 			name: "histogram chunks overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(0), histogramSample(5)}, []tsdbutil.Sample{histogramSample(10), histogramSample(15)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(0), histogramSample(5)}, []chunks.Sample{histogramSample(10), histogramSample(15)}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(2), histogramSample(20)}, []tsdbutil.Sample{histogramSample(25), histogramSample(30)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(2), histogramSample(20)}, []chunks.Sample{histogramSample(25), histogramSample(30)}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(18), histogramSample(26)}, []tsdbutil.Sample{histogramSample(31), histogramSample(35)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(18), histogramSample(26)}, []chunks.Sample{histogramSample(31), histogramSample(35)}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{histogramSample(0), histogramSample(2), histogramSample(5), histogramSample(10), histogramSample(15), histogramSample(18), histogramSample(20), histogramSample(25), histogramSample(26), histogramSample(30)},
+				[]chunks.Sample{histogramSample(0), histogramSample(2), histogramSample(5), histogramSample(10), histogramSample(15), histogramSample(18), histogramSample(20), histogramSample(25), histogramSample(26), histogramSample(30)},
-				[]tsdbutil.Sample{histogramSample(31), histogramSample(35)},
+				[]chunks.Sample{histogramSample(31), histogramSample(35)},
 			),
 		},
 		{
 			name: "histogram chunks overlapping with float chunks",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(0), histogramSample(5)}, []tsdbutil.Sample{histogramSample(10), histogramSample(15)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(0), histogramSample(5)}, []chunks.Sample{histogramSample(10), histogramSample(15)}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{12, 12}}, []tsdbutil.Sample{fSample{14, 14}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{12, 12}}, []chunks.Sample{fSample{14, 14}}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{histogramSample(0)},
+				[]chunks.Sample{histogramSample(0)},
-				[]tsdbutil.Sample{fSample{1, 1}},
+				[]chunks.Sample{fSample{1, 1}},
-				[]tsdbutil.Sample{histogramSample(5), histogramSample(10)},
+				[]chunks.Sample{histogramSample(5), histogramSample(10)},
-				[]tsdbutil.Sample{fSample{12, 12}, fSample{14, 14}},
+				[]chunks.Sample{fSample{12, 12}, fSample{14, 14}},
-				[]tsdbutil.Sample{histogramSample(15)},
+				[]chunks.Sample{histogramSample(15)},
 			),
 		},
 		{
 			name: "float histogram chunks overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []tsdbutil.Sample{floatHistogramSample(10), floatHistogramSample(15)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []chunks.Sample{floatHistogramSample(10), floatHistogramSample(15)}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(2), floatHistogramSample(20)}, []tsdbutil.Sample{floatHistogramSample(25), floatHistogramSample(30)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(2), floatHistogramSample(20)}, []chunks.Sample{floatHistogramSample(25), floatHistogramSample(30)}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(18), floatHistogramSample(26)}, []tsdbutil.Sample{floatHistogramSample(31), floatHistogramSample(35)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(18), floatHistogramSample(26)}, []chunks.Sample{floatHistogramSample(31), floatHistogramSample(35)}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(2), floatHistogramSample(5), floatHistogramSample(10), floatHistogramSample(15), floatHistogramSample(18), floatHistogramSample(20), floatHistogramSample(25), floatHistogramSample(26), floatHistogramSample(30)},
+				[]chunks.Sample{floatHistogramSample(0), floatHistogramSample(2), floatHistogramSample(5), floatHistogramSample(10), floatHistogramSample(15), floatHistogramSample(18), floatHistogramSample(20), floatHistogramSample(25), floatHistogramSample(26), floatHistogramSample(30)},
-				[]tsdbutil.Sample{floatHistogramSample(31), floatHistogramSample(35)},
+				[]chunks.Sample{floatHistogramSample(31), floatHistogramSample(35)},
 			),
 		},
 		{
 			name: "float histogram chunks overlapping with float chunks",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []tsdbutil.Sample{floatHistogramSample(10), floatHistogramSample(15)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []chunks.Sample{floatHistogramSample(10), floatHistogramSample(15)}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{12, 12}}, []tsdbutil.Sample{fSample{14, 14}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{12, 12}}, []chunks.Sample{fSample{14, 14}}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{floatHistogramSample(0)},
+				[]chunks.Sample{floatHistogramSample(0)},
-				[]tsdbutil.Sample{fSample{1, 1}},
+				[]chunks.Sample{fSample{1, 1}},
-				[]tsdbutil.Sample{floatHistogramSample(5), floatHistogramSample(10)},
+				[]chunks.Sample{floatHistogramSample(5), floatHistogramSample(10)},
-				[]tsdbutil.Sample{fSample{12, 12}, fSample{14, 14}},
+				[]chunks.Sample{fSample{12, 12}, fSample{14, 14}},
-				[]tsdbutil.Sample{floatHistogramSample(15)},
+				[]chunks.Sample{floatHistogramSample(15)},
 			),
 		},
 		{
 			name: "float histogram chunks overlapping with histogram chunks",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []tsdbutil.Sample{floatHistogramSample(10), floatHistogramSample(15)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{floatHistogramSample(0), floatHistogramSample(5)}, []chunks.Sample{floatHistogramSample(10), floatHistogramSample(15)}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{histogramSample(1), histogramSample(12)}, []tsdbutil.Sample{histogramSample(14)}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{histogramSample(1), histogramSample(12)}, []chunks.Sample{histogramSample(14)}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{floatHistogramSample(0)},
+				[]chunks.Sample{floatHistogramSample(0)},
-				[]tsdbutil.Sample{histogramSample(1)},
+				[]chunks.Sample{histogramSample(1)},
-				[]tsdbutil.Sample{floatHistogramSample(5), floatHistogramSample(10)},
+				[]chunks.Sample{floatHistogramSample(5), floatHistogramSample(10)},
-				[]tsdbutil.Sample{histogramSample(12), histogramSample(14)},
+				[]chunks.Sample{histogramSample(12), histogramSample(14)},
-				[]tsdbutil.Sample{floatHistogramSample(15)},
+				[]chunks.Sample{floatHistogramSample(15)},
 			),
 		},
 	} {
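Two of the compacting cases above encode the merger's re-chunking rule: overlapping inputs are deduplicated into fresh chunks, and a new chunk is cut once the current one is full. A hedged sketch of the arithmetic behind the "150 overlapping samples, split chunk" expectation, assuming the usual 120-samples-per-chunk split implied by the expected output:

    // Inputs cover [0,90) and [60,150): 150 distinct samples after dedup.
    // The compactor fills one chunk with 120 samples, then starts another
    // with the remaining 150 - 120 = 30.
    first := chunks.GenerateSamples(0, 120)   // timestamps 0..119
    second := chunks.GenerateSamples(120, 30) // timestamps 120..149
    _, _ = first, second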

@@ -604,9 +605,9 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) {
 		{
 			name: "single series",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}}),
 		},
 		{
 			name: "two empty series",

@@ -619,92 +620,92 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) {
 		{
 			name: "two non overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
 			},
-			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{5, 5}}, []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}),
+			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{5, 5}}, []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
 		},
 		{
 			name: "two overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{8, 8}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{8, 8}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}}, []tsdbutil.Sample{fSample{3, 3}, fSample{8, 8}},
+				[]chunks.Sample{fSample{1, 1}, fSample{2, 2}}, []chunks.Sample{fSample{3, 3}, fSample{8, 8}},
-				[]tsdbutil.Sample{fSample{7, 7}, fSample{9, 9}}, []tsdbutil.Sample{fSample{10, 10}},
+				[]chunks.Sample{fSample{7, 7}, fSample{9, 9}}, []chunks.Sample{fSample{10, 10}},
 			),
 		},
 		{
 			name: "two duplicated",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}},
+				[]chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}},
-				[]tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}},
+				[]chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}},
 			),
 		},
 		{
 			name: "three overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{4, 4}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{4, 4}}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}},
+				[]chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}},
-				[]tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}},
+				[]chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{6, 6}},
-				[]tsdbutil.Sample{fSample{0, 0}, fSample{4, 4}},
+				[]chunks.Sample{fSample{0, 0}, fSample{4, 4}},
 			),
 		},
 		{
 			name: "three in chained overlap",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{4, 4}, fSample{6, 66}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{4, 4}, fSample{6, 66}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{6, 6}, fSample{10, 10}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{6, 6}, fSample{10, 10}}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}},
+				[]chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}},
-				[]tsdbutil.Sample{fSample{4, 4}, fSample{6, 66}},
+				[]chunks.Sample{fSample{4, 4}, fSample{6, 66}},
-				[]tsdbutil.Sample{fSample{6, 6}, fSample{10, 10}},
+				[]chunks.Sample{fSample{6, 6}, fSample{10, 10}},
 			),
 		},
 		{
 			name: "three in chained overlap complex",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{0, 0}, fSample{5, 5}}, []tsdbutil.Sample{fSample{10, 10}, fSample{15, 15}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{0, 0}, fSample{5, 5}}, []chunks.Sample{fSample{10, 10}, fSample{15, 15}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{2, 2}, fSample{20, 20}}, []tsdbutil.Sample{fSample{25, 25}, fSample{30, 30}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{20, 20}}, []chunks.Sample{fSample{25, 25}, fSample{30, 30}}),
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{fSample{18, 18}, fSample{26, 26}}, []tsdbutil.Sample{fSample{31, 31}, fSample{35, 35}}),
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{18, 18}, fSample{26, 26}}, []chunks.Sample{fSample{31, 31}, fSample{35, 35}}),
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				[]tsdbutil.Sample{fSample{0, 0}, fSample{5, 5}}, []tsdbutil.Sample{fSample{10, 10}, fSample{15, 15}},
+				[]chunks.Sample{fSample{0, 0}, fSample{5, 5}}, []chunks.Sample{fSample{10, 10}, fSample{15, 15}},
-				[]tsdbutil.Sample{fSample{2, 2}, fSample{20, 20}}, []tsdbutil.Sample{fSample{25, 25}, fSample{30, 30}},
+				[]chunks.Sample{fSample{2, 2}, fSample{20, 20}}, []chunks.Sample{fSample{25, 25}, fSample{30, 30}},
-				[]tsdbutil.Sample{fSample{18, 18}, fSample{26, 26}}, []tsdbutil.Sample{fSample{31, 31}, fSample{35, 35}},
+				[]chunks.Sample{fSample{18, 18}, fSample{26, 26}}, []chunks.Sample{fSample{31, 31}, fSample{35, 35}},
 			),
 		},
 		{
 			name: "110 overlapping",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 110)), // [0 - 110)
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(0, 110)), // [0 - 110)
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(60, 50)), // [60 - 110)
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(60, 50)), // [60 - 110)
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				tsdbutil.GenerateSamples(0, 110),
+				chunks.GenerateSamples(0, 110),
-				tsdbutil.GenerateSamples(60, 50),
+				chunks.GenerateSamples(60, 50),
 			),
 		},
 		{
 			name: "150 overlapping samples, simply concatenated and no splits",
 			input: []ChunkSeries{
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 90)), // [0 - 90)
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(0, 90)), // [0 - 90)
-				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(60, 90)), // [90 - 150)
+				NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), chunks.GenerateSamples(60, 90)), // [90 - 150)
 			},
 			expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
-				tsdbutil.GenerateSamples(0, 90),
+				chunks.GenerateSamples(0, 90),
-				tsdbutil.GenerateSamples(60, 90),
+				chunks.GenerateSamples(60, 90),
 			),
 		},
 	} {
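Taken together with the compacting table earlier, the cases above pin down the difference between the two mergers. A minimal sketch of that contrast on the "two duplicated" input, with the expectations taken from the two tables (storage package test scope):

    // Same input as the "two duplicated" cases in both tables.
    input := []ChunkSeries{
    	NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
    	NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{5, 5}}),
    }
    // Compacting merger: one deduplicated chunk {1, 2, 3, 5}.
    // Concatenating merger: both original chunks survive, duplicates included.
    _ = input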

@@ -819,20 +820,20 @@ func (m *mockChunkSeriesSet) Warnings() Warnings { return nil }
 func TestChainSampleIterator(t *testing.T) {
 	for _, tc := range []struct {
 		input []chunkenc.Iterator
-		expected []tsdbutil.Sample
+		expected []chunks.Sample
 	}{
 		{
 			input: []chunkenc.Iterator{
 				NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}}),
 			},
-			expected: []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}},
+			expected: []chunks.Sample{fSample{0, 0}, fSample{1, 1}},
 		},
 		{
 			input: []chunkenc.Iterator{
 				NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}}),
 				NewListSeriesIterator(samples{fSample{2, 2}, fSample{3, 3}}),
 			},
-			expected: []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}},
+			expected: []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}},
 		},
 		{
 			input: []chunkenc.Iterator{

@@ -840,7 +841,7 @@ func TestChainSampleIterator(t *testing.T) {
 				NewListSeriesIterator(samples{fSample{1, 1}, fSample{4, 4}}),
 				NewListSeriesIterator(samples{fSample{2, 2}, fSample{5, 5}}),
 			},
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5},
 			},
 		},

@@ -854,7 +855,7 @@ func TestChainSampleIterator(t *testing.T) {
 				NewListSeriesIterator(samples{}),
 				NewListSeriesIterator(samples{}),
 			},
-			expected: []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}},
+			expected: []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}},
 		},
 	} {
 		merged := ChainSampleIteratorFromIterators(nil, tc.input)

@@ -868,14 +869,14 @@ func TestChainSampleIteratorSeek(t *testing.T) {
 	for _, tc := range []struct {
 		input []chunkenc.Iterator
 		seek int64
-		expected []tsdbutil.Sample
+		expected []chunks.Sample
 	}{
 		{
 			input: []chunkenc.Iterator{
 				NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
 			},
 			seek: 1,
-			expected: []tsdbutil.Sample{fSample{1, 1}, fSample{2, 2}},
+			expected: []chunks.Sample{fSample{1, 1}, fSample{2, 2}},
 		},
 		{
 			input: []chunkenc.Iterator{

@@ -883,7 +884,7 @@ func TestChainSampleIteratorSeek(t *testing.T) {
 				NewListSeriesIterator(samples{fSample{2, 2}, fSample{3, 3}}),
 			},
 			seek: 2,
-			expected: []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}},
+			expected: []chunks.Sample{fSample{2, 2}, fSample{3, 3}},
 		},
 		{
 			input: []chunkenc.Iterator{

@@ -892,7 +893,7 @@ func TestChainSampleIteratorSeek(t *testing.T) {
 				NewListSeriesIterator(samples{fSample{2, 2}, fSample{5, 5}}),
 			},
 			seek: 2,
-			expected: []tsdbutil.Sample{fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}},
+			expected: []chunks.Sample{fSample{2, 2}, fSample{3, 3}, fSample{4, 4}, fSample{5, 5}},
 		},
 		{
 			input: []chunkenc.Iterator{

@@ -900,11 +901,11 @@ func TestChainSampleIteratorSeek(t *testing.T) {
 				NewListSeriesIterator(samples{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}}),
 			},
 			seek: 0,
-			expected: []tsdbutil.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}},
+			expected: []chunks.Sample{fSample{0, 0}, fSample{1, 1}, fSample{2, 2}, fSample{3, 3}},
 		},
 	} {
 		merged := ChainSampleIteratorFromIterators(nil, tc.input)
-		actual := []tsdbutil.Sample{}
+		actual := []chunks.Sample{}
 		if merged.Seek(tc.seek) == chunkenc.ValFloat {
 			t, f := merged.At()
 			actual = append(actual, fSample{t, f})

@@ -920,7 +921,7 @@ func makeSeries(numSeries, numSamples int) []Series {
 	series := []Series{}
 	for j := 0; j < numSeries; j++ {
 		labels := labels.FromStrings("foo", fmt.Sprintf("bar%d", j))
-		samples := []tsdbutil.Sample{}
+		samples := []chunks.Sample{}
 		for k := 0; k < numSamples; k++ {
 			samples = append(samples, fSample{t: int64(k), f: float64(k)})
 		}
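The Seek tests above fix the contract of the chained iterator: a successful Seek positions it on the first sample at or after the target, and that sample is read with At() before resuming Next(). A minimal sketch of the same flow outside the table runner (storage package test scope):

    merged := ChainSampleIteratorFromIterators(nil, []chunkenc.Iterator{
    	NewListSeriesIterator(samples{fSample{0, 0}, fSample{3, 3}}),
    	NewListSeriesIterator(samples{fSample{1, 1}, fSample{4, 4}}),
    })
    actual := []chunks.Sample{}
    if merged.Seek(2) == chunkenc.ValFloat {
    	t, f := merged.At() // fSample{3, 3}, the first sample at or after t=2
    	actual = append(actual, fSample{t, f})
    }
    // Draining with Next() afterwards yields fSample{4, 4}, then ValNone.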

@@ -71,8 +71,8 @@ func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
 // creates a new TimeSeries in the map if not found and returns the time series signature.
 // tsMap will be unmodified if either labels or sample is nil, but can still be modified if the exemplar is nil.
 func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label,
-	datatype string) string {
+	datatype string,
+) string {
 	if sample == nil || labels == nil || tsMap == nil {
 		return ""
 	}

@@ -164,7 +164,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
 	sort.Stable(ByLabelName(labels))

 	for _, label := range labels {
-		var finalKey = prometheustranslator.NormalizeLabel(label.Name)
+		finalKey := prometheustranslator.NormalizeLabel(label.Name)
 		if existingLabel, alreadyExists := l[finalKey]; alreadyExists {
 			existingLabel.Value = existingLabel.Value + ";" + label.Value
 			l[finalKey] = existingLabel

@@ -441,7 +441,8 @@ func maxTimestamp(a, b pcommon.Timestamp) pcommon.Timestamp {

 // addSingleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples.
 func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings,
-	tsMap map[string]*prompb.TimeSeries) {
+	tsMap map[string]*prompb.TimeSeries,
+) {
 	timestamp := convertTimeStamp(pt.Timestamp())
 	// sum and count of the summary should append suffix to baseName
 	baseName := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace)
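The createAttributes hunk above keeps its duplicate-key behavior: OTLP attribute names are normalized to valid Prometheus label names, and values whose names collide after normalization are joined with ";". A hedged sketch of that behavior; the example attribute names and the map initialization are assumptions, and NormalizeLabel is assumed to rewrite invalid label characters to "_":

    l := map[string]prompb.Label{}
    for _, label := range []prompb.Label{
    	{Name: "host_name", Value: "a"},
    	{Name: "host.name", Value: "b"}, // normalizes to "host_name" too
    } {
    	finalKey := prometheustranslator.NormalizeLabel(label.Name)
    	if existingLabel, alreadyExists := l[finalKey]; alreadyExists {
    		// Colliding keys are joined with ";" rather than dropped.
    		existingLabel.Value = existingLabel.Value + ";" + label.Value
    		l[finalKey] = existingLabel
    	} else {
    		l[finalKey] = prompb.Label{Name: finalKey, Value: label.Value}
    	}
    }
    // l["host_name"].Value == "a;b"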
@@ -29,6 +29,7 @@ import (
 	"github.com/prometheus/common/model"
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
 	"go.uber.org/atomic"

 	"github.com/prometheus/prometheus/config"

@@ -545,6 +546,10 @@ func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []p
 			attribute.String("remote_name", t.storeClient.Name()),
 			attribute.String("remote_url", t.storeClient.Endpoint()),
 		)
+		// Attributes defined by OpenTelemetry semantic conventions.
+		if try > 0 {
+			span.SetAttributes(semconv.HTTPResendCount(try))
+		}

 		begin := time.Now()
 		err := t.storeClient.Store(ctx, req)
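The queue-manager hunk attaches the OpenTelemetry http.resend_count attribute to the tracing span on every re-attempt. A minimal sketch of the pattern in isolation; the closure scaffolding is an assumption, while semconv.HTTPResendCount comes from the v1.17.0 package imported above:

    // span is assumed to be the span started by the surrounding backoff
    // loop in sendMetadataWithBackoff; try is its zero-based attempt counter.
    attemptWithSpan := func(span trace.Span, try int) {
    	if try > 0 {
    		// Record the retry per OpenTelemetry HTTP semantic conventions.
    		span.SetAttributes(semconv.HTTPResendCount(try))
    	}
    }
    _ = attemptWithSpan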
File diff suppressed because one or more lines are too long

@@ -22,7 +22,6 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
-	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 )

 type SeriesEntry struct {

@@ -44,7 +43,7 @@ func (s *ChunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator { return
 func (s *ChunkSeriesEntry) ChunkCount() (int, error) { return s.ChunkCountFn() }

 // NewListSeries returns series entry with iterator that allows to iterate over provided samples.
-func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry {
+func NewListSeries(lset labels.Labels, s []chunks.Sample) *SeriesEntry {
 	samplesS := Samples(samples(s))
 	return &SeriesEntry{
 		Lset: lset,

@@ -61,10 +60,10 @@ func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry {
 // NewListChunkSeriesFromSamples returns chunk series entry that allows to iterate over provided samples.
 // NOTE: It uses inefficient chunks encoding implementation, not caring about chunk size.
 // Use only for testing.
-func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]tsdbutil.Sample) *ChunkSeriesEntry {
+func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]chunks.Sample) *ChunkSeriesEntry {
 	chksFromSamples := make([]chunks.Meta, 0, len(samples))
 	for _, s := range samples {
-		cfs, err := tsdbutil.ChunkFromSamples(s)
+		cfs, err := chunks.ChunkFromSamples(s)
 		if err != nil {
 			return &ChunkSeriesEntry{
 				Lset: lset,

@@ -101,14 +100,14 @@ type listSeriesIterator struct {
 	idx int
 }

-type samples []tsdbutil.Sample
+type samples []chunks.Sample

-func (s samples) Get(i int) tsdbutil.Sample { return s[i] }
+func (s samples) Get(i int) chunks.Sample { return s[i] }
 func (s samples) Len() int { return len(s) }

-// Samples interface allows to work on arrays of types that are compatible with tsdbutil.Sample.
+// Samples interface allows to work on arrays of types that are compatible with chunks.Sample.
 type Samples interface {
-	Get(i int) tsdbutil.Sample
+	Get(i int) chunks.Sample
 	Len() int
 }

|
@ -429,9 +428,9 @@ func (e errChunksIterator) Err() error { return e.err }
|
||||||
// ExpandSamples iterates over all samples in the iterator, buffering all in slice.
|
// ExpandSamples iterates over all samples in the iterator, buffering all in slice.
|
||||||
// Optionally it takes samples constructor, useful when you want to compare sample slices with different
|
// Optionally it takes samples constructor, useful when you want to compare sample slices with different
|
||||||
// sample implementations. if nil, sample type from this package will be used.
|
// sample implementations. if nil, sample type from this package will be used.
|
||||||
func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) {
|
func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample) ([]chunks.Sample, error) {
|
||||||
if newSampleFn == nil {
|
if newSampleFn == nil {
|
||||||
newSampleFn = func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample {
|
newSampleFn = func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample {
|
||||||
switch {
|
switch {
|
||||||
case h != nil:
|
case h != nil:
|
||||||
return hSample{t, h}
|
return hSample{t, h}
|
||||||
|
@ -443,7 +442,7 @@ func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var result []tsdbutil.Sample
|
var result []chunks.Sample
|
||||||
for {
|
for {
|
||||||
switch iter.Next() {
|
switch iter.Next() {
|
||||||
case chunkenc.ValNone:
|
case chunkenc.ValNone:
|
||||||
|
|
|
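All the `tsdbutil.Sample` to `chunks.Sample` renames in this file (and in the test files below) target one small interface that abstracts over float, histogram, and float-histogram samples. A sketch of its shape, reconstructed from how the call sites above use it, so treat the exact method set as indicative rather than authoritative:

```go
// Sample abstracts one timestamped value; exactly one of F, H, or FH is
// meaningful, as reported by Type.
type Sample interface {
	T() int64                      // timestamp in milliseconds
	F() float64                    // plain float sample value
	H() *histogram.Histogram       // integer native histogram, if any
	FH() *histogram.FloatHistogram // float native histogram, if any
	Type() chunkenc.ValueType      // which accessor is populated
}
```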
@@ -25,7 +25,6 @@ import (
 	"github.com/prometheus/prometheus/model/value"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
-	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 )

 func TestListSeriesIterator(t *testing.T) {
@@ -111,11 +110,11 @@ func TestNewListChunkSeriesFromSamples(t *testing.T) {
 func TestChunkSeriesSetToSeriesSet(t *testing.T) {
 	series := []struct {
 		lbs labels.Labels
-		samples []tsdbutil.Sample
+		samples []chunks.Sample
 	}{
 		{
 			lbs: labels.FromStrings("__name__", "up", "instance", "localhost:8080"),
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				fSample{t: 1, f: 1},
 				fSample{t: 2, f: 2},
 				fSample{t: 3, f: 3},
@@ -123,7 +122,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
 			},
 		}, {
 			lbs: labels.FromStrings("__name__", "up", "instance", "localhost:8081"),
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				fSample{t: 1, f: 2},
 				fSample{t: 2, f: 3},
 				fSample{t: 3, f: 4},
@@ -158,84 +157,8 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
 	}
 }

-func TestSeriesToChunks(t *testing.T) {
-	generateSamples := func(count int) []tsdbutil.Sample {
-		s := make([]tsdbutil.Sample, count)
-
-		for i := 0; i < count; i++ {
-			s[i] = fSample{t: int64(i), f: float64(i) * 10.0}
-		}
-
-		return s
-	}
-
-	h := &histogram.Histogram{
-		Count: 0,
-		ZeroThreshold: 0.001,
-		Schema: 0,
-	}
-
-	testCases := map[string]struct {
-		samples []tsdbutil.Sample
-		expectedChunkCount int
-	}{
-		"no samples": {
-			samples: []tsdbutil.Sample{},
-			expectedChunkCount: 0,
-		},
-		"single sample": {
-			samples: generateSamples(1),
-			expectedChunkCount: 1,
-		},
-		"120 samples": {
-			samples: generateSamples(120),
-			expectedChunkCount: 1,
-		},
-		"121 samples": {
-			samples: generateSamples(121),
-			expectedChunkCount: 2,
-		},
-		"240 samples": {
-			samples: generateSamples(240),
-			expectedChunkCount: 2,
-		},
-		"241 samples": {
-			samples: generateSamples(241),
-			expectedChunkCount: 3,
-		},
-		"float samples and histograms": {
-			samples: []tsdbutil.Sample{
-				fSample{t: 1, f: 10},
-				fSample{t: 2, f: 20},
-				hSample{t: 3, h: h},
-				fSample{t: 4, f: 40},
-			},
-			expectedChunkCount: 3,
-		},
-	}
-
-	for name, testCase := range testCases {
-		t.Run(name, func(t *testing.T) {
-			lset := labels.FromStrings("__name__", "test_series")
-			series := NewListSeries(lset, testCase.samples)
-			encoder := NewSeriesToChunkEncoder(series)
-			require.Equal(t, lset, encoder.Labels())
-
-			chks, err := ExpandChunks(encoder.Iterator(nil))
-			require.NoError(t, err)
-			require.Len(t, chks, testCase.expectedChunkCount)
-			count, err := encoder.ChunkCount()
-			require.NoError(t, err)
-			require.Equal(t, testCase.expectedChunkCount, count)
-
-			encodedSamples := expandChunks(chks)
-			require.Equal(t, testCase.samples, encodedSamples)
-		})
-	}
-}
-
 type histogramTest struct {
-	samples []tsdbutil.Sample
+	samples []chunks.Sample
 	expectedCounterResetHeaders []chunkenc.CounterResetHeader
 }

@@ -379,34 +302,34 @@ func TestHistogramSeriesToChunks(t *testing.T) {

 	tests := map[string]histogramTest{
 		"single histogram to single chunk": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				hSample{t: 1, h: h1},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two histograms encoded to a single chunk": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				hSample{t: 1, h: h1},
 				hSample{t: 2, h: h2},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two histograms encoded to two chunks": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				hSample{t: 1, h: h2},
 				hSample{t: 2, h: h1},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
 		},
 		"histogram and stale sample encoded to two chunks": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				hSample{t: 1, h: staleHistogram},
 				hSample{t: 2, h: h1},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"histogram and reduction in bucket encoded to two chunks": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				hSample{t: 1, h: h1},
 				hSample{t: 2, h: h2down},
 			},
@@ -414,34 +337,34 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 		},
 		// Float histograms.
 		"single float histogram to single chunk": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				fhSample{t: 1, fh: fh1},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two float histograms encoded to a single chunk": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				fhSample{t: 1, fh: fh1},
 				fhSample{t: 2, fh: fh2},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two float histograms encoded to two chunks": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				fhSample{t: 1, fh: fh2},
 				fhSample{t: 2, fh: fh1},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
 		},
 		"float histogram and stale sample encoded to two chunks": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				fhSample{t: 1, fh: staleFloatHistogram},
 				fhSample{t: 2, fh: fh1},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"float histogram and reduction in bucket encoded to two chunks": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				fhSample{t: 1, fh: fh1},
 				fhSample{t: 2, fh: fh2down},
 			},
@@ -449,61 +372,61 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 		},
 		// Mixed.
 		"histogram and float histogram encoded to two chunks": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				hSample{t: 1, h: h1},
 				fhSample{t: 2, fh: fh2},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"float histogram and histogram encoded to two chunks": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				fhSample{t: 1, fh: fh1},
 				hSample{t: 2, h: h2},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"histogram and stale float histogram encoded to two chunks": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				hSample{t: 1, h: h1},
 				fhSample{t: 2, fh: staleFloatHistogram},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"single gauge histogram encoded to one chunk": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				hSample{t: 1, h: gh1},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
 		},
 		"two gauge histograms encoded to one chunk when counter increases": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				hSample{t: 1, h: gh1},
 				hSample{t: 2, h: gh2},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
 		},
 		"two gauge histograms encoded to one chunk when counter decreases": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				hSample{t: 1, h: gh2},
 				hSample{t: 2, h: gh1},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
 		},
 		"single gauge float histogram encoded to one chunk": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				fhSample{t: 1, fh: gfh1},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
 		},
 		"two float gauge histograms encoded to one chunk when counter increases": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				fhSample{t: 1, fh: gfh1},
 				fhSample{t: 2, fh: gfh2},
 			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
 		},
 		"two float gauge histograms encoded to one chunk when counter decreases": {
-			samples: []tsdbutil.Sample{
+			samples: []chunks.Sample{
 				fhSample{t: 1, fh: gfh2},
 				fhSample{t: 2, fh: gfh1},
 			},
@@ -520,7 +443,7 @@ func TestHistogramSeriesToChunks(t *testing.T) {

 func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
 	lbs := labels.FromStrings("__name__", "up", "instance", "localhost:8080")
-	copiedSamples := []tsdbutil.Sample{}
+	copiedSamples := []chunks.Sample{}
 	for _, s := range test.samples {
 		switch cs := s.(type) {
 		case hSample:
@@ -544,7 +467,7 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
 	require.Len(t, chks, count)

 	// Decode all encoded samples and assert they are equal to the original ones.
-	encodedSamples := expandChunks(chks)
+	encodedSamples := expandHistogramSamples(chks)
 	require.Equal(t, len(test.samples), len(encodedSamples))

 	for i, s := range test.samples {
@@ -583,9 +506,9 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
 	}
 }

-func expandChunks(chunks []chunks.Meta) (result []tsdbutil.Sample) {
+func expandHistogramSamples(chunks []chunks.Meta) (result []chunks.Sample) {
 	if len(chunks) == 0 {
-		return []tsdbutil.Sample{}
+		return
 	}

 	for _, chunk := range chunks {
@@ -598,9 +521,6 @@ func expandChunks(chunks []chunks.Meta) (result []tsdbutil.Sample) {
 		case chunkenc.ValFloatHistogram:
 			t, fh := it.AtFloatHistogram()
 			result = append(result, fhSample{t: t, fh: fh})
-		case chunkenc.ValFloat:
-			t, f := it.At()
-			result = append(result, fSample{t: t, f: f})
 		default:
 			panic("unexpected value type")
 		}
@@ -42,6 +42,7 @@ import (
 	"github.com/prometheus/prometheus/tsdb/record"
 	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 	"github.com/prometheus/prometheus/tsdb/wlog"
+	"github.com/prometheus/prometheus/util/zeropool"
 )

 const (
@@ -411,28 +412,13 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 	dec record.Decoder
 	lastRef = chunks.HeadSeriesRef(db.nextRef.Load())

 	decoded = make(chan interface{}, 10)
 	errCh = make(chan error, 1)
-	seriesPool = sync.Pool{
-		New: func() interface{} {
-			return []record.RefSeries{}
-		},
-	}
-	samplesPool = sync.Pool{
-		New: func() interface{} {
-			return []record.RefSample{}
-		},
-	}
-	histogramsPool = sync.Pool{
-		New: func() interface{} {
-			return []record.RefHistogramSample{}
-		},
-	}
-	floatHistogramsPool = sync.Pool{
-		New: func() interface{} {
-			return []record.RefFloatHistogramSample{}
-		},
-	}
+	seriesPool zeropool.Pool[[]record.RefSeries]
+	samplesPool zeropool.Pool[[]record.RefSample]
+	histogramsPool zeropool.Pool[[]record.RefHistogramSample]
+	floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample]
 )

 go func() {
@@ -442,7 +428,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 	rec := r.Record()
 	switch dec.Type(rec) {
 	case record.Series:
-		series := seriesPool.Get().([]record.RefSeries)[:0]
+		series := seriesPool.Get()[:0]
 		series, err = dec.Series(rec, series)
 		if err != nil {
 			errCh <- &wlog.CorruptionErr{
@@ -454,7 +440,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 		}
 		decoded <- series
 	case record.Samples:
-		samples := samplesPool.Get().([]record.RefSample)[:0]
+		samples := samplesPool.Get()[:0]
 		samples, err = dec.Samples(rec, samples)
 		if err != nil {
 			errCh <- &wlog.CorruptionErr{
@@ -466,7 +452,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 		}
 		decoded <- samples
 	case record.HistogramSamples:
-		histograms := histogramsPool.Get().([]record.RefHistogramSample)[:0]
+		histograms := histogramsPool.Get()[:0]
 		histograms, err = dec.HistogramSamples(rec, histograms)
 		if err != nil {
 			errCh <- &wlog.CorruptionErr{
@@ -478,7 +464,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 		}
 		decoded <- histograms
 	case record.FloatHistogramSamples:
-		floatHistograms := floatHistogramsPool.Get().([]record.RefFloatHistogramSample)[:0]
+		floatHistograms := floatHistogramsPool.Get()[:0]
 		floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
 		if err != nil {
 			errCh <- &wlog.CorruptionErr{
@@ -523,8 +509,6 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 			}
 		}
 	}
-
-	//nolint:staticcheck
 	seriesPool.Put(v)
 case []record.RefSample:
 	for _, entry := range v {
@@ -539,8 +523,6 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 			series.lastTs = entry.T
 		}
 	}
-
-	//nolint:staticcheck
 	samplesPool.Put(v)
 case []record.RefHistogramSample:
 	for _, entry := range v {
@@ -555,7 +537,6 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 			series.lastTs = entry.T
 		}
 	}
-	//nolint:staticcheck
 	histogramsPool.Put(v)
 case []record.RefFloatHistogramSample:
 	for _, entry := range v {
@@ -570,7 +551,6 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 			series.lastTs = entry.T
 		}
 	}
-	//nolint:staticcheck
 	floatHistogramsPool.Put(v)
 default:
 	panic(fmt.Errorf("unexpected decoded type: %T", d))
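The hunks above replace the `sync.Pool` boilerplate with the typed `util/zeropool` pool, which also removes the `Get().([]record.RefSeries)` assertions and the `//nolint:staticcheck` (SA6002) suppressions that slice-valued `sync.Pool` usage required. A minimal sketch of what such a generic pool can look like; this is an assumption about the shape, not a quote of the real `zeropool` implementation:

```go
package zeropoolsketch

import "sync"

// Pool is a sketch of a type-safe pool: values are stored behind a
// pointer so putting a slice header back does not go through the
// interface{} copy that staticcheck SA6002 warns about.
type Pool[T any] struct {
	inner sync.Pool
}

// Get returns a pooled value, or the zero value of T (for slices that is
// nil, which the call sites above immediately re-slice with [:0]).
func (p *Pool[T]) Get() T {
	if v := p.inner.Get(); v != nil {
		return *(v.(*T))
	}
	var zero T
	return zero
}

// Put stores v for later reuse.
func (p *Pool[T]) Put(v T) {
	p.inner.Put(&v)
}
```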
@@ -33,6 +33,7 @@ import (
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/storage/remote"
 	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/chunks"
 	"github.com/prometheus/prometheus/tsdb/record"
 	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 	"github.com/prometheus/prometheus/tsdb/wlog"
@@ -132,7 +133,7 @@ func TestCommit(t *testing.T) {
 	lset := labels.New(l...)

 	for i := 0; i < numDatapoints; i++ {
-		sample := tsdbutil.GenerateSamples(0, 1)
+		sample := chunks.GenerateSamples(0, 1)
 		ref, err := app.Append(0, lset, sample[0].T(), sample[0].F())
 		require.NoError(t, err)

@@ -247,7 +248,7 @@ func TestRollback(t *testing.T) {
 	lset := labels.New(l...)

 	for i := 0; i < numDatapoints; i++ {
-		sample := tsdbutil.GenerateSamples(0, 1)
+		sample := chunks.GenerateSamples(0, 1)
 		_, err := app.Append(0, lset, sample[0].T(), sample[0].F())
 		require.NoError(t, err)
 	}
@@ -36,7 +36,6 @@ import (
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
 	"github.com/prometheus/prometheus/tsdb/fileutil"
-	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 	"github.com/prometheus/prometheus/tsdb/wlog"
 )

@@ -175,7 +174,7 @@ func TestCorruptedChunk(t *testing.T) {
 	t.Run(tc.name, func(t *testing.T) {
 		tmpdir := t.TempDir()

-		series := storage.NewListSeries(labels.FromStrings("a", "b"), []tsdbutil.Sample{sample{1, 1, nil, nil}})
+		series := storage.NewListSeries(labels.FromStrings("a", "b"), []chunks.Sample{sample{1, 1, nil, nil}})
 		blockDir := createBlock(t, tmpdir, []storage.Series{series})
 		files, err := sequenceFiles(chunkDir(blockDir))
 		require.NoError(t, err)
@@ -218,7 +217,7 @@ func TestLabelValuesWithMatchers(t *testing.T) {
 	seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
 		"tens", fmt.Sprintf("value%d", i/10),
 		"unique", fmt.Sprintf("value%d", i),
-	), []tsdbutil.Sample{sample{100, 0, nil, nil}}))
+	), []chunks.Sample{sample{100, 0, nil, nil}}))
 }

 blockDir := createBlock(t, tmpdir, seriesEntries)
@@ -356,12 +355,12 @@ func TestReadIndexFormatV1(t *testing.T) {
 	q, err := NewBlockQuerier(block, 0, 1000)
 	require.NoError(t, err)
 	require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")),
-		map[string][]tsdbutil.Sample{`{foo="bar"}`: {sample{t: 1, f: 2}}})
+		map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 1, f: 2}}})

 	q, err = NewBlockQuerier(block, 0, 1000)
 	require.NoError(t, err)
 	require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$")),
-		map[string][]tsdbutil.Sample{
+		map[string][]chunks.Sample{
 			`{foo="bar"}`: {sample{t: 1, f: 2}},
 			`{foo="baz"}`: {sample{t: 3, f: 4}},
 		})
@@ -379,7 +378,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) {
 	"a_unique", fmt.Sprintf("value%d", i),
 	"b_tens", fmt.Sprintf("value%d", i/(metricCount/10)),
 	"c_ninety", fmt.Sprintf("value%d", i/(metricCount/10)/9), // "0" for the first 90%, then "1"
-), []tsdbutil.Sample{sample{100, 0, nil, nil}}))
+), []chunks.Sample{sample{100, 0, nil, nil}}))
 }

 blockDir := createBlock(b, tmpdir, seriesEntries)
@@ -415,13 +414,13 @@ func TestLabelNamesWithMatchers(t *testing.T) {
 for i := 0; i < 100; i++ {
 	seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
 		"unique", fmt.Sprintf("value%d", i),
-	), []tsdbutil.Sample{sample{100, 0, nil, nil}}))
+	), []chunks.Sample{sample{100, 0, nil, nil}}))

 	if i%10 == 0 {
 		seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
 			"tens", fmt.Sprintf("value%d", i/10),
 			"unique", fmt.Sprintf("value%d", i),
-		), []tsdbutil.Sample{sample{100, 0, nil, nil}}))
+		), []chunks.Sample{sample{100, 0, nil, nil}}))
 	}

 	if i%20 == 0 {
@@ -429,7 +428,7 @@ func TestLabelNamesWithMatchers(t *testing.T) {
 		"tens", fmt.Sprintf("value%d", i/10),
 		"twenties", fmt.Sprintf("value%d", i/20),
 		"unique", fmt.Sprintf("value%d", i),
-	), []tsdbutil.Sample{sample{100, 0, nil, nil}}))
+	), []chunks.Sample{sample{100, 0, nil, nil}}))
 	}

 }
@@ -555,7 +554,7 @@ func createHeadWithOOOSamples(tb testing.TB, w *wlog.WL, series []storage.Series
 	require.NoError(tb, err)

 	oooSampleLabels := make([]labels.Labels, 0, len(series))
-	oooSamples := make([]tsdbutil.SampleSlice, 0, len(series))
+	oooSamples := make([]chunks.SampleSlice, 0, len(series))

 	var it chunkenc.Iterator
 	totalSamples := 0
@@ -564,7 +563,7 @@ func createHeadWithOOOSamples(tb testing.TB, w *wlog.WL, series []storage.Series
 	ref := storage.SeriesRef(0)
 	it = s.Iterator(it)
 	lset := s.Labels()
-	os := tsdbutil.SampleSlice{}
+	os := chunks.SampleSlice{}
 	count := 0
 	for it.Next() == chunkenc.ValFloat {
 		totalSamples++
@@ -615,16 +614,16 @@ const (

 // genSeries generates series of float64 samples with a given number of labels and values.
 func genSeries(totalSeries, labelCount int, mint, maxt int64) []storage.Series {
-	return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, 1, func(ts int64) tsdbutil.Sample {
+	return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, 1, func(ts int64) chunks.Sample {
 		return sample{t: ts, f: rand.Float64()}
 	})
 }

 // genHistogramSeries generates series of histogram samples with a given number of labels and values.
 func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, floatHistogram bool) []storage.Series {
-	return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) tsdbutil.Sample {
+	return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) chunks.Sample {
 		h := &histogram.Histogram{
-			Count: 5 + uint64(ts*4),
+			Count: 7 + uint64(ts*5),
 			ZeroCount: 2 + uint64(ts),
 			ZeroThreshold: 0.001,
 			Sum: 18.4 * rand.Float64(),
@@ -656,14 +655,14 @@ func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, flo
 func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step int64, floatHistogram bool) []storage.Series {
 	floatSample := false
 	count := 0
-	return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) tsdbutil.Sample {
+	return genSeriesFromSampleGenerator(totalSeries, labelCount, mint, maxt, step, func(ts int64) chunks.Sample {
 		count++
 		var s sample
 		if floatSample {
 			s = sample{t: ts, f: rand.Float64()}
 		} else {
 			h := &histogram.Histogram{
-				Count: 5 + uint64(ts*4),
+				Count: 7 + uint64(ts*5),
 				ZeroCount: 2 + uint64(ts),
 				ZeroThreshold: 0.001,
 				Sum: 18.4 * rand.Float64(),
@@ -697,7 +696,7 @@ func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step in
 	})
 }

-func genSeriesFromSampleGenerator(totalSeries, labelCount int, mint, maxt, step int64, generator func(ts int64) tsdbutil.Sample) []storage.Series {
+func genSeriesFromSampleGenerator(totalSeries, labelCount int, mint, maxt, step int64, generator func(ts int64) chunks.Sample) []storage.Series {
 	if totalSeries == 0 || labelCount == 0 {
 		return nil
 	}
@@ -710,7 +709,7 @@ func genSeriesFromSampleGenerator(totalSeries, labelCount int, mint, maxt, step
 	for j := 1; len(lbls) < labelCount; j++ {
 		lbls[defaultLabelName+strconv.Itoa(j)] = defaultLabelValue + strconv.Itoa(j)
 	}
-	samples := make([]tsdbutil.Sample, 0, (maxt-mint)/step+1)
+	samples := make([]chunks.Sample, 0, (maxt-mint)/step+1)
 	for t := mint; t < maxt; t += step {
 		samples = append(samples, generator(t))
 	}
@@ -730,7 +729,7 @@ func populateSeries(lbls []map[string]string, mint, maxt int64) []storage.Series
 	if len(lbl) == 0 {
 		continue
 	}
-	samples := make([]tsdbutil.Sample, 0, maxt-mint+1)
+	samples := make([]chunks.Sample, 0, maxt-mint+1)
 	for t := mint; t <= maxt; t++ {
 		samples = append(samples, sample{t: t, f: rand.Float64()})
 	}
@@ -23,7 +23,7 @@ import (
 	"github.com/stretchr/testify/require"

 	"github.com/prometheus/prometheus/model/labels"
-	"github.com/prometheus/prometheus/tsdb/tsdbutil"
+	"github.com/prometheus/prometheus/tsdb/chunks"
 )

 func TestBlockWriter(t *testing.T) {
@@ -52,9 +52,9 @@ func TestBlockWriter(t *testing.T) {
 	q, err := NewBlockQuerier(b, math.MinInt64, math.MaxInt64)
 	require.NoError(t, err)
 	series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
-	sample1 := []tsdbutil.Sample{sample{t: ts1, f: v1}}
-	sample2 := []tsdbutil.Sample{sample{t: ts2, f: v2}}
-	expectedSeries := map[string][]tsdbutil.Sample{"{a=\"b\"}": sample1, "{c=\"d\"}": sample2}
+	sample1 := []chunks.Sample{sample{t: ts1, f: v1}}
+	sample2 := []chunks.Sample{sample{t: ts2, f: v2}}
+	expectedSeries := map[string][]chunks.Sample{"{a=\"b\"}": sample1, "{c=\"d\"}": sample2}
 	require.Equal(t, expectedSeries, series)

 	require.NoError(t, w.Close())
@@ -52,6 +52,20 @@ func IsValidEncoding(e Encoding) bool {
 	return e == EncXOR || e == EncHistogram || e == EncFloatHistogram
 }

+const (
+	// MaxBytesPerXORChunk is the maximum size an XOR chunk can be.
+	MaxBytesPerXORChunk = 1024
+	// TargetBytesPerHistogramChunk sets a size target for each histogram chunk.
+	TargetBytesPerHistogramChunk = 1024
+	// MinSamplesPerHistogramChunk sets a minimum sample count for histogram chunks. This is desirable because a single
+	// histogram sample can be larger than TargetBytesPerHistogramChunk but we want to avoid too-small sample count
+	// chunks so we can achieve some measure of compression advantage even while dealing with really large histograms.
+	// Note that this minimum sample count is not enforced across chunk range boundaries (for example, if the chunk
+	// range is 100 and the first sample in the chunk range is 99, the next sample will be included in a new chunk
+	// resulting in the old chunk containing only a single sample).
+	MinSamplesPerHistogramChunk = 10
+)
+
 // Chunk holds a sequence of sample pairs that can be iterated over and appended to.
 type Chunk interface {
 	// Bytes returns the underlying byte slice of the chunk.
@@ -94,7 +108,7 @@ type Appender interface {
 	// The returned bool isRecoded can be used to distinguish between the new Chunk c being a completely new Chunk
 	// or the current Chunk recoded to a new Chunk.
 	// The Appender app that can be used for the next append is always returned.
-	AppendHistogram(prev *HistogramAppender, t int64, h *histogram.Histogram, appendOny bool) (c Chunk, isRecoded bool, app Appender, err error)
+	AppendHistogram(prev *HistogramAppender, t int64, h *histogram.Histogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error)
 	AppendFloatHistogram(prev *FloatHistogramAppender, t int64, h *histogram.FloatHistogram, appendOnly bool) (c Chunk, isRecoded bool, app Appender, err error)
 }
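The new constants above drive chunk cutting for native histograms: grow a chunk toward the byte target, but never cut before the minimum sample count, so one huge histogram cannot force single-sample chunks. A hedged sketch of the decision they imply (the helper `shouldCutHistogramChunk` is hypothetical; the real logic lives in the head appender):

```go
// shouldCutHistogramChunk sketches the cut decision implied by the new
// chunkenc constants: keep appending until the chunk reaches the byte
// target, but only once it holds enough samples to compress usefully.
func shouldCutHistogramChunk(numSamples, chunkBytes int) bool {
	if numSamples < chunkenc.MinSamplesPerHistogramChunk {
		return false // too few samples to gain any compression yet
	}
	return chunkBytes >= chunkenc.TargetBytesPerHistogramChunk
}
```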
@@ -233,6 +233,11 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
 	if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
 		return
 	}
+	if h.CounterResetHint == histogram.CounterReset {
+		// Always honor the explicit counter reset hint.
+		counterReset = true
+		return
+	}
 	if value.IsStaleNaN(h.Sum) {
 		// This is a stale sample whose buckets and spans don't matter.
 		okToAppend = true
@@ -576,7 +581,11 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
 		return nil, false, a, nil
 	}

-	if prev != nil && h.CounterResetHint != histogram.CounterReset {
+	switch {
+	case h.CounterResetHint == histogram.CounterReset:
+		// Always honor the explicit counter reset hint.
+		a.setCounterResetHeader(CounterReset)
+	case prev != nil:
 		// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set.
 		_, _, _, counterReset := prev.appendable(h)
 		if counterReset {
@@ -584,9 +593,6 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
 		} else {
 			a.setCounterResetHeader(NotCounterReset)
 		}
-	} else {
-		// Honor the explicit counter reset hint.
-		a.setCounterResetHeader(CounterResetHeader(h.CounterResetHint))
 	}
 	return nil, false, a, nil
 }
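Net effect of the two hunks above: an explicit `histogram.CounterReset` hint now always wins, both in `appendable` and when a chunk is started fresh, instead of being mapped through `CounterResetHeader(h.CounterResetHint)`. A small in-package usage sketch, mirroring the new tests added below:

```go
h := &histogram.FloatHistogram{CounterResetHint: histogram.CounterReset}

chk := NewFloatHistogramChunk()
app, err := chk.Appender()
if err != nil {
	panic(err)
}
// First sample into a fresh chunk: the explicit hint wins, so the header
// becomes CounterReset rather than the default UnknownCounterReset.
if _, _, _, err := app.AppendFloatHistogram(nil, 0, h, false); err != nil {
	panic(err)
}
fmt.Println(chk.GetCounterResetHeader() == CounterReset) // prints true
```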
@@ -26,6 +26,46 @@ type floatResult struct {
 	h *histogram.FloatHistogram
 }

+func TestFirstFloatHistogramExplicitCounterReset(t *testing.T) {
+	tests := map[string]struct {
+		hint histogram.CounterResetHint
+		expHeader CounterResetHeader
+	}{
+		"CounterReset": {
+			hint: histogram.CounterReset,
+			expHeader: CounterReset,
+		},
+		"NotCounterReset": {
+			hint: histogram.NotCounterReset,
+			expHeader: UnknownCounterReset,
+		},
+		"UnknownCounterReset": {
+			hint: histogram.UnknownCounterReset,
+			expHeader: UnknownCounterReset,
+		},
+		"Gauge": {
+			hint: histogram.GaugeType,
+			expHeader: GaugeType,
+		},
+	}
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			h := &histogram.FloatHistogram{
+				CounterResetHint: test.hint,
+			}
+			chk := NewFloatHistogramChunk()
+			app, err := chk.Appender()
+			require.NoError(t, err)
+			newChk, recoded, newApp, err := app.AppendFloatHistogram(nil, 0, h, false)
+			require.NoError(t, err)
+			require.Nil(t, newChk)
+			require.False(t, recoded)
+			require.Equal(t, app, newApp)
+			require.Equal(t, test.expHeader, chk.GetCounterResetHeader())
+		})
+	}
+}
+
 func TestFloatHistogramChunkSameBuckets(t *testing.T) {
 	c := NewFloatHistogramChunk()
 	var exp []floatResult
@@ -399,6 +439,73 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {

 		assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
 	}
+
+	{ // New histogram that has an explicit counter reset.
+		c, hApp, ts, h1 := setup()
+		h2 := h1.Copy()
+		h2.CounterResetHint = histogram.CounterReset
+
+		assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
+	}
+
+	{ // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk.
+		_, hApp, ts, h1 := setup()
+		h2 := h1.Copy() // Identity is appendable.
+
+		nextChunk := NewFloatHistogramChunk()
+		app, err := nextChunk.Appender()
+		require.NoError(t, err)
+		newChunk, recoded, newApp, err := app.AppendFloatHistogram(hApp, ts+1, h2, false)
+		require.NoError(t, err)
+		require.Nil(t, newChunk)
+		require.False(t, recoded)
+		require.Equal(t, app, newApp)
+		assertSampleCount(t, nextChunk, 1, ValFloatHistogram)
+		require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
+	}
+
+	{ // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk.
+		_, hApp, ts, h1 := setup()
+		h2 := h1.Copy()
+		h2.Count-- // Make this not appendable due to counter reset.
+
+		nextChunk := NewFloatHistogramChunk()
+		app, err := nextChunk.Appender()
+		require.NoError(t, err)
+		newChunk, recoded, newApp, err := app.AppendFloatHistogram(hApp, ts+1, h2, false)
+		require.NoError(t, err)
+		require.Nil(t, newChunk)
+		require.False(t, recoded)
+		require.Equal(t, app, newApp)
+		assertSampleCount(t, nextChunk, 1, ValFloatHistogram)
+		require.Equal(t, CounterReset, nextChunk.GetCounterResetHeader())
+	}
+
+	{ // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk.
+		_, hApp, ts, h1 := setup()
+		h2 := h1.Copy()
+		h2.PositiveSpans = []histogram.Span{
+			{Offset: 0, Length: 3},
+			{Offset: 1, Length: 1},
+			{Offset: 1, Length: 4},
+			{Offset: 3, Length: 3},
+		}
+		h2.Count += 9
+		h2.ZeroCount++
+		h2.Sum = 30
+		h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}
+
+		nextChunk := NewFloatHistogramChunk()
+		app, err := nextChunk.Appender()
+		require.NoError(t, err)
+		newChunk, recoded, newApp, err := app.AppendFloatHistogram(hApp, ts+1, h2, false)
+		require.NoError(t, err)
+		require.Nil(t, newChunk)
+		require.False(t, recoded)
+		require.Equal(t, app, newApp)
+		assertSampleCount(t, nextChunk, 1, ValFloatHistogram)
+		require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
+	}
 }

 func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) {
@@ -253,6 +253,11 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) (
 	if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
 		return
 	}
+	if h.CounterResetHint == histogram.CounterReset {
+		// Always honor the explicit counter reset hint.
+		counterReset = true
+		return
+	}
 	if value.IsStaleNaN(h.Sum) {
 		// This is a stale sample whose buckets and spans don't matter.
 		okToAppend = true
@@ -611,7 +616,11 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
 		return nil, false, a, nil
 	}

-	if prev != nil && h.CounterResetHint != histogram.CounterReset {
+	switch {
+	case h.CounterResetHint == histogram.CounterReset:
+		// Always honor the explicit counter reset hint.
+		a.setCounterResetHeader(CounterReset)
+	case prev != nil:
 		// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set.
 		_, _, _, counterReset := prev.appendable(h)
 		if counterReset {
@@ -619,9 +628,6 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
 		} else {
 			a.setCounterResetHeader(NotCounterReset)
 		}
-	} else {
-		// Honor the explicit counter reset hint.
-		a.setCounterResetHeader(CounterResetHeader(h.CounterResetHint))
 	}
 	return nil, false, a, nil
 }
@ -27,6 +27,46 @@ type result struct {
|
||||||
fh *histogram.FloatHistogram
|
fh *histogram.FloatHistogram
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestFirstHistogramExplicitCounterReset(t *testing.T) {
|
||||||
|
tests := map[string]struct {
|
||||||
|
hint histogram.CounterResetHint
|
||||||
|
expHeader CounterResetHeader
|
||||||
|
}{
|
||||||
|
"CounterReset": {
|
||||||
|
hint: histogram.CounterReset,
|
||||||
|
expHeader: CounterReset,
|
||||||
|
},
|
||||||
|
"NotCounterReset": {
|
||||||
|
hint: histogram.NotCounterReset,
|
||||||
|
expHeader: UnknownCounterReset,
|
||||||
|
},
|
||||||
|
"UnknownCounterReset": {
|
||||||
|
hint: histogram.UnknownCounterReset,
|
||||||
|
expHeader: UnknownCounterReset,
|
||||||
|
},
|
||||||
|
"Gauge": {
|
||||||
|
hint: histogram.GaugeType,
|
||||||
|
expHeader: GaugeType,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for name, test := range tests {
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
h := &histogram.Histogram{
|
||||||
|
CounterResetHint: test.hint,
|
||||||
|
}
|
||||||
|
chk := NewHistogramChunk()
|
||||||
|
app, err := chk.Appender()
|
||||||
|
require.NoError(t, err)
|
||||||
|
newChk, recoded, newApp, err := app.AppendHistogram(nil, 0, h, false)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Nil(t, newChk)
|
||||||
|
require.False(t, recoded)
|
||||||
|
require.Equal(t, app, newApp)
|
||||||
|
require.Equal(t, test.expHeader, chk.GetCounterResetHeader())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestHistogramChunkSameBuckets(t *testing.T) {
|
func TestHistogramChunkSameBuckets(t *testing.T) {
|
||||||
c := NewHistogramChunk()
|
c := NewHistogramChunk()
|
||||||
var exp []result
|
var exp []result
|
||||||
|
@ -421,6 +461,76 @@ func TestHistogramChunkAppendable(t *testing.T) {
|
||||||
|
|
||||||
		assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
	}

	{ // New histogram that has an explicit counter reset.
		c, hApp, ts, h1 := setup()
		h2 := h1.Copy()
		h2.CounterResetHint = histogram.CounterReset

		assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
	}

	{ // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk.
		_, hApp, ts, h1 := setup()
		h2 := h1.Copy() // Identity is appendable.

		nextChunk := NewHistogramChunk()
		app, err := nextChunk.Appender()
		require.NoError(t, err)
		newChunk, recoded, newApp, err := app.AppendHistogram(hApp, ts+1, h2, false)
		require.NoError(t, err)
		require.Nil(t, newChunk)
		require.False(t, recoded)
		require.Equal(t, app, newApp)
		assertSampleCount(t, nextChunk, 1, ValHistogram)
		require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
	}

	{ // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk.
		_, hApp, ts, h1 := setup()
		h2 := h1.Copy()
		h2.Count-- // Make this not appendable due to counter reset.

		nextChunk := NewHistogramChunk()
		app, err := nextChunk.Appender()
		require.NoError(t, err)
		newChunk, recoded, newApp, err := app.AppendHistogram(hApp, ts+1, h2, false)
		require.NoError(t, err)
		require.Nil(t, newChunk)
		require.False(t, recoded)
		require.Equal(t, app, newApp)
		assertSampleCount(t, nextChunk, 1, ValHistogram)
		require.Equal(t, CounterReset, nextChunk.GetCounterResetHeader())
	}

	{ // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk.
		_, hApp, ts, h1 := setup()
		h2 := h1.Copy()
		h2.PositiveSpans = []histogram.Span{
			{Offset: 0, Length: 3},
			{Offset: 1, Length: 1},
			{Offset: 1, Length: 4},
			{Offset: 3, Length: 3},
		}
		h2.Count += 9
		h2.ZeroCount++
		h2.Sum = 30
		// Existing histogram should get values converted from the above to:
		//   6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
		// so the new histogram should have new counts >= these per-bucket counts, e.g.:
		h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)

		nextChunk := NewHistogramChunk()
		app, err := nextChunk.Appender()
		require.NoError(t, err)
		newChunk, recoded, newApp, err := app.AppendHistogram(hApp, ts+1, h2, false)
		require.NoError(t, err)
		require.Nil(t, newChunk)
		require.False(t, recoded)
		require.Equal(t, app, newApp)
		assertSampleCount(t, nextChunk, 1, ValHistogram)
		require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
	}
}
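Side note on the PositiveBuckets encoding used in the test above: the int64 slice is delta-encoded, storing the first bucket's absolute count followed by the differences between neighbouring buckets. A minimal standalone sketch (not part of this change) that decodes the deltas back into the absolute per-bucket counts named in the comment:

package main

import "fmt"

// decodeDeltas turns delta-encoded bucket counts into absolute counts.
func decodeDeltas(deltas []int64) []int64 {
	abs := make([]int64, len(deltas))
	var cur int64
	for i, d := range deltas {
		cur += d
		abs[i] = cur
	}
	return abs
}

func main() {
	fmt.Println(decodeDeltas([]int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1}))
	// Prints [7 5 1 3 1 0 2 5 5 0 1], matching the "total 30" comment above.
}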
func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {

@ -85,13 +85,21 @@ func (p HeadChunkRef) Unpack() (HeadSeriesRef, HeadChunkID) {
// - less than the above, but >= memSeries.firstID, then it's
//   memSeries.mmappedChunks[i] where i = HeadChunkID - memSeries.firstID.
//
// If memSeries.headChunks is non-nil it points to the *memChunk that holds the current
// "open" (accepting appends) instance. *memChunk is a linked list and the memChunk.next
// pointer might link to an older *memChunk instance.
// If there are multiple *memChunk instances linked from memSeries.headChunks,
// they will be m-mapped as soon as possible, leaving only the "open" *memChunk instance.
//
// Example:
// assume a memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9].
// | HeadChunkID value | refers to ... |
// |-------------------|----------------------------------------------------------------------------------------|
// | 0-6               | chunks that have been compacted to blocks, these won't return data for queries in Head |
// | 7-11              | memSeries.mmappedChunks[i] where i is 0 to 4. |
// | 12                | memSeries.headChunk |
// | 12                | *memChunk{next: nil} |
// | 13                | *memChunk{next: ^} |
// | 14                | memSeries.headChunks -> *memChunk{next: ^} |
type HeadChunkID uint64
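As an illustration of the lookup rules documented above, here is a hedged, standalone sketch (hypothetical helper, not part of this diff) that resolves a HeadChunkID the way the comment describes:

package main

import "fmt"

// resolveHeadChunkID mirrors the documented mapping: IDs below firstChunkID were
// compacted away, the next len(mmappedChunks) IDs index into mmappedChunks, and
// anything above that lives in the memSeries.headChunks linked list.
func resolveHeadChunkID(id, firstChunkID uint64, numMmapped int) string {
	switch {
	case id < firstChunkID:
		return "compacted to a block; won't return data for queries in Head"
	case id < firstChunkID+uint64(numMmapped):
		return fmt.Sprintf("memSeries.mmappedChunks[%d]", id-firstChunkID)
	default:
		return "an element of the memSeries.headChunks linked list"
	}
}

func main() {
	// firstChunkID=7 with 5 m-mapped chunks, as in the example table above.
	for _, id := range []uint64{3, 9, 13} {
		fmt.Printf("HeadChunkID %d -> %s\n", id, resolveHeadChunkID(id, 7, 5))
	}
}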

// BlockChunkRef refers to a chunk within a persisted block.

@ -132,6 +140,73 @@ type Meta struct {
	OOOLastMinTime, OOOLastMaxTime int64
}

// ChunkFromSamples requires all samples to have the same type.
func ChunkFromSamples(s []Sample) (Meta, error) {
	return ChunkFromSamplesGeneric(SampleSlice(s))
}

// ChunkFromSamplesGeneric requires all samples to have the same type.
func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
	emptyChunk := Meta{Chunk: chunkenc.NewXORChunk()}
	mint, maxt := int64(0), int64(0)

	if s.Len() > 0 {
		mint, maxt = s.Get(0).T(), s.Get(s.Len()-1).T()
	}

	if s.Len() == 0 {
		return emptyChunk, nil
	}

	sampleType := s.Get(0).Type()
	c, err := chunkenc.NewEmptyChunk(sampleType.ChunkEncoding())
	if err != nil {
		return Meta{}, err
	}

	ca, _ := c.Appender()
	var newChunk chunkenc.Chunk

	for i := 0; i < s.Len(); i++ {
		switch sampleType {
		case chunkenc.ValFloat:
			ca.Append(s.Get(i).T(), s.Get(i).F())
		case chunkenc.ValHistogram:
			newChunk, _, ca, err = ca.AppendHistogram(nil, s.Get(i).T(), s.Get(i).H(), false)
			if err != nil {
				return emptyChunk, err
			}
			if newChunk != nil {
				return emptyChunk, fmt.Errorf("did not expect to start a second chunk")
			}
		case chunkenc.ValFloatHistogram:
			newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false)
			if err != nil {
				return emptyChunk, err
			}
			if newChunk != nil {
				return emptyChunk, fmt.Errorf("did not expect to start a second chunk")
			}
		default:
			panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
		}
	}
	return Meta{
		MinTime: mint,
		MaxTime: maxt,
		Chunk:   c,
	}, nil
}

// PopulatedChunk creates a chunk populated with samples every second starting at minTime
func PopulatedChunk(numSamples int, minTime int64) (Meta, error) {
	samples := make([]Sample, numSamples)
	for i := 0; i < numSamples; i++ {
		samples[i] = sample{t: minTime + int64(i*1000), f: 1.0}
	}
	return ChunkFromSamples(samples)
}
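A short usage sketch for the new helper (hedged; it assumes the exported API added in this diff, including GenerateSamples from tsdb/chunks/samples.go below):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunks"
)

func main() {
	// 100 float samples with t=i, f=float64(i), as produced by GenerateSamples.
	samples := chunks.GenerateSamples(0, 100)

	meta, err := chunks.ChunkFromSamples(samples)
	if err != nil {
		panic(err)
	}
	// The returned Meta carries the min/max timestamps and the encoded chunk.
	fmt.Println(meta.MinTime, meta.MaxTime, meta.Chunk.NumSamples()) // 0 99 100
}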
// Iterator iterates over the chunks of a single time series.
type Iterator interface {
	// At returns the current meta.

89	tsdb/chunks/samples.go	Normal file
@ -0,0 +1,89 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunks

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

type Samples interface {
	Get(i int) Sample
	Len() int
}

type Sample interface {
	T() int64
	F() float64
	H() *histogram.Histogram
	FH() *histogram.FloatHistogram
	Type() chunkenc.ValueType
}

type SampleSlice []Sample

func (s SampleSlice) Get(i int) Sample { return s[i] }
func (s SampleSlice) Len() int         { return len(s) }

type sample struct {
	t  int64
	f  float64
	h  *histogram.Histogram
	fh *histogram.FloatHistogram
}

func (s sample) T() int64 {
	return s.t
}

func (s sample) F() float64 {
	return s.f
}

func (s sample) H() *histogram.Histogram {
	return s.h
}

func (s sample) FH() *histogram.FloatHistogram {
	return s.fh
}

func (s sample) Type() chunkenc.ValueType {
	switch {
	case s.h != nil:
		return chunkenc.ValHistogram
	case s.fh != nil:
		return chunkenc.ValFloatHistogram
	default:
		return chunkenc.ValFloat
	}
}

// GenerateSamples starting at start and counting up numSamples.
func GenerateSamples(start, numSamples int) []Sample {
	return generateSamples(start, numSamples, func(i int) Sample {
		return sample{
			t: int64(i),
			f: float64(i),
		}
	})
}

func generateSamples(start, numSamples int, gen func(int) Sample) []Sample {
	samples := make([]Sample, 0, numSamples)
	for i := start; i < start+numSamples; i++ {
		samples = append(samples, gen(i))
	}
	return samples
}
@ -41,7 +41,6 @@ import (
	"github.com/prometheus/prometheus/tsdb/fileutil"
	"github.com/prometheus/prometheus/tsdb/index"
	"github.com/prometheus/prometheus/tsdb/tombstones"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
	"github.com/prometheus/prometheus/tsdb/wlog"
)

@ -1634,7 +1633,7 @@ func TestHeadCompactionWithHistograms(t *testing.T) {
	minute := func(m int) int64 { return int64(m) * time.Minute.Milliseconds() }
	ctx := context.Background()
	appendHistogram := func(
		lbls labels.Labels, from, to int, h *histogram.Histogram, exp *[]tsdbutil.Sample,
		lbls labels.Labels, from, to int, h *histogram.Histogram, exp *[]chunks.Sample,
	) {
		t.Helper()
		app := head.Appender(ctx)

@ -1663,7 +1662,7 @@ func TestHeadCompactionWithHistograms(t *testing.T) {
		}
		require.NoError(t, app.Commit())
	}
	appendFloat := func(lbls labels.Labels, from, to int, exp *[]tsdbutil.Sample) {
	appendFloat := func(lbls labels.Labels, from, to int, exp *[]chunks.Sample) {
		t.Helper()
		app := head.Appender(ctx)
		for tsMinute := from; tsMinute <= to; tsMinute++ {

@ -1679,10 +1678,10 @@ func TestHeadCompactionWithHistograms(t *testing.T) {
		series2 = labels.FromStrings("foo", "bar2")
		series3 = labels.FromStrings("foo", "bar3")
		series4 = labels.FromStrings("foo", "bar4")
		exp1, exp2, exp3, exp4 []tsdbutil.Sample
		exp1, exp2, exp3, exp4 []chunks.Sample
	)
	h := &histogram.Histogram{
		Count: 11,
		Count: 15,
		ZeroCount: 4,
		ZeroThreshold: 0.001,
		Sum: 35.5,

@ -1737,7 +1736,7 @@ func TestHeadCompactionWithHistograms(t *testing.T) {
	require.NoError(t, err)

	actHists := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
	require.Equal(t, map[string][]tsdbutil.Sample{
	require.Equal(t, map[string][]chunks.Sample{
		series1.String(): exp1,
		series2.String(): exp2,
		series3.String(): exp3,

13	tsdb/db.go

@ -1021,6 +1021,8 @@ func (db *DB) run() {
		case db.compactc <- struct{}{}:
		default:
		}
		// We attempt mmapping of head chunks regularly.
		db.head.mmapHeadChunks()
	case <-db.compactc:
		db.metrics.compactionsTriggered.Inc()

@ -1848,6 +1850,11 @@ func (db *DB) EnableCompactions() {
	level.Info(db.logger).Log("msg", "Compactions enabled")
}

// ForceHeadMMap is intended for use only in tests and benchmarks.
func (db *DB) ForceHeadMMap() {
	db.head.mmapHeadChunks()
}

// Snapshot writes the current data to the directory. If withHead is set to true it
// will create a new block containing all data that's currently in the memory buffer/WAL.
func (db *DB) Snapshot(dir string, withHead bool) error {

@ -2153,7 +2160,8 @@ func isBlockDir(fi fs.DirEntry) bool {
	return err == nil
}

// isTmpDir returns true if the given file-info contains a block ULID or checkpoint prefix and a tmp extension.
// isTmpDir returns true if the given file-info contains a block ULID, a checkpoint prefix,
// or a chunk snapshot prefix and a tmp extension.
func isTmpDir(fi fs.DirEntry) bool {
	if !fi.IsDir() {
		return false

@ -2165,6 +2173,9 @@ func isTmpDir(fi fs.DirEntry) bool {
	if strings.HasPrefix(fn, "checkpoint.") {
		return true
	}
	if strings.HasPrefix(fn, chunkSnapshotPrefix) {
		return true
	}
	if _, err := ulid.ParseStrict(fn[:len(fn)-len(ext)]); err == nil {
		return true
	}
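For context on the isTmpDir change above, a string-level sketch of the names it now accepts (simplified: the real function also checks fi.IsDir() and parses the ULID strictly, and chunkSnapshotPrefix = "chunk_snapshot." is an assumption based on the snapshot directory names used in the tests below):

package main

import (
	"fmt"
	"strings"
)

// isTmpName approximates isTmpDir's prefix checks on a bare directory name.
func isTmpName(fn string) bool {
	if !strings.HasSuffix(fn, ".tmp") {
		return false
	}
	return strings.HasPrefix(fn, "checkpoint.") ||
		strings.HasPrefix(fn, "chunk_snapshot.") || // the case added in this change
		len(strings.TrimSuffix(fn, ".tmp")) == 26 // crude stand-in for a valid ULID
}

func main() {
	fmt.Println(isTmpName("checkpoint.00000001.tmp"))          // true
	fmt.Println(isTmpName("chunk_snapshot.0000.00000001.tmp")) // true
	fmt.Println(isTmpName("some-other-dir"))                   // false
}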

238	tsdb/db_test.go
@ -94,18 +94,18 @@ func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
}

// query runs a matcher query against the querier and fully expands its data.
func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]tsdbutil.Sample {
func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]chunks.Sample {
	ss := q.Select(false, nil, matchers...)
	defer func() {
		require.NoError(t, q.Close())
	}()

	var it chunkenc.Iterator
	result := map[string][]tsdbutil.Sample{}
	result := map[string][]chunks.Sample{}
	for ss.Next() {
		series := ss.At()

		samples := []tsdbutil.Sample{}
		samples := []chunks.Sample{}
		it = series.Iterator(it)
		for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
			switch typ {

@ -138,12 +138,12 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[str
}

// queryAndExpandChunks runs a matcher query against the querier and fully expands its data into samples.
func queryAndExpandChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Matcher) map[string][][]tsdbutil.Sample {
func queryAndExpandChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Matcher) map[string][][]chunks.Sample {
	s := queryChunks(t, q, matchers...)

	res := make(map[string][][]tsdbutil.Sample)
	res := make(map[string][][]chunks.Sample)
	for k, v := range s {
		var samples [][]tsdbutil.Sample
		var samples [][]chunks.Sample
		for _, chk := range v {
			sam, err := storage.ExpandSamples(chk.Chunk.Iterator(nil), nil)
			require.NoError(t, err)

@ -229,7 +229,7 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) {
	querier, err := db.Querier(context.TODO(), 0, 1)
	require.NoError(t, err)
	seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	require.Equal(t, map[string][]tsdbutil.Sample{}, seriesSet)
	require.Equal(t, map[string][]chunks.Sample{}, seriesSet)

	err = app.Commit()
	require.NoError(t, err)

@ -240,7 +240,7 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) {

	seriesSet = query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))

	require.Equal(t, map[string][]tsdbutil.Sample{`{foo="bar"}`: {sample{t: 0, f: 0}}}, seriesSet)
	require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 0, f: 0}}}, seriesSet)
}

// TestNoPanicAfterWALCorruption ensures that querying the db after a WAL corruption doesn't cause a panic.

@ -250,7 +250,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) {

	// Append until the first mmaped head chunk.
	// This is to ensure that all samples can be read from the mmaped chunks when the WAL is corrupted.
	var expSamples []tsdbutil.Sample
	var expSamples []chunks.Sample
	var maxt int64
	ctx := context.Background()
	{

@ -296,7 +296,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) {
	require.NoError(t, err)
	seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "", ""))
	// The last sample should be missing as it was after the WAL segment corruption.
	require.Equal(t, map[string][]tsdbutil.Sample{`{foo="bar"}`: expSamples[0 : len(expSamples)-1]}, seriesSet)
	require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: expSamples[0 : len(expSamples)-1]}, seriesSet)
	}
}

@ -319,7 +319,7 @@ func TestDataNotAvailableAfterRollback(t *testing.T) {

	seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))

	require.Equal(t, map[string][]tsdbutil.Sample{}, seriesSet)
	require.Equal(t, map[string][]chunks.Sample{}, seriesSet)
}

func TestDBAppenderAddRef(t *testing.T) {

@ -369,7 +369,7 @@ func TestDBAppenderAddRef(t *testing.T) {

	res := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

	require.Equal(t, map[string][]tsdbutil.Sample{
	require.Equal(t, map[string][]chunks.Sample{
		labels.FromStrings("a", "b").String(): {
			sample{t: 123, f: 0},
			sample{t: 124, f: 1},

@ -462,7 +462,7 @@ Outer:

	res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

	expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
	expSamples := make([]chunks.Sample, 0, len(c.remaint))
	for _, ts := range c.remaint {
		expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
	}

@ -622,7 +622,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {

	ssMap := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

	require.Equal(t, map[string][]tsdbutil.Sample{
	require.Equal(t, map[string][]chunks.Sample{
		labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}},
	}, ssMap)

@ -639,7 +639,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {

	ssMap = query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

	require.Equal(t, map[string][]tsdbutil.Sample{
	require.Equal(t, map[string][]chunks.Sample{
		labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}, sample{10, 3, nil, nil}},
	}, ssMap)
}
@ -790,7 +790,7 @@ Outer:

	res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

	expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
	expSamples := make([]chunks.Sample, 0, len(c.remaint))
	for _, ts := range c.remaint {
		expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
	}

@ -876,9 +876,9 @@ func TestDB_e2e(t *testing.T) {
		},
	}

	seriesMap := map[string][]tsdbutil.Sample{}
	seriesMap := map[string][]chunks.Sample{}
	for _, l := range lbls {
		seriesMap[labels.New(l...).String()] = []tsdbutil.Sample{}
		seriesMap[labels.New(l...).String()] = []chunks.Sample{}
	}

	db := openTestDB(t, nil, nil)

@ -891,7 +891,7 @@ func TestDB_e2e(t *testing.T) {

	for _, l := range lbls {
		lset := labels.New(l...)
		series := []tsdbutil.Sample{}
		series := []chunks.Sample{}

		ts := rand.Int63n(300)
		for i := 0; i < numDatapoints; i++ {

@ -949,7 +949,7 @@ func TestDB_e2e(t *testing.T) {
	mint := rand.Int63n(300)
	maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))

	expected := map[string][]tsdbutil.Sample{}
	expected := map[string][]chunks.Sample{}

	// Build the mockSeriesSet.
	for _, m := range matched {

@ -963,7 +963,7 @@ func TestDB_e2e(t *testing.T) {
	require.NoError(t, err)

	ss := q.Select(false, nil, qry.ms...)
	result := map[string][]tsdbutil.Sample{}
	result := map[string][]chunks.Sample{}

	for ss.Next() {
		x := ss.At()

@ -1227,7 +1227,7 @@ func TestTombstoneClean(t *testing.T) {

	res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

	expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
	expSamples := make([]chunks.Sample, 0, len(c.remaint))
	for _, ts := range c.remaint {
		expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
	}

@ -1542,6 +1542,7 @@ func TestSizeRetention(t *testing.T) {
		}
	}
	require.NoError(t, headApp.Commit())
	db.Head().mmapHeadChunks()

	require.Eventually(t, func() bool {
		return db.Head().chunkDiskMapper.IsQueueEmpty()

@ -2392,8 +2393,8 @@ func TestDBReadOnly(t *testing.T) {
		logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
		expBlocks []*Block
		expBlock *Block
		expSeries map[string][]tsdbutil.Sample
		expSeries map[string][]chunks.Sample
		expChunks map[string][][]tsdbutil.Sample
		expChunks map[string][][]chunks.Sample
		expDBHash []byte
		matchAll = labels.MustNewMatcher(labels.MatchEqual, "", "")
		err error

@ -2720,8 +2721,8 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
	require.Equal(t, map[string][]sample{`{foo="bar"}`: {{t: 0, f: 0}}}, seriesSet)
}

func assureChunkFromSamples(t *testing.T, samples []tsdbutil.Sample) chunks.Meta {
func assureChunkFromSamples(t *testing.T, samples []chunks.Sample) chunks.Meta {
	chks, err := tsdbutil.ChunkFromSamples(samples)
	chks, err := chunks.ChunkFromSamples(samples)
	require.NoError(t, err)
	return chks
}

@ -2729,11 +2730,11 @@ func assureChunkFromSamples(t *testing.T, samples []tsdbutil.Sample) chunks.Meta
// TestChunkWriter_ReadAfterWrite ensures that chunk segments are cut at the set segment size and
// that the resulting segments include the expected chunk data.
func TestChunkWriter_ReadAfterWrite(t *testing.T) {
	chk1 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 1, nil, nil}})
	chk1 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 1, nil, nil}})
	chk2 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 2, nil, nil}})
	chk2 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 2, nil, nil}})
	chk3 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 3, nil, nil}})
	chk3 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 3, nil, nil}})
	chk4 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 4, nil, nil}})
	chk4 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 4, nil, nil}})
	chk5 := assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 5, nil, nil}})
	chk5 := assureChunkFromSamples(t, []chunks.Sample{sample{1, 5, nil, nil}})
	chunkSize := len(chk1.Chunk.Bytes()) + chunks.MaxChunkLengthFieldSize + chunks.ChunkEncodingSize + crc32.Size

	tests := []struct {

@ -2933,11 +2934,11 @@ func TestRangeForTimestamp(t *testing.T) {
// Regression test for https://github.com/prometheus/prometheus/pull/6514.
func TestChunkReader_ConcurrentReads(t *testing.T) {
	chks := []chunks.Meta{
		assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 1, nil, nil}}),
		assureChunkFromSamples(t, []chunks.Sample{sample{1, 1, nil, nil}}),
		assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 2, nil, nil}}),
		assureChunkFromSamples(t, []chunks.Sample{sample{1, 2, nil, nil}}),
		assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 3, nil, nil}}),
		assureChunkFromSamples(t, []chunks.Sample{sample{1, 3, nil, nil}}),
		assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 4, nil, nil}}),
		assureChunkFromSamples(t, []chunks.Sample{sample{1, 4, nil, nil}}),
		assureChunkFromSamples(t, []tsdbutil.Sample{sample{1, 5, nil, nil}}),
		assureChunkFromSamples(t, []chunks.Sample{sample{1, 5, nil, nil}}),
	}

	tempDir := t.TempDir()

@ -3153,6 +3154,9 @@ func TestOpen_VariousBlockStates(t *testing.T) {
	tmpCheckpointDir := path.Join(tmpDir, "wal/checkpoint.00000001.tmp")
	err := os.MkdirAll(tmpCheckpointDir, 0o777)
	require.NoError(t, err)
	tmpChunkSnapshotDir := path.Join(tmpDir, chunkSnapshotPrefix+"0000.00000001.tmp")
	err = os.MkdirAll(tmpChunkSnapshotDir, 0o777)
	require.NoError(t, err)

	opts := DefaultOptions()
	opts.RetentionDuration = 0

@ -3186,6 +3190,8 @@ func TestOpen_VariousBlockStates(t *testing.T) {
	require.Equal(t, len(expectedIgnoredDirs), ignored)
	_, err = os.Stat(tmpCheckpointDir)
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(tmpChunkSnapshotDir)
	require.True(t, os.IsNotExist(err))
}

func TestOneCheckpointPerCompactCall(t *testing.T) {
@ -4157,7 +4163,7 @@ func TestOOOCompaction(t *testing.T) {
	addSample(90, 310)

	verifyDBSamples := func() {
		var series1Samples, series2Samples []tsdbutil.Sample
		var series1Samples, series2Samples []chunks.Sample
		for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, 350}} {
			fromMins, toMins := r[0], r[1]
			for min := fromMins; min <= toMins; min++ {

@ -4166,7 +4172,7 @@ func TestOOOCompaction(t *testing.T) {
				series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
			}
		}
		expRes := map[string][]tsdbutil.Sample{
		expRes := map[string][]chunks.Sample{
			series1.String(): series1Samples,
			series2.String(): series2Samples,
		}

@ -4226,14 +4232,14 @@ func TestOOOCompaction(t *testing.T) {
	checkEmptyOOOChunk(series2)

	verifySamples := func(block *Block, fromMins, toMins int64) {
		series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
		series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
		series2Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
		series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
			series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
			series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
		}
		expRes := map[string][]tsdbutil.Sample{
		expRes := map[string][]chunks.Sample{
			series1.String(): series1Samples,
			series2.String(): series2Samples,
		}

@ -4357,14 +4363,14 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
	}

	verifySamples := func(block *Block, fromMins, toMins int64) {
		series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
		series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
		series2Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
		series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
			series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
			series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
		}
		expRes := map[string][]tsdbutil.Sample{
		expRes := map[string][]chunks.Sample{
			series1.String(): series1Samples,
			series2.String(): series2Samples,
		}

@ -4457,14 +4463,14 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
	}

	verifySamples := func(block *Block, fromMins, toMins int64) {
		series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
		series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
		series2Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
		series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
			series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
			series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
		}
		expRes := map[string][]tsdbutil.Sample{
		expRes := map[string][]chunks.Sample{
			series1.String(): series1Samples,
			series2.String(): series2Samples,
		}

@ -4550,14 +4556,14 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) {
	}

	verifySamples := func(fromMins, toMins int64) {
		series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
		series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
		series2Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
		series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
			series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
			series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
		}
		expRes := map[string][]tsdbutil.Sample{
		expRes := map[string][]chunks.Sample{
			series1.String(): series1Samples,
			series2.String(): series2Samples,
		}
@ -4599,7 +4605,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
	series1 := labels.FromStrings("foo", "bar1")

	minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
	addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []tsdbutil.Sample) ([]tsdbutil.Sample, int) {
	addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample) ([]chunks.Sample, int) {
		app := db.Appender(context.Background())
		totalAppended := 0
		for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() {

@ -4650,7 +4656,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
		require.NoError(t, db.Close())
	}()

	var expSamples []tsdbutil.Sample
	var expSamples []chunks.Sample

	// Add in-order samples.
	expSamples, _ = addSample(db, tc.inOrderMinT, tc.inOrderMaxT, tc.queryMinT, tc.queryMaxT, expSamples)

@ -4684,7 +4690,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
	series1 := labels.FromStrings("foo", "bar1")

	minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
	addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []tsdbutil.Sample) ([]tsdbutil.Sample, int) {
	addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample) ([]chunks.Sample, int) {
		app := db.Appender(context.Background())
		totalAppended := 0
		for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() {

@ -4735,7 +4741,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
		require.NoError(t, db.Close())
	}()

	var expSamples []tsdbutil.Sample
	var expSamples []chunks.Sample

	// Add in-order samples.
	expSamples, _ = addSample(db, tc.inOrderMinT, tc.inOrderMaxT, tc.queryMinT, tc.queryMaxT, expSamples)

@ -4755,7 +4761,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
	require.NotNil(t, chks[series1.String()])
	require.Equal(t, 1, len(chks))
	require.Equal(t, float64(oooSamples), prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended), "number of ooo appended samples mismatch")
	var gotSamples []tsdbutil.Sample
	var gotSamples []chunks.Sample
	for _, chunk := range chks[series1.String()] {
		it := chunk.Chunk.Iterator(nil)
		for it.Next() == chunkenc.ValFloat {

@ -4783,7 +4789,7 @@ func TestOOOAppendAndQuery(t *testing.T) {
	s2 := labels.FromStrings("foo", "bar2")

	minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
	appendedSamples := make(map[string][]tsdbutil.Sample)
	appendedSamples := make(map[string][]chunks.Sample)
	totalSamples := 0
	addSample := func(lbls labels.Labels, fromMins, toMins int64, faceError bool) {
		app := db.Appender(context.Background())

@ -4820,7 +4826,7 @@ func TestOOOAppendAndQuery(t *testing.T) {
		appendedSamples[k] = v
	}

	expSamples := make(map[string][]tsdbutil.Sample)
	expSamples := make(map[string][]chunks.Sample)
	for k, samples := range appendedSamples {
		for _, s := range samples {
			if s.T() < from {

@ -4904,7 +4910,7 @@ func TestOODisabled(t *testing.T) {

	s1 := labels.FromStrings("foo", "bar1")
	minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
	expSamples := make(map[string][]tsdbutil.Sample)
	expSamples := make(map[string][]chunks.Sample)
	totalSamples := 0
	failedSamples := 0
	addSample := func(lbls labels.Labels, fromMins, toMins int64, faceError bool) {

@ -4972,7 +4978,7 @@ func TestWBLAndMmapReplay(t *testing.T) {
	s1 := labels.FromStrings("foo", "bar1")

	minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
	expSamples := make(map[string][]tsdbutil.Sample)
	expSamples := make(map[string][]chunks.Sample)
	totalSamples := 0
	addSample := func(lbls labels.Labels, fromMins, toMins int64) {
		app := db.Appender(context.Background())

@ -4988,7 +4994,7 @@ func TestWBLAndMmapReplay(t *testing.T) {
		require.NoError(t, app.Commit())
	}

	testQuery := func(exp map[string][]tsdbutil.Sample) {
	testQuery := func(exp map[string][]chunks.Sample) {
		querier, err := db.Querier(context.TODO(), math.MinInt64, math.MaxInt64)
		require.NoError(t, err)

@ -5018,7 +5024,7 @@ func TestWBLAndMmapReplay(t *testing.T) {
	ms, created, err := db.head.getOrCreate(s1.Hash(), s1)
	require.False(t, created)
	require.NoError(t, err)
	var s1MmapSamples []tsdbutil.Sample
	var s1MmapSamples []chunks.Sample
	for _, mc := range ms.ooo.oooMmappedChunks {
		chk, err := db.head.chunkDiskMapper.Chunk(mc.ref)
		require.NoError(t, err)

@ -5077,7 +5083,7 @@ func TestWBLAndMmapReplay(t *testing.T) {
	require.Equal(t, oooMint, db.head.MinOOOTime())
	require.Equal(t, oooMaxt, db.head.MaxOOOTime())
	inOrderSample := expSamples[s1.String()][len(expSamples[s1.String()])-1]
	testQuery(map[string][]tsdbutil.Sample{
	testQuery(map[string][]chunks.Sample{
		s1.String(): append(s1MmapSamples, inOrderSample),
	})
	require.NoError(t, db.Close())
@ -5248,12 +5254,12 @@ func TestOOOCompactionFailure(t *testing.T) {
	require.Equal(t, int64(0), f.Size())

	verifySamples := func(block *Block, fromMins, toMins int64) {
		series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
		series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
			series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
		}
		expRes := map[string][]tsdbutil.Sample{
		expRes := map[string][]chunks.Sample{
			series1.String(): series1Samples,
		}

@ -5296,7 +5302,7 @@ func TestWBLCorruption(t *testing.T) {
	})

	series1 := labels.FromStrings("foo", "bar1")
	var allSamples, expAfterRestart []tsdbutil.Sample
	var allSamples, expAfterRestart []chunks.Sample
	addSamples := func(fromMins, toMins int64, afterRestart bool) {
		app := db.Appender(context.Background())
		for min := fromMins; min <= toMins; min++ {

@ -5368,12 +5374,12 @@ func TestWBLCorruption(t *testing.T) {
	require.NoError(t, err)
	require.Greater(t, f2.Size(), int64(100))

	verifySamples := func(expSamples []tsdbutil.Sample) {
	verifySamples := func(expSamples []chunks.Sample) {
		sort.Slice(expSamples, func(i, j int) bool {
			return expSamples[i].T() < expSamples[j].T()
		})

		expRes := map[string][]tsdbutil.Sample{
		expRes := map[string][]chunks.Sample{
			series1.String(): expSamples,
		}

@ -5442,7 +5448,7 @@ func TestOOOMmapCorruption(t *testing.T) {
	})

	series1 := labels.FromStrings("foo", "bar1")
	var allSamples, expInMmapChunks []tsdbutil.Sample
	var allSamples, expInMmapChunks []chunks.Sample
	addSamples := func(fromMins, toMins int64, inMmapAfterCorruption bool) {
		app := db.Appender(context.Background())
		for min := fromMins; min <= toMins; min++ {

@ -5476,12 +5482,12 @@ func TestOOOMmapCorruption(t *testing.T) {
	require.NoError(t, db.head.chunkDiskMapper.CutNewFile())
	addSamples(260, 290, false)

	verifySamples := func(expSamples []tsdbutil.Sample) {
	verifySamples := func(expSamples []chunks.Sample) {
		sort.Slice(expSamples, func(i, j int) bool {
			return expSamples[i].T() < expSamples[j].T()
		})

		expRes := map[string][]tsdbutil.Sample{
		expRes := map[string][]chunks.Sample{
			series1.String(): expSamples,
		}

@ -5578,7 +5584,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
	}

	series1 := labels.FromStrings("foo", "bar1")
	addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool, allSamples []tsdbutil.Sample) []tsdbutil.Sample {
	addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool, allSamples []chunks.Sample) []chunks.Sample {
		app := db.Appender(context.Background())
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()

@ -5594,12 +5600,12 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
		return allSamples
	}

	verifySamples := func(t *testing.T, db *DB, expSamples []tsdbutil.Sample) {
	verifySamples := func(t *testing.T, db *DB, expSamples []chunks.Sample) {
		sort.Slice(expSamples, func(i, j int) bool {
			return expSamples[i].T() < expSamples[j].T()
		})

		expRes := map[string][]tsdbutil.Sample{
		expRes := map[string][]chunks.Sample{
			series1.String(): expSamples,
		}

@ -5627,7 +5633,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
	}

	t.Run("increase time window", func(t *testing.T) {
		var allSamples []tsdbutil.Sample
		var allSamples []chunks.Sample
		db := getDB(30 * time.Minute.Milliseconds())

		// In-order.

@ -5657,7 +5663,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
	})

	t.Run("decrease time window and increase again", func(t *testing.T) {
		var allSamples []tsdbutil.Sample
		var allSamples []chunks.Sample
		db := getDB(60 * time.Minute.Milliseconds())

		// In-order.

@ -5696,7 +5702,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
	})

	t.Run("disabled to enabled", func(t *testing.T) {
		var allSamples []tsdbutil.Sample
		var allSamples []chunks.Sample
		db := getDB(0)

		// In-order.

@ -5725,7 +5731,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
	})

	t.Run("enabled to disabled", func(t *testing.T) {
		var allSamples []tsdbutil.Sample
		var allSamples []chunks.Sample
		db := getDB(60 * time.Minute.Milliseconds())

		// In-order.

@ -5755,7 +5761,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
	})

	t.Run("disabled to disabled", func(t *testing.T) {
		var allSamples []tsdbutil.Sample
		var allSamples []chunks.Sample
		db := getDB(0)

		// In-order.
|
@ -5796,13 +5802,13 @@ func TestNoGapAfterRestartWithOOO(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
verifySamples := func(t *testing.T, db *DB, fromMins, toMins int64) {
|
verifySamples := func(t *testing.T, db *DB, fromMins, toMins int64) {
|
||||||
var expSamples []tsdbutil.Sample
|
var expSamples []chunks.Sample
|
||||||
for min := fromMins; min <= toMins; min++ {
|
for min := fromMins; min <= toMins; min++ {
|
||||||
ts := min * time.Minute.Milliseconds()
|
ts := min * time.Minute.Milliseconds()
|
||||||
expSamples = append(expSamples, sample{t: ts, f: float64(ts)})
|
expSamples = append(expSamples, sample{t: ts, f: float64(ts)})
|
||||||
}
|
}
|
||||||
|
|
||||||
expRes := map[string][]tsdbutil.Sample{
|
expRes := map[string][]chunks.Sample{
|
||||||
series1.String(): expSamples,
|
series1.String(): expSamples,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -5899,7 +5905,7 @@ func TestWblReplayAfterOOODisableAndRestart(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
series1 := labels.FromStrings("foo", "bar1")
|
series1 := labels.FromStrings("foo", "bar1")
|
||||||
var allSamples []tsdbutil.Sample
|
var allSamples []chunks.Sample
|
||||||
addSamples := func(fromMins, toMins int64) {
|
addSamples := func(fromMins, toMins int64) {
|
||||||
app := db.Appender(context.Background())
|
app := db.Appender(context.Background())
|
||||||
for min := fromMins; min <= toMins; min++ {
|
for min := fromMins; min <= toMins; min++ {
|
||||||
|
@ -5916,12 +5922,12 @@ func TestWblReplayAfterOOODisableAndRestart(t *testing.T) {
|
||||||
// OOO samples.
|
// OOO samples.
|
||||||
addSamples(250, 260)
|
addSamples(250, 260)
|
||||||
|
|
||||||
verifySamples := func(expSamples []tsdbutil.Sample) {
|
verifySamples := func(expSamples []chunks.Sample) {
|
||||||
sort.Slice(expSamples, func(i, j int) bool {
|
sort.Slice(expSamples, func(i, j int) bool {
|
||||||
return expSamples[i].T() < expSamples[j].T()
|
return expSamples[i].T() < expSamples[j].T()
|
||||||
})
|
})
|
||||||
|
|
||||||
expRes := map[string][]tsdbutil.Sample{
|
expRes := map[string][]chunks.Sample{
|
||||||
series1.String(): expSamples,
|
series1.String(): expSamples,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -5958,7 +5964,7 @@ func TestPanicOnApplyConfig(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
series1 := labels.FromStrings("foo", "bar1")
|
series1 := labels.FromStrings("foo", "bar1")
|
||||||
var allSamples []tsdbutil.Sample
|
var allSamples []chunks.Sample
|
||||||
addSamples := func(fromMins, toMins int64) {
|
addSamples := func(fromMins, toMins int64) {
|
||||||
app := db.Appender(context.Background())
|
app := db.Appender(context.Background())
|
||||||
for min := fromMins; min <= toMins; min++ {
|
for min := fromMins; min <= toMins; min++ {
|
||||||
|
@ -6006,7 +6012,7 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
series1 := labels.FromStrings("foo", "bar1")
|
series1 := labels.FromStrings("foo", "bar1")
|
||||||
var allSamples []tsdbutil.Sample
|
var allSamples []chunks.Sample
|
||||||
addSamples := func(fromMins, toMins int64) {
|
addSamples := func(fromMins, toMins int64) {
|
||||||
app := db.Appender(context.Background())
|
app := db.Appender(context.Background())
|
||||||
for min := fromMins; min <= toMins; min++ {
|
for min := fromMins; min <= toMins; min++ {
|
||||||
|
@ -6056,12 +6062,14 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) {
|
||||||
|
|
||||||
// Check that m-map files gets deleted properly after compactions.
|
// Check that m-map files gets deleted properly after compactions.
|
||||||
|
|
||||||
|
db.head.mmapHeadChunks()
|
||||||
checkMmapFileContents([]string{"000001", "000002"}, nil)
|
checkMmapFileContents([]string{"000001", "000002"}, nil)
|
||||||
require.NoError(t, db.Compact())
|
require.NoError(t, db.Compact())
|
||||||
checkMmapFileContents([]string{"000002"}, []string{"000001"})
|
checkMmapFileContents([]string{"000002"}, []string{"000001"})
|
||||||
require.Nil(t, ms.ooo, "OOO mmap chunk was not compacted")
|
require.Nil(t, ms.ooo, "OOO mmap chunk was not compacted")
|
||||||
|
|
||||||
addSamples(501, 650)
|
addSamples(501, 650)
|
||||||
|
db.head.mmapHeadChunks()
|
||||||
checkMmapFileContents([]string{"000002", "000003"}, []string{"000001"})
|
checkMmapFileContents([]string{"000002", "000003"}, []string{"000001"})
|
||||||
require.NoError(t, db.Compact())
|
require.NoError(t, db.Compact())
|
||||||
checkMmapFileContents(nil, []string{"000001", "000002", "000003"})
|
checkMmapFileContents(nil, []string{"000001", "000002", "000003"})
|
||||||
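The two db.head.mmapHeadChunks() calls added above are needed because, with this commit, completed head chunks are no longer m-mapped synchronously on the append path; a test that asserts on the m-map files present on disk has to trigger the m-mapping pass itself. A minimal sketch of the pattern, assuming an open test DB (series name, timestamp and value are illustrative only):

	// Appending and committing samples only grows the in-memory headChunks
	// list; completed chunks are no longer written to m-map files here.
	app := db.Appender(context.Background())
	_, err := app.Append(0, labels.FromStrings("foo", "bar1"), 1000, 1.0)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	// Trigger the m-mapping pass explicitly before asserting on the files.
	db.head.mmapHeadChunks()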
@@ -6095,7 +6103,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
 	ctx := context.Background()
 	appendHistogram := func(
 		lbls labels.Labels, tsMinute int, h *histogram.Histogram,
-		exp *[]tsdbutil.Sample, expCRH histogram.CounterResetHint,
+		exp *[]chunks.Sample, expCRH histogram.CounterResetHint,
 	) {
 		t.Helper()
 		var err error
@@ -6114,7 +6122,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
 		require.NoError(t, err)
 		require.NoError(t, app.Commit())
 	}
-	appendFloat := func(lbls labels.Labels, tsMinute int, val float64, exp *[]tsdbutil.Sample) {
+	appendFloat := func(lbls labels.Labels, tsMinute int, val float64, exp *[]chunks.Sample) {
 		t.Helper()
 		app := db.Appender(ctx)
 		_, err := app.Append(0, lbls, minute(tsMinute), val)
@@ -6123,7 +6131,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
 		*exp = append(*exp, sample{t: minute(tsMinute), f: val})
 	}
 
-	testQuery := func(name, value string, exp map[string][]tsdbutil.Sample) {
+	testQuery := func(name, value string, exp map[string][]chunks.Sample) {
 		t.Helper()
 		q, err := db.Querier(ctx, math.MinInt64, math.MaxInt64)
 		require.NoError(t, err)
@@ -6132,7 +6140,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
 	}
 
 	baseH := &histogram.Histogram{
-		Count:         11,
+		Count:         15,
 		ZeroCount:     4,
 		ZeroThreshold: 0.001,
 		Sum:           35.5,
@@ -6154,7 +6162,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
 		series2                = labels.FromStrings("foo", "bar2")
 		series3                = labels.FromStrings("foo", "bar3")
 		series4                = labels.FromStrings("foo", "bar4")
-		exp1, exp2, exp3, exp4 []tsdbutil.Sample
+		exp1, exp2, exp3, exp4 []chunks.Sample
 	)
 
 	// TODO(codesome): test everything for negative buckets as well.
@@ -6162,23 +6170,23 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
 		h := baseH.Copy() // This is shared across all sub tests.
 
 		appendHistogram(series1, 100, h, &exp1, histogram.UnknownCounterReset)
-		testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+		testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
 
 		h.PositiveBuckets[0]++
 		h.NegativeBuckets[0] += 2
 		h.Count += 10
 		appendHistogram(series1, 101, h, &exp1, histogram.NotCounterReset)
-		testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+		testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
 
 		t.Run("changing schema", func(t *testing.T) {
 			h.Schema = 2
 			appendHistogram(series1, 102, h, &exp1, histogram.UnknownCounterReset)
-			testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+			testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
 
 			// Schema back to old.
 			h.Schema = 1
 			appendHistogram(series1, 103, h, &exp1, histogram.UnknownCounterReset)
-			testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+			testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
 		})
 
 		t.Run("new buckets incoming", func(t *testing.T) {
@@ -6207,7 +6215,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
 			h.PositiveBuckets = append(h.PositiveBuckets, 1)
 			h.Count += 3
 			appendHistogram(series1, 104, h, &exp1, histogram.NotCounterReset)
-			testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+			testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
 
 			// Because of the previous two histograms being on the active chunk,
 			// and the next append is only adding a new bucket, the active chunk
@@ -6245,7 +6253,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
 			// {2, 1, -1, 0, 1} -> {2, 1, 0, -1, 0, 1}
 			h.PositiveBuckets = append(h.PositiveBuckets[:2], append([]int64{0}, h.PositiveBuckets[2:]...)...)
 			appendHistogram(series1, 105, h, &exp1, histogram.NotCounterReset)
-			testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+			testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
 
 			// We add 4 more histograms to clear out the buffer and see the re-encoded histograms.
 			appendHistogram(series1, 106, h, &exp1, histogram.NotCounterReset)
@@ -6278,14 +6286,14 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
 				exp1[l-6] = sample{t: exp1[l-6].T(), h: h6}
 			}
 
-			testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+			testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
 		})
 
 		t.Run("buckets disappearing", func(t *testing.T) {
 			h.PositiveSpans[1].Length--
 			h.PositiveBuckets = h.PositiveBuckets[:len(h.PositiveBuckets)-1]
 			appendHistogram(series1, 110, h, &exp1, histogram.CounterReset)
-			testQuery("foo", "bar1", map[string][]tsdbutil.Sample{series1.String(): exp1})
+			testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
 		})
 	})
 
@@ -6293,22 +6301,22 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
 		appendFloat(series2, 100, 100, &exp2)
 		appendFloat(series2, 101, 101, &exp2)
 		appendFloat(series2, 102, 102, &exp2)
-		testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2})
+		testQuery("foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2})
 
 		h := baseH.Copy()
 		appendHistogram(series2, 103, h, &exp2, histogram.UnknownCounterReset)
 		appendHistogram(series2, 104, h, &exp2, histogram.NotCounterReset)
 		appendHistogram(series2, 105, h, &exp2, histogram.NotCounterReset)
-		testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2})
+		testQuery("foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2})
 
 		// Switching between float and histograms again.
 		appendFloat(series2, 106, 106, &exp2)
 		appendFloat(series2, 107, 107, &exp2)
-		testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2})
+		testQuery("foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2})
 
 		appendHistogram(series2, 108, h, &exp2, histogram.UnknownCounterReset)
 		appendHistogram(series2, 109, h, &exp2, histogram.NotCounterReset)
-		testQuery("foo", "bar2", map[string][]tsdbutil.Sample{series2.String(): exp2})
+		testQuery("foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2})
 	})
 
 	t.Run("series starting with histogram and then getting float", func(t *testing.T) {
@@ -6316,21 +6324,21 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
 		appendHistogram(series3, 101, h, &exp3, histogram.UnknownCounterReset)
 		appendHistogram(series3, 102, h, &exp3, histogram.NotCounterReset)
 		appendHistogram(series3, 103, h, &exp3, histogram.NotCounterReset)
-		testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3})
+		testQuery("foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3})
 
 		appendFloat(series3, 104, 100, &exp3)
 		appendFloat(series3, 105, 101, &exp3)
 		appendFloat(series3, 106, 102, &exp3)
-		testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3})
+		testQuery("foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3})
 
 		// Switching between histogram and float again.
 		appendHistogram(series3, 107, h, &exp3, histogram.UnknownCounterReset)
 		appendHistogram(series3, 108, h, &exp3, histogram.NotCounterReset)
-		testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3})
+		testQuery("foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3})
 
 		appendFloat(series3, 109, 106, &exp3)
 		appendFloat(series3, 110, 107, &exp3)
-		testQuery("foo", "bar3", map[string][]tsdbutil.Sample{series3.String(): exp3})
+		testQuery("foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3})
 	})
 
 	t.Run("query mix of histogram and float series", func(t *testing.T) {
@@ -6339,7 +6347,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
 		appendFloat(series4, 101, 101, &exp4)
 		appendFloat(series4, 102, 102, &exp4)
 
-		testQuery("foo", "bar.*", map[string][]tsdbutil.Sample{
+		testQuery("foo", "bar.*", map[string][]chunks.Sample{
 			series1.String(): exp1,
 			series2.String(): exp2,
 			series3.String(): exp3,
@@ -6364,7 +6372,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
 	ctx := context.Background()
 
 	var it chunkenc.Iterator
-	exp := make(map[string][]tsdbutil.Sample)
+	exp := make(map[string][]chunks.Sample)
 	for _, series := range blockSeries {
 		createBlock(t, db.Dir(), series)
 
@@ -6447,7 +6455,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
 	t.Run("serial blocks with either histograms or floats in a block and not both", func(t *testing.T) {
 		testBlockQuerying(t,
 			genHistogramSeries(10, 5, minute(0), minute(119), minute(1), floatHistogram),
-			genSeriesFromSampleGenerator(10, 5, minute(120), minute(239), minute(1), func(ts int64) tsdbutil.Sample {
+			genSeriesFromSampleGenerator(10, 5, minute(120), minute(239), minute(1), func(ts int64) chunks.Sample {
				return sample{t: ts, f: rand.Float64()}
 			}),
 			genHistogramSeries(10, 5, minute(240), minute(359), minute(1), floatHistogram),
@@ -6459,7 +6467,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
 			genHistogramAndFloatSeries(10, 5, minute(0), minute(60), minute(1), floatHistogram),
 			genHistogramSeries(10, 5, minute(61), minute(120), minute(1), floatHistogram),
 			genHistogramAndFloatSeries(10, 5, minute(121), minute(180), minute(1), floatHistogram),
-			genSeriesFromSampleGenerator(10, 5, minute(181), minute(240), minute(1), func(ts int64) tsdbutil.Sample {
+			genSeriesFromSampleGenerator(10, 5, minute(181), minute(240), minute(1), func(ts int64) chunks.Sample {
				return sample{t: ts, f: rand.Float64()}
 			}),
 		)
@@ -6476,7 +6484,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
 	t.Run("overlapping blocks with only histograms and only float in a series", func(t *testing.T) {
 		testBlockQuerying(t,
 			genHistogramSeries(10, 5, minute(0), minute(120), minute(3), floatHistogram),
-			genSeriesFromSampleGenerator(10, 5, minute(1), minute(120), minute(3), func(ts int64) tsdbutil.Sample {
+			genSeriesFromSampleGenerator(10, 5, minute(1), minute(120), minute(3), func(ts int64) chunks.Sample {
				return sample{t: ts, f: rand.Float64()}
 			}),
 			genHistogramSeries(10, 5, minute(2), minute(120), minute(3), floatHistogram),
@@ -6488,7 +6496,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
 			genHistogramAndFloatSeries(10, 5, minute(0), minute(60), minute(3), floatHistogram),
 			genHistogramSeries(10, 5, minute(46), minute(100), minute(3), floatHistogram),
 			genHistogramAndFloatSeries(10, 5, minute(89), minute(140), minute(3), floatHistogram),
-			genSeriesFromSampleGenerator(10, 5, minute(126), minute(200), minute(3), func(ts int64) tsdbutil.Sample {
+			genSeriesFromSampleGenerator(10, 5, minute(126), minute(200), minute(3), func(ts int64) chunks.Sample {
				return sample{t: ts, f: rand.Float64()}
 			}),
 		)
@@ -6505,7 +6513,7 @@ func TestNativeHistogramFlag(t *testing.T) {
 		require.NoError(t, db.Close())
 	})
 	h := &histogram.Histogram{
-		Count:         6,
+		Count:         10,
 		ZeroCount:     4,
 		ZeroThreshold: 0.001,
 		Sum:           35.5,
@@ -6545,7 +6553,7 @@ func TestNativeHistogramFlag(t *testing.T) {
 	q, err := db.Querier(context.Background(), math.MinInt, math.MaxInt64)
 	require.NoError(t, err)
 	act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
-	require.Equal(t, map[string][]tsdbutil.Sample{
+	require.Equal(t, map[string][]chunks.Sample{
 		l.String(): {sample{t: 200, h: h}, sample{t: 205, fh: h.ToFloat()}},
 	}, act)
 }
@@ -6558,7 +6566,7 @@ func TestNativeHistogramFlag(t *testing.T) {
 // actual series contains a counter reset hint "UnknownCounterReset".
 // "GaugeType" hints are still strictly checked, and any "UnknownCounterReset"
 // in an expected series has to be matched precisely by the actual series.
-func compareSeries(t require.TestingT, expected, actual map[string][]tsdbutil.Sample) {
+func compareSeries(t require.TestingT, expected, actual map[string][]chunks.Sample) {
 	if len(expected) != len(actual) {
 		// The reason for the difference is not the counter reset hints
 		// (alone), so let's use the pretty diffing by the require
diff --git a/tsdb/head.go b/tsdb/head.go
@@ -42,7 +42,6 @@ import (
 	"github.com/prometheus/prometheus/tsdb/index"
 	"github.com/prometheus/prometheus/tsdb/record"
 	"github.com/prometheus/prometheus/tsdb/tombstones"
-	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 	"github.com/prometheus/prometheus/tsdb/wlog"
 	"github.com/prometheus/prometheus/util/zeropool"
 )
@@ -370,6 +369,7 @@ type headMetrics struct {
 	mmapChunkCorruptionTotal prometheus.Counter
 	snapshotReplayErrorTotal prometheus.Counter // Will be either 0 or 1.
 	oooHistogram             prometheus.Histogram
+	mmapChunksTotal          prometheus.Counter
 }
 
 const (
@@ -494,6 +494,10 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
 				60 * 60 * 12, // 12h
 			},
 		}),
+		mmapChunksTotal: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_mmap_chunks_total",
+			Help: "Total number of chunks that were memory-mapped.",
+		}),
 	}
 
 	if r != nil {
@@ -521,6 +525,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
 			m.checkpointDeleteTotal,
 			m.checkpointCreationFail,
 			m.checkpointCreationTotal,
+			m.mmapChunksTotal,
 			m.mmapChunkCorruptionTotal,
 			m.snapshotReplayErrorTotal,
 			m.oooHistogram,
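The prometheus_tsdb_mmap_chunks_total counter registered above is incremented by mmapHeadChunks() (added further down) with the number of chunks written in each pass. A small sketch of observing it, assuming the github.com/prometheus/client_golang/prometheus/testutil helper and an already-constructed head (names here are illustrative):

	// Observe the counter delta across one explicit m-mapping pass.
	before := testutil.ToFloat64(head.metrics.mmapChunksTotal)
	head.mmapHeadChunks()
	after := testutil.ToFloat64(head.metrics.mmapChunksTotal)
	_ = after - before // number of chunks m-mapped by this pass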
@@ -907,11 +912,11 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
 				numSamples: numSamples,
 			})
 			h.updateMinMaxTime(mint, maxt)
-			if ms.headChunk != nil && maxt >= ms.headChunk.minTime {
+			if ms.headChunks != nil && maxt >= ms.headChunks.minTime {
 				// The head chunk was completed and was m-mapped after taking the snapshot.
 				// Hence remove this chunk.
 				ms.nextAt = 0
-				ms.headChunk = nil
+				ms.headChunks = nil
 				ms.app = nil
 			}
 			return nil
@@ -1601,6 +1606,10 @@ func (h *Head) Close() error {
 	defer h.closedMtx.Unlock()
 	h.closed = true
 
+	// mmap all but the last chunk in case we're performing a snapshot, since that only
+	// takes samples from the most recent head chunk.
+	h.mmapHeadChunks()
+
 	errs := tsdb_errors.NewMulti(h.chunkDiskMapper.Close())
 	if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown {
 		errs.Add(h.performChunkSnapshot())
@@ -1657,6 +1666,37 @@ func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labe
 	return s, true, nil
 }
 
+// mmapHeadChunks will iterate all memSeries stored on Head and call mmapChunks() on each of them.
+//
+// There are two types of chunks that store samples for each memSeries:
+// A) Head chunk - stored on the Go heap, when new samples are appended they go there.
+// B) M-mapped chunks - memory mapped chunks, the kernel manages the memory for us on-demand, these chunks
+//    are read-only.
+//
+// Calling mmapHeadChunks() will iterate all memSeries and m-map all chunks that should be m-mapped.
+// The m-mapping operation needs to be serialised and so it goes via a central lock.
+// If there are multiple concurrent memSeries that need to m-map some chunk then they can block each other.
+//
+// To minimise the effect of locking on TSDB operations, m-mapping is serialised and done away from
+// the sample append path, since waiting on a lock inside an append would lock the entire memSeries for
+// (potentially) a long time, which could eventually delay the next scrape and/or cause query timeouts.
+func (h *Head) mmapHeadChunks() {
+	var count int
+	for i := 0; i < h.series.size; i++ {
+		h.series.locks[i].RLock()
+		for _, all := range h.series.hashes[i] {
+			for _, series := range all {
+				series.Lock()
+				count += series.mmapChunks(h.chunkDiskMapper)
+				series.Unlock()
+			}
+		}
+		h.series.locks[i].RUnlock()
+	}
+	h.metrics.mmapChunksTotal.Add(float64(count))
+}
+
 // seriesHashmap is a simple hashmap for memSeries by their label set. It is built
 // on top of a regular hashmap and holds a slice of series to resolve hash collisions.
 // Its methods require the hash to be submitted with it to avoid re-computations throughout
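mmapHeadChunks is intended to be driven periodically from outside the append path; apart from the Close() call above, its regular call site is not part of this hunk. A plausible sketch of a periodic driver, where the interval and stop channel are assumptions, not part of this commit:

	// Hypothetical background loop; the actual caller is not shown in this diff.
	go func() {
		ticker := time.NewTicker(time.Minute)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				h.mmapHeadChunks() // serialised walk over every stripe and series
			case <-stopc:
				return
			}
		}
	}()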
@@ -1788,7 +1828,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
 				minOOOTime = series.ooo.oooHeadChunk.minTime
 			}
 		}
-		if len(series.mmappedChunks) > 0 || series.headChunk != nil || series.pendingCommit ||
+		if len(series.mmappedChunks) > 0 || series.headChunks != nil || series.pendingCommit ||
 			(series.ooo != nil && (len(series.ooo.oooMmappedChunks) > 0 || series.ooo.oooHeadChunk != nil)) {
 			seriesMint := series.minTime()
 			if seriesMint < actualMint {
@@ -1904,7 +1944,7 @@ type sample struct {
 	fh *histogram.FloatHistogram
 }
 
-func newSample(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample {
+func newSample(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) chunks.Sample {
 	return sample{t, v, h, fh}
 }
 
@@ -1946,8 +1986,11 @@ type memSeries struct {
 	//
 	// pN is the pointer to the mmappedChunk referred to by HeadChunkID=N
 	mmappedChunks []*mmappedChunk
-	headChunk     *memChunk          // Most recent chunk in memory that's still being built.
-	firstChunkID  chunks.HeadChunkID // HeadChunkID for mmappedChunks[0]
+	// Most recent chunks in memory that are still being built or waiting to be mmapped.
+	// This is a linked list: headChunks points to the most recent chunk, headChunks.prev points
+	// to the older chunk, and so on.
+	headChunks   *memChunk
+	firstChunkID chunks.HeadChunkID // HeadChunkID for mmappedChunks[0]
 
 	ooo *memSeriesOOOFields
 
@@ -1957,7 +2000,8 @@ type memSeries struct {
 	// to spread chunks writing across time. Doesn't apply to the last chunk of the chunk range. 0 to disable variance.
 	chunkEndTimeVariance float64
 
 	nextAt                           int64 // Timestamp at which to cut the next chunk.
+	histogramChunkHasComputedEndTime bool  // True if nextAt has been predicted for the current histogram chunk; false otherwise.
 
 	// We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates.
 	lastValue float64
@@ -1967,7 +2011,7 @@ type memSeries struct {
 	lastFloatHistogramValue *histogram.FloatHistogram
 
 	// Current appender for the head chunk. Set when a new head chunk is cut.
-	// It is nil only if headChunk is nil. E.g. if there was an appender that created a new series, but rolled back the commit
+	// It is nil only if headChunks is nil. E.g. if there was an appender that created a new series, but rolled back the commit
 	// (the first sample would create a headChunk, hence appender, but rollback skipped it while the Append() call would create a series).
 	app chunkenc.Appender
 
@@ -2003,17 +2047,16 @@ func (s *memSeries) minTime() int64 {
 	if len(s.mmappedChunks) > 0 {
 		return s.mmappedChunks[0].minTime
 	}
-	if s.headChunk != nil {
-		return s.headChunk.minTime
+	if s.headChunks != nil {
+		return s.headChunks.oldest().minTime
 	}
 	return math.MinInt64
 }
 
 func (s *memSeries) maxTime() int64 {
 	// The highest timestamps will always be in the regular (non-OOO) chunks, even if OOO is enabled.
-	c := s.head()
-	if c != nil {
-		return c.maxTime
+	if s.headChunks != nil {
+		return s.headChunks.maxTime
 	}
 	if len(s.mmappedChunks) > 0 {
 		return s.mmappedChunks[len(s.mmappedChunks)-1].maxTime
@@ -2026,12 +2069,29 @@
 // Chunk IDs remain unchanged.
 func (s *memSeries) truncateChunksBefore(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) int {
 	var removedInOrder int
-	if s.headChunk != nil && s.headChunk.maxTime < mint {
-		// If head chunk is truncated, we can truncate all mmapped chunks.
-		removedInOrder = 1 + len(s.mmappedChunks)
-		s.firstChunkID += chunks.HeadChunkID(removedInOrder)
-		s.headChunk = nil
-		s.mmappedChunks = nil
+	if s.headChunks != nil {
+		var i int
+		var nextChk *memChunk
+		chk := s.headChunks
+		for chk != nil {
+			if chk.maxTime < mint {
+				// If any head chunk is truncated, we can truncate all mmapped chunks.
+				removedInOrder = chk.len() + len(s.mmappedChunks)
+				s.firstChunkID += chunks.HeadChunkID(removedInOrder)
+				if i == 0 {
+					// This is the first chunk on the list, so we need to remove the entire list.
+					s.headChunks = nil
+				} else {
+					// This is NOT the first chunk, unlink it from its parent.
+					nextChk.prev = nil
+				}
+				s.mmappedChunks = nil
+				break
+			}
+			nextChk = chk
+			chk = chk.prev
+			i++
+		}
 	}
 	if len(s.mmappedChunks) > 0 {
 		for i, c := range s.mmappedChunks {
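Because a series can now hold several in-memory chunks, truncation walks the list from the newest chunk towards older ones and cuts at the first chunk that ends before mint. A worked trace of the loop above, with illustrative timestamps:

	// List, newest first: headChunks{maxTime:400} -> {maxTime:300} -> {maxTime:200}.
	// truncateChunksBefore(mint=250, ...):
	//   chunk 400: 400 >= 250, keep and advance (nextChk = {400})
	//   chunk 300: 300 >= 250, keep and advance (nextChk = {300})
	//   chunk 200: 200 < 250, truncate here:
	//     removedInOrder = chk.len() + len(s.mmappedChunks) // chk.len() counts {200} and older
	//     nextChk.prev = nil                                // unlink {200} from {300}
	// Result: headChunks{400} -> {300}, mmappedChunks emptied,
	// and firstChunkID shifted forward by removedInOrder.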
@@ -2071,13 +2131,52 @@ func (s *memSeries) cleanupAppendIDsBelow(bound uint64) {
 	}
 }
 
-func (s *memSeries) head() *memChunk {
-	return s.headChunk
-}
-
 type memChunk struct {
 	chunk            chunkenc.Chunk
 	minTime, maxTime int64
+	prev             *memChunk // Link to the previous element on the list.
+}
+
+// len returns the length of the memChunk list, including the element it was called on.
+func (mc *memChunk) len() (count int) {
+	elem := mc
+	for elem != nil {
+		count++
+		elem = elem.prev
+	}
+	return count
+}
+
+// oldest returns the oldest element on the list.
+// For a single element list this will be the same memChunk oldest() was called on.
+func (mc *memChunk) oldest() (elem *memChunk) {
+	elem = mc
+	for elem.prev != nil {
+		elem = elem.prev
+	}
+	return elem
+}
+
+// atOffset returns the memChunk that is the Nth element on the linked list.
+func (mc *memChunk) atOffset(offset int) (elem *memChunk) {
+	if offset == 0 {
+		return mc
+	}
+	if offset < 0 {
+		return nil
+	}
+
+	var i int
+	elem = mc
+	for i < offset {
+		i++
+		elem = elem.prev
+		if elem == nil {
+			break
+		}
+	}
+
+	return elem
 }
 
 type oooHeadChunk struct {
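With the new prev pointer a memChunk acts as the head of a singly linked list, and len(), oldest() and atOffset() all count from the element they are called on towards older chunks. A small usage sketch, with illustrative values:

	// Three-element list, newest first: c2 -> c1 -> c0.
	c0 := &memChunk{minTime: 0, maxTime: 99}
	c1 := &memChunk{minTime: 100, maxTime: 199, prev: c0}
	c2 := &memChunk{minTime: 200, maxTime: 299, prev: c1}

	_ = c2.len()       // 3: counts c2, c1 and c0
	_ = c2.oldest()    // c0: follows prev to the end of the list
	_ = c2.atOffset(0) // c2 itself
	_ = c2.atOffset(2) // c0
	_ = c2.atOffset(3) // nil: walked past the end of the list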
@@ -2099,7 +2198,7 @@ func overlapsClosedInterval(mint1, maxt1, mint2, maxt2 int64) bool {
 	return mint1 <= maxt2 && mint2 <= maxt1
 }
 
-// mappedChunks describes a head chunk on disk that has been mmapped
+// mmappedChunk describes a head chunk on disk that has been mmapped
 type mmappedChunk struct {
 	ref        chunks.ChunkDiskMapperRef
 	numSamples uint16
diff --git a/tsdb/head_append.go b/tsdb/head_append.go
@@ -402,7 +402,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
 func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) {
 	// Check if we can append in the in-order chunk.
 	if t >= minValidTime {
-		if s.head() == nil {
+		if s.headChunks == nil {
 			// The series has no sample and was freshly created.
 			return false, 0, nil
 		}
@@ -440,15 +440,14 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi
 
 // appendableHistogram checks whether the given histogram is valid for appending to the series.
 func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error {
-	c := s.head()
-	if c == nil {
+	if s.headChunks == nil {
 		return nil
 	}
 
-	if t > c.maxTime {
+	if t > s.headChunks.maxTime {
 		return nil
 	}
-	if t < c.maxTime {
+	if t < s.headChunks.maxTime {
 		return storage.ErrOutOfOrderSample
 	}
 
@@ -462,15 +461,14 @@ func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error {
 
 // appendableFloatHistogram checks whether the given float histogram is valid for appending to the series.
 func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram) error {
-	c := s.head()
-	if c == nil {
+	if s.headChunks == nil {
 		return nil
 	}
 
-	if t > c.maxTime {
+	if t > s.headChunks.maxTime {
 		return nil
 	}
-	if t < c.maxTime {
+	if t < s.headChunks.maxTime {
 		return storage.ErrOutOfOrderSample
 	}
 
@@ -668,7 +666,7 @@ func ValidateHistogram(h *histogram.Histogram) error {
 		return errors.Wrap(err, "positive side")
 	}
 
-	if c := nCount + pCount; c > h.Count {
+	if c := nCount + pCount + h.ZeroCount; c > h.Count {
 		return errors.Wrap(
 			storage.ErrHistogramCountNotBigEnough,
 			fmt.Sprintf("%d observations found in buckets, but the Count field is %d", c, h.Count),
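ValidateHistogram now counts the zero-bucket observations against Count as well, which is why the test histograms earlier in this commit had their Count fields raised (baseH from 11 to 15, and the TestNativeHistogramFlag histogram from 6 to 10, each with a ZeroCount of 4). A worked check of the new condition, with an illustrative bucket split:

	// Assume 11 observations across negative and positive buckets plus ZeroCount 4.
	nCount, pCount, zeroCount := uint64(5), uint64(6), uint64(4)
	if c := nCount + pCount + zeroCount; c > 11 {
		// c == 15: a histogram that still declares Count = 11 is now rejected
		// with storage.ErrHistogramCountNotBigEnough, hence the tests raise Count to 15.
	}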
@@ -695,12 +693,9 @@ func ValidateFloatHistogram(h *histogram.FloatHistogram) error {
 		return errors.Wrap(err, "positive side")
 	}
 
-	if c := nCount + pCount; c > h.Count {
-		return errors.Wrap(
-			storage.ErrHistogramCountNotBigEnough,
-			fmt.Sprintf("%f observations found in buckets, but the Count field is %f", c, h.Count),
-		)
-	}
+	// We do not check for h.Count being at least as large as the sum of the
+	// counts in the buckets because floating point precision issues can
+	// create false positives here.
 
 	return nil
 }
@@ -939,6 +934,8 @@ func (a *headAppender) Commit() (err error) {
 
 			oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow)
 			switch err {
+			case nil:
+				// Do nothing.
 			case storage.ErrOutOfOrderSample:
 				samplesAppended--
 				oooRejected++
@@ -948,8 +945,6 @@ func (a *headAppender) Commit() (err error) {
 			case storage.ErrTooOldSample:
 				samplesAppended--
 				tooOldRejected++
-			case nil:
-				// Do nothing.
 			default:
 				samplesAppended--
 			}
@@ -1172,7 +1167,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
 	// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
 	prevApp, _ := s.app.(*chunkenc.HistogramAppender)
 
-	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, o)
+	c, sampleInOrder, chunkCreated := s.histogramsAppendPreprocessor(t, chunkenc.EncHistogram, o)
 	if !sampleInOrder {
 		return sampleInOrder, chunkCreated
 	}
@@ -1207,12 +1202,11 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
 		return true, false
 	}
 
-	// This is a brand new chunk, switch out the head chunk (based on cutNewHeadChunk).
-	s.mmapCurrentHeadChunk(o.chunkDiskMapper)
-	s.headChunk = &memChunk{
+	s.headChunks = &memChunk{
 		chunk:   newChunk,
 		minTime: t,
 		maxTime: t,
+		prev:    s.headChunks,
 	}
 	s.nextAt = rangeForTimestamp(t, o.chunkRange)
 	return true, true
@@ -1230,7 +1224,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
 	// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
 	prevApp, _ := s.app.(*chunkenc.FloatHistogramAppender)
 
-	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, o)
+	c, sampleInOrder, chunkCreated := s.histogramsAppendPreprocessor(t, chunkenc.EncFloatHistogram, o)
 	if !sampleInOrder {
 		return sampleInOrder, chunkCreated
 	}
@@ -1265,22 +1259,27 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
 		return true, false
 	}
 
-	// This is a brand new chunk, switch out the head chunk (based on cutNewHeadChunk).
-	s.mmapCurrentHeadChunk(o.chunkDiskMapper)
-	s.headChunk = &memChunk{
+	s.headChunks = &memChunk{
 		chunk:   newChunk,
 		minTime: t,
 		maxTime: t,
+		prev:    s.headChunks,
 	}
 	s.nextAt = rangeForTimestamp(t, o.chunkRange)
 	return true, true
 }
 
-// appendPreprocessor takes care of cutting new chunks and m-mapping old chunks.
+// appendPreprocessor takes care of cutting new XOR chunks and m-mapping old ones. XOR chunks are cut based on the
+// number of samples they contain, with a soft cap in bytes.
 // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
 // This should be called only when appending data.
 func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts) (c *memChunk, sampleInOrder, chunkCreated bool) {
-	c = s.head()
+	// We target chunkenc.MaxBytesPerXORChunk as a hard cap for the size of an XOR chunk. We must determine whether to cut
+	// a new head chunk without knowing the size of the next sample, however, so we assume the next sample will be a
+	// maximally-sized sample (19 bytes).
+	const maxBytesPerXORChunk = chunkenc.MaxBytesPerXORChunk - 19
+
+	c = s.headChunks
+
 	if c == nil {
 		if len(s.mmappedChunks) > 0 && s.mmappedChunks[len(s.mmappedChunks)-1].maxTime >= t {
@@ -1288,7 +1287,10 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts
 			return c, false, false
 		}
 		// There is no head chunk in this series yet, create the first chunk for the sample.
-		c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange)
+		c = s.cutNewHeadChunk(t, e, o.chunkRange)
+		chunkCreated = true
+	} else if len(c.chunk.Bytes()) > maxBytesPerXORChunk {
+		c = s.cutNewHeadChunk(t, e, o.chunkRange)
 		chunkCreated = true
 	}
 
@@ -1300,8 +1302,9 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts
 	if c.chunk.Encoding() != e {
 		// The chunk encoding expected by this append is different than the head chunk's
 		// encoding. So we cut a new chunk with the expected encoding.
-		c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange)
+		c = s.cutNewHeadChunk(t, e, o.chunkRange)
 		chunkCreated = true
+
 	}
 
 	numSamples := c.chunk.NumSamples()
@@ -1319,7 +1322,7 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts
 	if numSamples == o.samplesPerChunk/4 {
 		maxNextAt := s.nextAt
 
-		s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, maxNextAt)
+		s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, maxNextAt, 4)
 		s.nextAt = addJitterToChunkEndTime(s.shardHash, c.minTime, s.nextAt, maxNextAt, s.chunkEndTimeVariance)
 	}
 	// If numSamples > samplesPerChunk*2 then our previous prediction was invalid,
@@ -1328,24 +1331,102 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts
 	// as we expect more chunks to come.
 	// Note that next chunk will have its nextAt recalculated for the new rate.
 	if t >= s.nextAt || numSamples >= o.samplesPerChunk*2 {
-		c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange)
+		c = s.cutNewHeadChunk(t, e, o.chunkRange)
 		chunkCreated = true
 	}
 
 	return c, true, chunkCreated
 }
 
+// histogramsAppendPreprocessor takes care of cutting new histogram chunks and m-mapping old ones. Histogram chunks are
+// cut based on their size in bytes.
+// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
+// This should be called only when appending data.
+func (s *memSeries) histogramsAppendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts) (c *memChunk, sampleInOrder, chunkCreated bool) {
+	c = s.headChunks
+
+	if c == nil {
+		if len(s.mmappedChunks) > 0 && s.mmappedChunks[len(s.mmappedChunks)-1].maxTime >= t {
+			// Out of order sample. Sample timestamp is already in the mmapped chunks, so ignore it.
+			return c, false, false
+		}
+		// There is no head chunk in this series yet, create the first chunk for the sample.
+		c = s.cutNewHeadChunk(t, e, o.chunkRange)
+		chunkCreated = true
+	}
+
+	// Out of order sample.
+	if c.maxTime >= t {
+		return c, false, chunkCreated
+	}
+
+	if c.chunk.Encoding() != e {
+		// The chunk encoding expected by this append is different than the head chunk's
+		// encoding. So we cut a new chunk with the expected encoding.
+		c = s.cutNewHeadChunk(t, e, o.chunkRange)
+		chunkCreated = true
+	}
+
+	numSamples := c.chunk.NumSamples()
+	targetBytes := chunkenc.TargetBytesPerHistogramChunk
+	numBytes := len(c.chunk.Bytes())
+
+	if numSamples == 0 {
+		// It could be the new chunk created after reading the chunk snapshot,
+		// hence we fix the minTime of the chunk here.
+		c.minTime = t
+		s.nextAt = rangeForTimestamp(c.minTime, o.chunkRange)
+	}
+
+	// Below, we will enforce chunkenc.MinSamplesPerHistogramChunk. There are, however, two cases that supersede it:
+	// - The current chunk range is ending before chunkenc.MinSamplesPerHistogramChunk will be satisfied.
+	// - s.nextAt was set while loading a chunk snapshot with the intent that a new chunk be cut on the next append.
+	var nextChunkRangeStart int64
+	if s.histogramChunkHasComputedEndTime {
+		nextChunkRangeStart = rangeForTimestamp(c.minTime, o.chunkRange)
+	} else {
+		// If we haven't computed an end time yet, s.nextAt is either set to
+		// rangeForTimestamp(c.minTime, o.chunkRange) or was set while loading a chunk snapshot. Either way, we want to
+		// skip enforcing chunkenc.MinSamplesPerHistogramChunk.
+		nextChunkRangeStart = s.nextAt
+	}
+
+	// If we reach 25% of a chunk's desired maximum size, predict an end time
+	// for this chunk that will try to make samples equally distributed within
+	// the remaining chunks in the current chunk range.
+	// At the latest it must happen at the timestamp set when the chunk was cut.
+	if !s.histogramChunkHasComputedEndTime && numBytes >= targetBytes/4 {
+		ratioToFull := float64(targetBytes) / float64(numBytes)
+		s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt, ratioToFull)
+		s.histogramChunkHasComputedEndTime = true
+	}
+	// If numBytes > targetBytes*2 then our previous prediction was invalid. This could happen if the sample rate has
+	// increased or if the bucket/span count has increased.
+	// Note that next chunk will have its nextAt recalculated for the new rate.
+	if (t >= s.nextAt || numBytes >= targetBytes*2) && (numSamples >= chunkenc.MinSamplesPerHistogramChunk || t >= nextChunkRangeStart) {
+		c = s.cutNewHeadChunk(t, e, o.chunkRange)
+		chunkCreated = true
+	}
+
+	// The new chunk will also need a new computed end time.
+	if chunkCreated {
+		s.histogramChunkHasComputedEndTime = false
+	}
+
+	return c, true, chunkCreated
+}
+
 // computeChunkEndTime estimates the end timestamp based on the beginning of a
 // chunk, its current timestamp and the upper bound up to which we insert data.
-// It assumes that the time range is 1/4 full.
+// It assumes that the time range is 1/ratioToFull full.
 // Assuming that the samples will keep arriving at the same rate, it will make the
 // remaining n chunks within this chunk range (before max) equally sized.
-func computeChunkEndTime(start, cur, max int64) int64 {
-	n := (max - start) / ((cur - start + 1) * 4)
+func computeChunkEndTime(start, cur, max int64, ratioToFull float64) int64 {
+	n := float64(max-start) / (float64(cur-start+1) * ratioToFull)
 	if n <= 1 {
 		return max
 	}
-	return start + (max-start)/n
+	return int64(float64(start) + float64(max-start)/math.Floor(n))
}
 
 // addJitterToChunkEndTime return chunk's nextAt applying a jitter based on the provided expected variance.
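computeChunkEndTime now takes the fill ratio as a parameter: the XOR path passes 4 (the estimate is made when the chunk is 1/4 full), while the histogram path passes targetBytes/numBytes. A worked example with illustrative numbers:

	// start=0, cur=250, max=4000, ratioToFull=4:
	//   n   = (4000 - 0) / ((250 - 0 + 1) * 4) = 4000 / 1004 ≈ 3.98
	//   end = 0 + (4000 - 0) / floor(3.98) = 4000 / 3 ≈ 1333
	// The remaining chunk range is split into three roughly equal chunks;
	// when n <= 1 the function simply returns max.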
|
@ -1372,36 +1453,37 @@ func addJitterToChunkEndTime(seriesHash uint64, chunkMinTime, nextAt, maxNextAt
|
||||||
return min(maxNextAt, nextAt+chunkDurationVariance-(chunkDurationMaxVariance/2))
|
return min(maxNextAt, nextAt+chunkDurationVariance-(chunkDurationMaxVariance/2))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *memSeries) cutNewHeadChunk(
|
func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange int64) *memChunk {
|
||||||
mint int64, e chunkenc.Encoding, chunkDiskMapper chunkDiskMapper, chunkRange int64,
|
// When cutting a new head chunk we create a new memChunk instance with .prev
|
||||||
) *memChunk {
|
// pointing at the current .headChunks, so it forms a linked list.
|
||||||
s.mmapCurrentHeadChunk(chunkDiskMapper)
|
// All but first headChunks list elements will be m-mapped as soon as possible
|
||||||
|
// so this is a single element list most of the time.
|
||||||
s.headChunk = &memChunk{
|
s.headChunks = &memChunk{
|
||||||
minTime: mint,
|
minTime: mint,
|
||||||
maxTime: math.MinInt64,
|
maxTime: math.MinInt64,
|
||||||
|
prev: s.headChunks,
|
||||||
}
|
}
|
||||||
|
|
||||||
if chunkenc.IsValidEncoding(e) {
|
if chunkenc.IsValidEncoding(e) {
|
||||||
var err error
|
var err error
|
||||||
s.headChunk.chunk, err = chunkenc.NewEmptyChunk(e)
|
s.headChunks.chunk, err = chunkenc.NewEmptyChunk(e)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err) // This should never happen.
|
panic(err) // This should never happen.
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
s.headChunk.chunk = chunkenc.NewXORChunk()
|
s.headChunks.chunk = chunkenc.NewXORChunk()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set upper bound on when the next chunk must be started. An earlier timestamp
|
// Set upper bound on when the next chunk must be started. An earlier timestamp
|
||||||
// may be chosen dynamically at a later point.
|
// may be chosen dynamically at a later point.
|
||||||
s.nextAt = rangeForTimestamp(mint, chunkRange)
|
s.nextAt = rangeForTimestamp(mint, chunkRange)
|
||||||
|
|
||||||
app, err := s.headChunk.chunk.Appender()
|
app, err := s.headChunks.chunk.Appender()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
s.app = app
|
s.app = app
|
||||||
return s.headChunk
|
return s.headChunks
|
||||||
}
|
}
|
||||||
|
|
||||||
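The .prev chain built above is easier to see in isolation. Below is a rough sketch, not the real tsdb types: memChunk is stripped down to the fields this diff shows, and len/atOffset mirror the list helpers the new code calls (their exact implementations are not part of this diff).

package main

import "fmt"

// Stripped-down stand-in for tsdb's memChunk, for illustration only.
type memChunk struct {
	minTime, maxTime int64
	prev             *memChunk // older chunk, or nil
}

// len counts the elements of the list, newest to oldest.
func (c *memChunk) len() int {
	n := 0
	for e := c; e != nil; e = e.prev {
		n++
	}
	return n
}

// atOffset returns the i-th element counting from the newest (offset 0).
func (c *memChunk) atOffset(i int) *memChunk {
	e := c
	for ; i > 0 && e != nil; i-- {
		e = e.prev
	}
	return e
}

func main() {
	var head *memChunk
	for _, mint := range []int64{0, 100, 200} { // cut three chunks
		head = &memChunk{minTime: mint, prev: head} // newest becomes the list head
	}
	fmt.Println(head.len())               // 3
	fmt.Println(head.minTime)             // 200 (newest)
	fmt.Println(head.atOffset(2).minTime) // 0 (oldest)
}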
 // cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk.
@@ -1435,19 +1517,32 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper chunkDiskMapper) chu
 	return chunkRef
 }

-func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper chunkDiskMapper) {
-	if s.headChunk == nil || s.headChunk.chunk.NumSamples() == 0 {
-		// There is no head chunk, so nothing to m-map here.
+// mmapChunks will m-map all but first chunk on s.headChunks list.
+func (s *memSeries) mmapChunks(chunkDiskMapper chunkDiskMapper) (count int) {
+	if s.headChunks == nil || s.headChunks.prev == nil {
+		// There is none or only one head chunk, so nothing to m-map here.
 		return
 	}

-	chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk, false, handleChunkWriteError)
-	s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{
-		ref:        chunkRef,
-		numSamples: uint16(s.headChunk.chunk.NumSamples()),
-		minTime:    s.headChunk.minTime,
-		maxTime:    s.headChunk.maxTime,
-	})
+	// Write chunks starting from the oldest one and stop before we get to current s.headChunk.
+	// If we have this chain: s.headChunk{t4} -> t3 -> t2 -> t1 -> t0
+	// then we need to write chunks t0 to t3, but skip s.headChunks.
+	for i := s.headChunks.len() - 1; i > 0; i-- {
+		chk := s.headChunks.atOffset(i)
+		chunkRef := chunkDiskMapper.WriteChunk(s.ref, chk.minTime, chk.maxTime, chk.chunk, false, handleChunkWriteError)
+		s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{
+			ref:        chunkRef,
+			numSamples: uint16(chk.chunk.NumSamples()),
+			minTime:    chk.minTime,
+			maxTime:    chk.maxTime,
+		})
+		count++
+	}

+	// Once we've written out all chunks except s.headChunks we need to unlink these from s.headChunk.
+	s.headChunks.prev = nil

+	return count
 }

 func handleChunkWriteError(err error) {
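As a quick sanity check of the loop bounds above (illustrative only): for a five-element chain {t4}->{t3}->{t2}->{t1}->{t0}, the loop visits offsets 4 down to 1, i.e. the chunks t0..t3 oldest-first, and never touches offset 0, the open head chunk.

package main

import "fmt"

func main() {
	// Offsets visited by mmapChunks for a chain of length 5:
	// oldest chunks are written first, the open head chunk (offset 0) is skipped.
	for i := 5 - 1; i > 0; i-- {
		fmt.Printf("m-map atOffset(%d) -> t%d\n", i, 4-i) // 4->t0, 3->t1, 2->t2, 1->t3
	}
}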
@@ -203,12 +203,27 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
 			Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(i))),
 		})
 	}
-	if s.headChunk != nil && s.headChunk.OverlapsClosedInterval(h.mint, h.maxt) {
-		*chks = append(*chks, chunks.Meta{
-			MinTime: s.headChunk.minTime,
-			MaxTime: math.MaxInt64, // Set the head chunks as open (being appended to).
-			Ref:     chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(len(s.mmappedChunks)))),
-		})
+
+	if s.headChunks != nil {
+		var maxTime int64
+		var i, j int
+		for i = s.headChunks.len() - 1; i >= 0; i-- {
+			chk := s.headChunks.atOffset(i)
+			if i == 0 {
+				// Set the head chunk as open (being appended to) for the first headChunk.
+				maxTime = math.MaxInt64
+			} else {
+				maxTime = chk.maxTime
+			}
+			if chk.OverlapsClosedInterval(h.mint, h.maxt) {
+				*chks = append(*chks, chunks.Meta{
+					MinTime: chk.minTime,
+					MaxTime: maxTime,
+					Ref:     chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(len(s.mmappedChunks)+j))),
+				})
+			}
+			j++
+		}
 	}

 	return nil
@@ -216,7 +231,7 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB

 // headChunkID returns the HeadChunkID referred to by the given position.
 // * 0 <= pos < len(s.mmappedChunks) refer to s.mmappedChunks[pos]
-// * pos == len(s.mmappedChunks) refers to s.headChunk
+// * pos >= len(s.mmappedChunks) refers to s.headChunks linked list
 func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID {
 	return chunks.HeadChunkID(pos) + s.firstChunkID
 }
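Put concretely (hypothetical values, not taken from the patch): with three m-mapped chunks, a three-element headChunks list and two already-truncated chunks, positions map to ids as follows.

package main

import "fmt"

func main() {
	// Hypothetical series state, mirroring the layout comment later in this diff:
	//   mmappedChunks: [t0, t1, t2]     -> positions 0..2
	//   headChunks:    {t5}->{t4}->{t3} -> positions 3..5 (oldest first)
	// With firstChunkID == 2 (two chunks already truncated away), position pos
	// answers to HeadChunkID pos+2, exactly as headChunkID computes.
	firstChunkID := 2
	for pos := 0; pos <= 5; pos++ {
		fmt.Printf("pos %d -> HeadChunkID %d\n", pos, pos+firstChunkID)
	}
}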
@@ -325,7 +340,7 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
 	}

 	s.Lock()
-	c, headChunk, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool)
+	c, headChunk, isOpen, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool)
 	if err != nil {
 		s.Unlock()
 		return nil, 0, err
@@ -334,6 +349,7 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
 		if !headChunk {
 			// Set this to nil so that Go GC can collect it after it has been used.
 			c.chunk = nil
+			c.prev = nil
 			h.head.memChunkPool.Put(c)
 		}
 	}()
@@ -345,14 +361,14 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
 	}

 	chk, maxTime := c.chunk, c.maxTime
-	if headChunk && copyLastChunk {
+	if headChunk && isOpen && copyLastChunk {
 		// The caller may ask to copy the head chunk in order to take the
 		// bytes of the chunk without causing the race between read and append.
-		b := s.headChunk.chunk.Bytes()
+		b := s.headChunks.chunk.Bytes()
 		newB := make([]byte, len(b))
 		copy(newB, b) // TODO(codesome): Use bytes.Clone() when we upgrade to Go 1.20.
 		// TODO(codesome): Put back in the pool (non-trivial).
-		chk, err = h.head.opts.ChunkPool.Get(s.headChunk.chunk.Encoding(), newB)
+		chk, err = h.head.opts.ChunkPool.Get(s.headChunks.chunk.Encoding(), newB)
 		if err != nil {
 			return nil, 0, err
 		}
@@ -370,34 +386,60 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
 // chunk returns the chunk for the HeadChunkID from memory or by m-mapping it from the disk.
 // If headChunk is false, it means that the returned *memChunk
 // (and not the chunkenc.Chunk inside it) can be garbage collected after its usage.
-func (s *memSeries) chunk(id chunks.HeadChunkID, cdm chunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, headChunk bool, err error) {
+// if isOpen is true, it means that the returned *memChunk is used for appends.
+func (s *memSeries) chunk(id chunks.HeadChunkID, cdm chunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, headChunk, isOpen bool, err error) {
 	// ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are
 	// incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index.
 	// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix
-	// is len(s.mmappedChunks), it represents the next chunk, which is the head chunk.
+	// is >= len(s.mmappedChunks), it represents one of the chunks on s.headChunks linked list.
+	// The order of elements is different for slice and linked list.
+	// For s.mmappedChunks slice newer chunks are appended to it.
+	// For s.headChunks list newer chunks are prepended to it.
+	//
+	// memSeries {
+	//     mmappedChunks: [t0, t1, t2]
+	//     headChunk: {t5}->{t4}->{t3}
+	// }
 	ix := int(id) - int(s.firstChunkID)
-	if ix < 0 || ix > len(s.mmappedChunks) {
-		return nil, false, storage.ErrNotFound
+
+	var headChunksLen int
+	if s.headChunks != nil {
+		headChunksLen = s.headChunks.len()
 	}

-	if ix == len(s.mmappedChunks) {
-		if s.headChunk == nil {
-			return nil, false, errors.New("invalid head chunk")
-		}
-		return s.headChunk, true, nil
+	if ix < 0 || ix > len(s.mmappedChunks)+headChunksLen-1 {
+		return nil, false, false, storage.ErrNotFound
 	}
-	chk, err := cdm.Chunk(s.mmappedChunks[ix].ref)
-	if err != nil {
-		if _, ok := err.(*chunks.CorruptionErr); ok {
-			panic(err)
+
+	if ix < len(s.mmappedChunks) {
+		chk, err := cdm.Chunk(s.mmappedChunks[ix].ref)
+		if err != nil {
+			if _, ok := err.(*chunks.CorruptionErr); ok {
+				panic(err)
+			}
+			return nil, false, false, err
 		}
-		return nil, false, err
+		mc := memChunkPool.Get().(*memChunk)
+		mc.chunk = chk
+		mc.minTime = s.mmappedChunks[ix].minTime
+		mc.maxTime = s.mmappedChunks[ix].maxTime
+		return mc, false, false, nil
 	}
-	mc := memChunkPool.Get().(*memChunk)
-	mc.chunk = chk
-	mc.minTime = s.mmappedChunks[ix].minTime
-	mc.maxTime = s.mmappedChunks[ix].maxTime
-	return mc, false, nil
+
+	ix -= len(s.mmappedChunks)
+
+	offset := headChunksLen - ix - 1
+	// headChunks is a linked list where first element is the most recent one and the last one is the oldest.
+	// This order is reversed when compared with mmappedChunks, since mmappedChunks[0] is the oldest chunk,
+	// while headChunk.atOffset(0) would give us the most recent chunk.
+	// So when calling headChunk.atOffset() we need to reverse the value of ix.
+	elem := s.headChunks.atOffset(offset)
+	if elem == nil {
+		// This should never really happen and would mean that headChunksLen value is NOT equal
+		// to the length of the headChunks list.
+		return nil, false, false, storage.ErrNotFound
+	}
+	return elem, true, offset == 0, nil
 }
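A compact standalone sketch of the id resolution above, with the real types stripped away. The chunk layout is the one from the diff's own comment; firstChunkID and the ids are hypothetical.

package main

import "fmt"

// resolve mirrors the index arithmetic in memSeries.chunk(): it reports where a
// given HeadChunkID would be served from, using plain ints instead of the tsdb types.
func resolve(id, firstChunkID, numMmapped, headChunksLen int) string {
	ix := id - firstChunkID
	if ix < 0 || ix > numMmapped+headChunksLen-1 {
		return "not found"
	}
	if ix < numMmapped {
		return fmt.Sprintf("mmappedChunks[%d]", ix)
	}
	ix -= numMmapped
	offset := headChunksLen - ix - 1 // reverse: the list is newest-first
	return fmt.Sprintf("headChunks.atOffset(%d), open=%v", offset, offset == 0)
}

func main() {
	// 3 m-mapped chunks plus the list {t5}->{t4}->{t3}, firstChunkID 0:
	fmt.Println(resolve(1, 0, 3, 3)) // mmappedChunks[1]
	fmt.Println(resolve(3, 0, 3, 3)) // headChunks.atOffset(2), open=false (t3, the oldest)
	fmt.Println(resolve(5, 0, 3, 3)) // headChunks.atOffset(0), open=true  (t5, being appended to)
	fmt.Println(resolve(6, 0, 3, 3)) // not found
}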
 // oooMergedChunk returns the requested chunk based on the given chunks.Meta
@@ -689,8 +731,21 @@ func (s *memSeries) iterator(id chunks.HeadChunkID, c chunkenc.Chunk, isoState *
 		}
 	}

-	if s.headChunk != nil {
-		totalSamples += s.headChunk.chunk.NumSamples()
+	ix -= len(s.mmappedChunks)
+	if s.headChunks != nil {
+		// Iterate all head chunks from the oldest to the newest.
+		headChunksLen := s.headChunks.len()
+		for j := headChunksLen - 1; j >= 0; j-- {
+			chk := s.headChunks.atOffset(j)
+			chkSamples := chk.chunk.NumSamples()
+			totalSamples += chkSamples
+			// Chunk ID is len(s.mmappedChunks) + $(headChunks list position).
+			// Where $(headChunks list position) is zero for the oldest chunk and $(s.headChunks.len() - 1)
+			// for the newest (open) chunk.
+			if headChunksLen-1-j < ix {
+				previousSamples += chkSamples
+			}
+		}
 	}

 	// Removing the extra transactionIDs that are relevant for samples that
@@ -15,11 +15,14 @@ package tsdb

 import (
 	"fmt"
+	"sync"
 	"testing"

 	"github.com/stretchr/testify/require"

+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
 )

 func TestBoundedChunk(t *testing.T) {
@@ -176,3 +179,387 @@ func newTestChunk(numSamples int) chunkenc.Chunk {
 	}
 	return xor
 }
+
+// TestMemSeries_chunk runs a series of tests on memSeries.chunk() calls.
+// It will simulate various conditions to ensure all code paths in that function are covered.
+func TestMemSeries_chunk(t *testing.T) {
+	const chunkRange int64 = 100
+	const chunkStep int64 = 5
+
+	appendSamples := func(t *testing.T, s *memSeries, start, end int64, cdm *chunks.ChunkDiskMapper) {
+		for i := start; i < end; i += chunkStep {
+			ok, _ := s.append(i, float64(i), 0, chunkOpts{
+				chunkDiskMapper: cdm,
+				chunkRange:      chunkRange,
+				samplesPerChunk: DefaultSamplesPerChunk,
+			})
+			require.True(t, ok, "sample append failed")
+		}
+	}
+
+	type setupFn func(*testing.T, *memSeries, *chunks.ChunkDiskMapper)
+
+	type callOutput uint8
+	const (
+		outOpenHeadChunk   callOutput = iota // memSeries.chunk() call returned memSeries.headChunks with headChunk=true & isOpen=true
+		outClosedHeadChunk                   // memSeries.chunk() call returned memSeries.headChunks with headChunk=true & isOpen=false
+		outMmappedChunk                      // memSeries.chunk() call returned a chunk from memSeries.mmappedChunks with headChunk=false
+		outErr                               // memSeries.chunk() call returned an error
+	)
+
+	tests := []struct {
+		name     string
+		setup    setupFn            // optional function called just before the test memSeries.chunk() call
+		inputID  chunks.HeadChunkID // requested chunk id for memSeries.chunk() call
+		expected callOutput
+	}{
+		{
+			name:     "call ix=0 on empty memSeries",
+			inputID:  0,
+			expected: outErr,
+		},
+		{
+			name:     "call ix=1 on empty memSeries",
+			inputID:  1,
+			expected: outErr,
+		},
+		{
+			name: "firstChunkID > ix",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange, cdm)
+				require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+				require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+				s.firstChunkID = 5
+			},
+			inputID:  1,
+			expected: outErr,
+		},
+		{
+			name: "call ix=0 on memSeries with no mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange, cdm)
+				require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+				require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  0,
+			expected: outOpenHeadChunk,
+		},
+		{
+			name: "call ix=1 on memSeries with no mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange, cdm)
+				require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+				require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  1,
+			expected: outErr,
+		},
+		{
+			name: "call ix=10 on memSeries with no mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange, cdm)
+				require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+				require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  10,
+			expected: outErr,
+		},
+		{
+			name: "call ix=0 on memSeries with 3 mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  0,
+			expected: outMmappedChunk,
+		},
+		{
+			name: "call ix=1 on memSeries with 3 mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  1,
+			expected: outMmappedChunk,
+		},
+		{
+			name: "call ix=3 on memSeries with 3 mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  3,
+			expected: outOpenHeadChunk,
+		},
+		{
+			name: "call ix=0 on memSeries with 3 mmapped chunks and no headChunk",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+				s.headChunks = nil
+			},
+			inputID:  0,
+			expected: outMmappedChunk,
+		},
+		{
+			name: "call ix=2 on memSeries with 3 mmapped chunks and no headChunk",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+				s.headChunks = nil
+			},
+			inputID:  2,
+			expected: outMmappedChunk,
+		},
+		{
+			name: "call ix=3 on memSeries with 3 mmapped chunks and no headChunk",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+				s.headChunks = nil
+			},
+			inputID:  3,
+			expected: outErr,
+		},
+		{
+			name: "call ix=1 on memSeries with 3 mmapped chunks and closed ChunkDiskMapper",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+				cdm.Close()
+			},
+			inputID:  1,
+			expected: outErr,
+		},
+		{
+			name: "call ix=3 on memSeries with 3 mmapped chunks and closed ChunkDiskMapper",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+				cdm.Close()
+			},
+			inputID:  3,
+			expected: outOpenHeadChunk,
+		},
+		{
+			name: "call ix=0 on memSeries with 3 head chunks and no mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*3, cdm)
+				require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
+				require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  0,
+			expected: outClosedHeadChunk,
+		},
+		{
+			name: "call ix=1 on memSeries with 3 head chunks and no mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*3, cdm)
+				require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
+				require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  1,
+			expected: outClosedHeadChunk,
+		},
+		{
+			name: "call ix=10 on memSeries with 3 head chunks and no mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*3, cdm)
+				require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
+				require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  10,
+			expected: outErr,
+		},
+		{
+			name: "call ix=0 on memSeries with 3 head chunks and 3 mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+
+				appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
+				require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  0,
+			expected: outMmappedChunk,
+		},
+		{
+			name: "call ix=2 on memSeries with 3 head chunks and 3 mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+
+				appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
+				require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  2,
+			expected: outMmappedChunk,
+		},
+		{
+			name: "call ix=3 on memSeries with 3 head chunks and 3 mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+
+				appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
+				require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  3,
+			expected: outClosedHeadChunk,
+		},
+		{
+			name: "call ix=5 on memSeries with 3 head chunks and 3 mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+
+				appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
+				require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  5,
+			expected: outOpenHeadChunk,
+		},
+		{
+			name: "call ix=6 on memSeries with 3 head chunks and 3 mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+
+				appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
+				require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  6,
+			expected: outErr,
+		},
+		{
+			name: "call ix=10 on memSeries with 3 head chunks and 3 mmapped chunks",
+			setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
+				appendSamples(t, s, 0, chunkRange*4, cdm)
+				s.mmapChunks(cdm)
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
+
+				appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
+				require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
+				require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
+				require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
+				require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
+			},
+			inputID:  10,
+			expected: outErr,
+		},
+	}
+
+	memChunkPool := &sync.Pool{
+		New: func() interface{} {
+			return &memChunk{}
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			dir := t.TempDir()
+			chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
+			require.NoError(t, err)
+			defer func() {
+				require.NoError(t, chunkDiskMapper.Close())
+			}()
+
+			series := newMemSeries(labels.EmptyLabels(), 1, labels.StableHash(labels.EmptyLabels()), 0, true)
+
+			if tc.setup != nil {
+				tc.setup(t, series, chunkDiskMapper)
+			}
+
+			chk, headChunk, isOpen, err := series.chunk(tc.inputID, chunkDiskMapper, memChunkPool)
+			switch tc.expected {
+			case outOpenHeadChunk:
+				require.NoError(t, err, "unexpected error")
+				require.True(t, headChunk, "expected a chunk with headChunk=true but got headChunk=%v", headChunk)
+				require.True(t, isOpen, "expected a chunk with isOpen=true but got isOpen=%v", isOpen)
+			case outClosedHeadChunk:
+				require.NoError(t, err, "unexpected error")
+				require.True(t, headChunk, "expected a chunk with headChunk=true but got headChunk=%v", headChunk)
+				require.False(t, isOpen, "expected a chunk with isOpen=false but got isOpen=%v", isOpen)
+			case outMmappedChunk:
+				require.NoError(t, err, "unexpected error")
+				require.False(t, headChunk, "expected a chunk with headChunk=false but got gc=%v", headChunk)
+			case outErr:
+				require.Nil(t, chk, "got a non-nil chunk reference returned with an error")
+				require.Error(t, err)
+			}
+		})
+	}
+}
File diff suppressed because it is too large
@@ -505,7 +505,7 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
 		// We do not reset oooHeadChunk because that is being replayed from a different WAL
 		// and has not been replayed here.
 		mSeries.nextAt = 0
-		mSeries.headChunk = nil
+		mSeries.headChunks = nil
 		mSeries.app = nil
 		return
 	}
@@ -597,6 +597,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
 			if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated {
 				h.metrics.chunksCreated.Inc()
 				h.metrics.chunks.Inc()
+				_ = ms.mmapChunks(h.chunkDiskMapper)
 			}
 			if s.T > maxt {
 				maxt = s.T
@@ -962,15 +963,15 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte {
 	buf.PutBE64int64(0) // Backwards-compatibility; was chunkRange but now unused.

 	s.Lock()
-	if s.headChunk == nil {
+	if s.headChunks == nil {
 		buf.PutUvarint(0)
 	} else {
-		enc := s.headChunk.chunk.Encoding()
+		enc := s.headChunks.chunk.Encoding()
 		buf.PutUvarint(1)
-		buf.PutBE64int64(s.headChunk.minTime)
-		buf.PutBE64int64(s.headChunk.maxTime)
+		buf.PutBE64int64(s.headChunks.minTime)
+		buf.PutBE64int64(s.headChunks.maxTime)
 		buf.PutByte(byte(enc))
-		buf.PutUvarintBytes(s.headChunk.chunk.Bytes())
+		buf.PutUvarintBytes(s.headChunks.chunk.Bytes())

 		switch enc {
 		case chunkenc.EncXOR:
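For readers following the byte layout: an illustrative re-implementation of just the head-chunk section written above. The real code uses tsdb/encoding.Encbuf; this sketch uses only the standard library and mirrors the field order (uvarint presence flag, big-endian min/max time, encoding byte, uvarint-length-prefixed chunk bytes).

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeHeadChunkSection mirrors the field order of the head-chunk part of a
// snapshot record, as a sketch only; it is not the production encoder.
func encodeHeadChunkSection(minTime, maxTime int64, enc byte, payload []byte) []byte {
	b := []byte{1} // uvarint(1): a head chunk is present (0 would mean none)
	b = binary.BigEndian.AppendUint64(b, uint64(minTime))
	b = binary.BigEndian.AppendUint64(b, uint64(maxTime))
	b = append(b, enc)
	b = binary.AppendUvarint(b, uint64(len(payload)))
	return append(b, payload...)
}

func main() {
	// Hypothetical chunk: [0, 99], encoding byte 1, two payload bytes.
	fmt.Printf("% x\n", encodeHeadChunkSection(0, 99, 1, []byte{0xde, 0xad}))
}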
@@ -1416,12 +1417,12 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie
 				continue
 			}
 			series.nextAt = csr.mc.maxTime // This will create a new chunk on append.
-			series.headChunk = csr.mc
+			series.headChunks = csr.mc
 			series.lastValue = csr.lastValue
 			series.lastHistogramValue = csr.lastHistogramValue
 			series.lastFloatHistogramValue = csr.lastFloatHistogramValue

-			app, err := series.headChunk.chunk.Appender()
+			app, err := series.headChunks.chunk.Appender()
 			if err != nil {
 				errChan <- err
 				return
@@ -1518,7 +1519,7 @@ Outer:
 		default:
 			// This is a record type we don't understand. It is either an old format from earlier versions,
 			// or a new format and the code was rolled back to old version.
-			loopErr = errors.Errorf("unsuported snapshot record type 0b%b", rec[0])
+			loopErr = errors.Errorf("unsupported snapshot record type 0b%b", rec[0])
 			break Outer
 		}
 	}
@@ -28,7 +28,6 @@ import (
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
-	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 	"github.com/prometheus/prometheus/tsdb/wlog"
 )
@@ -496,16 +495,16 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 		queryMinT            int64
 		queryMaxT            int64
 		firstInOrderSampleAt int64
-		inputSamples         tsdbutil.SampleSlice
+		inputSamples         chunks.SampleSlice
 		expChunkError        bool
-		expChunksSamples     []tsdbutil.SampleSlice
+		expChunksSamples     []chunks.SampleSlice
 	}{
 		{
 			name:                 "Getting the head when there are no overlapping chunks returns just the samples in the head",
 			queryMinT:            minutes(0),
 			queryMaxT:            minutes(100),
 			firstInOrderSampleAt: minutes(120),
-			inputSamples: tsdbutil.SampleSlice{
+			inputSamples: chunks.SampleSlice{
 				sample{t: minutes(30), f: float64(0)},
 				sample{t: minutes(40), f: float64(0)},
 			},
@@ -514,7 +513,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			// Query Interval [------------------------------------------------------------------------------------------]
 			// Chunk 0: Current Head [--------] (With 2 samples)
 			// Output Graphically [--------] (With 2 samples)
-			expChunksSamples: []tsdbutil.SampleSlice{
+			expChunksSamples: []chunks.SampleSlice{
 				{
 					sample{t: minutes(30), f: float64(0)},
 					sample{t: minutes(40), f: float64(0)},
@@ -526,7 +525,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			queryMinT:            minutes(0),
 			queryMaxT:            minutes(100),
 			firstInOrderSampleAt: minutes(120),
-			inputSamples: tsdbutil.SampleSlice{
+			inputSamples: chunks.SampleSlice{
 				// opts.OOOCapMax is 5 so these will be mmapped to the first mmapped chunk
 				sample{t: minutes(41), f: float64(0)},
 				sample{t: minutes(42), f: float64(0)},
@@ -544,7 +543,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			// Chunk 0 [---] (With 5 samples)
 			// Chunk 1: Current Head [-----------------] (With 2 samples)
 			// Output Graphically [-----------------] (With 7 samples)
-			expChunksSamples: []tsdbutil.SampleSlice{
+			expChunksSamples: []chunks.SampleSlice{
 				{
 					sample{t: minutes(30), f: float64(1)},
 					sample{t: minutes(41), f: float64(0)},
@@ -561,7 +560,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			queryMinT:            minutes(0),
 			queryMaxT:            minutes(100),
 			firstInOrderSampleAt: minutes(120),
-			inputSamples: tsdbutil.SampleSlice{
+			inputSamples: chunks.SampleSlice{
 				// Chunk 0
 				sample{t: minutes(10), f: float64(0)},
 				sample{t: minutes(12), f: float64(0)},
@@ -592,7 +591,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			// Chunk 2 [--------]
 			// Chunk 3: Current Head [--------]
 			// Output Graphically [----------------][-----------------]
-			expChunksSamples: []tsdbutil.SampleSlice{
+			expChunksSamples: []chunks.SampleSlice{
 				{
 					sample{t: minutes(10), f: float64(0)},
 					sample{t: minutes(12), f: float64(0)},
@@ -619,7 +618,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			queryMinT:            minutes(0),
 			queryMaxT:            minutes(100),
 			firstInOrderSampleAt: minutes(120),
-			inputSamples: tsdbutil.SampleSlice{
+			inputSamples: chunks.SampleSlice{
 				// Chunk 0
 				sample{t: minutes(40), f: float64(0)},
 				sample{t: minutes(42), f: float64(0)},
@@ -650,7 +649,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			// Chunk 2 [-------]
 			// Chunk 3: Current Head [--------]
 			// Output Graphically [----------------][-----------------]
-			expChunksSamples: []tsdbutil.SampleSlice{
+			expChunksSamples: []chunks.SampleSlice{
 				{
 					sample{t: minutes(10), f: float64(3)},
 					sample{t: minutes(20), f: float64(2)},
@@ -677,7 +676,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			queryMinT:            minutes(0),
 			queryMaxT:            minutes(100),
 			firstInOrderSampleAt: minutes(120),
-			inputSamples: tsdbutil.SampleSlice{
+			inputSamples: chunks.SampleSlice{
 				// Chunk 0
 				sample{t: minutes(10), f: float64(0)},
 				sample{t: minutes(12), f: float64(0)},
@@ -708,7 +707,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			// Chunk 2 [-------]
 			// Chunk 3: Current Head [-------]
 			// Output Graphically [-------][-------][-------][--------]
-			expChunksSamples: []tsdbutil.SampleSlice{
+			expChunksSamples: []chunks.SampleSlice{
 				{
 					sample{t: minutes(10), f: float64(0)},
 					sample{t: minutes(12), f: float64(0)},
@@ -741,7 +740,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			queryMinT:            minutes(0),
 			queryMaxT:            minutes(100),
 			firstInOrderSampleAt: minutes(120),
-			inputSamples: tsdbutil.SampleSlice{
+			inputSamples: chunks.SampleSlice{
 				// Chunk 0
 				sample{t: minutes(10), f: float64(0)},
 				sample{t: minutes(15), f: float64(0)},
@@ -765,7 +764,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			// Chunk 1 [--------------------]
 			// Chunk 2 Current Head [--------------]
 			// Output Graphically [-----------------------------------]
-			expChunksSamples: []tsdbutil.SampleSlice{
+			expChunksSamples: []chunks.SampleSlice{
 				{
 					sample{t: minutes(10), f: float64(0)},
 					sample{t: minutes(15), f: float64(0)},
@@ -784,7 +783,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			queryMinT:            minutes(12),
 			queryMaxT:            minutes(33),
 			firstInOrderSampleAt: minutes(120),
-			inputSamples: tsdbutil.SampleSlice{
+			inputSamples: chunks.SampleSlice{
 				// Chunk 0
 				sample{t: minutes(10), f: float64(0)},
 				sample{t: minutes(15), f: float64(0)},
@@ -808,7 +807,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			// Chunk 1 [--------------------]
 			// Chunk 2 Current Head [--------------]
 			// Output Graphically [-----------------------------------]
-			expChunksSamples: []tsdbutil.SampleSlice{
+			expChunksSamples: []chunks.SampleSlice{
 				{
 					sample{t: minutes(10), f: float64(0)},
 					sample{t: minutes(15), f: float64(0)},
@@ -853,7 +852,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			c, err := cr.Chunk(chks[i])
 			require.NoError(t, err)

-			var resultSamples tsdbutil.SampleSlice
+			var resultSamples chunks.SampleSlice
 			it := c.Iterator(nil)
 			for it.Next() == chunkenc.ValFloat {
 				t, v := it.At()
@@ -892,17 +891,17 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
 		queryMinT              int64
 		queryMaxT              int64
 		firstInOrderSampleAt   int64
-		initialSamples         tsdbutil.SampleSlice
-		samplesAfterSeriesCall tsdbutil.SampleSlice
+		initialSamples         chunks.SampleSlice
+		samplesAfterSeriesCall chunks.SampleSlice
 		expChunkError          bool
-		expChunksSamples       []tsdbutil.SampleSlice
+		expChunksSamples       []chunks.SampleSlice
 	}{
 		{
 			name:                 "Current head gets old, new and in between sample after Series call, they all should be omitted from the result",
 			queryMinT:            minutes(0),
 			queryMaxT:            minutes(100),
 			firstInOrderSampleAt: minutes(120),
-			initialSamples: tsdbutil.SampleSlice{
+			initialSamples: chunks.SampleSlice{
 				// Chunk 0
 				sample{t: minutes(20), f: float64(0)},
 				sample{t: minutes(22), f: float64(0)},
@@ -913,7 +912,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
 				sample{t: minutes(25), f: float64(1)},
 				sample{t: minutes(35), f: float64(1)},
 			},
-			samplesAfterSeriesCall: tsdbutil.SampleSlice{
+			samplesAfterSeriesCall: chunks.SampleSlice{
 				sample{t: minutes(10), f: float64(1)},
 				sample{t: minutes(32), f: float64(1)},
 				sample{t: minutes(50), f: float64(1)},
@@ -926,7 +925,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
 			// New samples added after Series()
 			// Chunk 1: Current Head [-----------------------------------] (5 samples)
 			// Output Graphically [------------] (With 8 samples, samples newer than lastmint or older than lastmaxt are omitted but the ones in between are kept)
-			expChunksSamples: []tsdbutil.SampleSlice{
+			expChunksSamples: []chunks.SampleSlice{
 				{
 					sample{t: minutes(20), f: float64(0)},
 					sample{t: minutes(22), f: float64(0)},
@@ -944,7 +943,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
 			queryMinT:            minutes(0),
 			queryMaxT:            minutes(100),
 			firstInOrderSampleAt: minutes(120),
-			initialSamples: tsdbutil.SampleSlice{
+			initialSamples: chunks.SampleSlice{
 				// Chunk 0
 				sample{t: minutes(20), f: float64(0)},
 				sample{t: minutes(22), f: float64(0)},
@@ -955,7 +954,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
 				sample{t: minutes(25), f: float64(1)},
 				sample{t: minutes(35), f: float64(1)},
 			},
-			samplesAfterSeriesCall: tsdbutil.SampleSlice{
+			samplesAfterSeriesCall: chunks.SampleSlice{
 				sample{t: minutes(10), f: float64(1)},
 				sample{t: minutes(32), f: float64(1)},
 				sample{t: minutes(50), f: float64(1)},
@@ -972,7 +971,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
 			// Chunk 1 (mmapped) [-------------------------] (5 samples)
 			// Chunk 2: Current Head [-----------] (2 samples)
 			// Output Graphically [------------] (8 samples) It has 5 from Chunk 0 and 3 from Chunk 1
-			expChunksSamples: []tsdbutil.SampleSlice{
+			expChunksSamples: []chunks.SampleSlice{
 				{
 					sample{t: minutes(20), f: float64(0)},
 					sample{t: minutes(22), f: float64(0)},
@@ -1024,7 +1023,7 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
 			c, err := cr.Chunk(chks[i])
 			require.NoError(t, err)

-			var resultSamples tsdbutil.SampleSlice
+			var resultSamples chunks.SampleSlice
 			it := c.Iterator(nil)
 			for it.Next() == chunkenc.ValFloat {
 				ts, v := it.At()
@ -19,6 +19,7 @@ import (
|
||||||
|
|
||||||
"github.com/oklog/ulid"
|
"github.com/oklog/ulid"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"golang.org/x/exp/slices"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/model/histogram"
|
"github.com/prometheus/prometheus/model/histogram"
|
||||||
"github.com/prometheus/prometheus/model/labels"
|
"github.com/prometheus/prometheus/model/labels"
|
||||||
|
@@ -186,6 +187,41 @@ func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.P
 			labelMustBeSet[m.Name] = true
 		}
 	}
+	isSubtractingMatcher := func(m *labels.Matcher) bool {
+		if !labelMustBeSet[m.Name] {
+			return true
+		}
+		return (m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp) && m.Matches("")
+	}
+	hasSubtractingMatchers, hasIntersectingMatchers := false, false
+	for _, m := range ms {
+		if isSubtractingMatcher(m) {
+			hasSubtractingMatchers = true
+		} else {
+			hasIntersectingMatchers = true
+		}
+	}
+
+	if hasSubtractingMatchers && !hasIntersectingMatchers {
+		// If there's nothing to subtract from, add in everything and remove the notIts later.
+		// We prefer to get AllPostings so that the base of subtraction (i.e. allPostings)
+		// doesn't include series that may be added to the index reader during this function call.
+		k, v := index.AllPostingsKey()
+		allPostings, err := ix.Postings(k, v)
+		if err != nil {
+			return nil, err
+		}
+		its = append(its, allPostings)
+	}
+
+	// Sort matchers to have the intersecting matchers first.
+	// This way the base for subtraction is smaller and
+	// there is no chance that the set we subtract from
+	// contains postings of series that didn't exist when
+	// we constructed the set we subtract by.
+	slices.SortStableFunc(ms, func(i, j *labels.Matcher) bool {
+		return !isSubtractingMatcher(i) && isSubtractingMatcher(j)
+	})
 
 	for _, m := range ms {
 		switch {
@@ -254,16 +290,6 @@ func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.P
 		}
 	}
 
-	// If there's nothing to subtract from, add in everything and remove the notIts later.
-	if len(its) == 0 && len(notIts) != 0 {
-		k, v := index.AllPostingsKey()
-		allPostings, err := ix.Postings(k, v)
-		if err != nil {
-			return nil, err
-		}
-		its = append(its, allPostings)
-	}
-
 	it := index.Intersect(its...)
 
 	for _, n := range notIts {
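The two querier.go hunks above are one refactor: the "add in everything" fallback now runs before the main matcher loop, and the matchers are stably sorted so intersecting ones are applied before subtracting ones, which keeps the subtraction base fixed even while the index is growing. Below is a minimal, self-contained Go sketch of that classification and ordering; the example matchers and the hard-coded labelMustBeSet map are illustrative assumptions, not part of the diff.

package main

import (
	"fmt"

	"golang.org/x/exp/slices"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// One subtracting and one intersecting matcher (assumed example input).
	ms := []*labels.Matcher{
		labels.MustNewMatcher(labels.MatchNotEqual, "env", "prod"), // matches "" => subtracting
		labels.MustNewMatcher(labels.MatchEqual, "job", "app"),     // intersecting
	}
	// PostingsForMatchers derives this map from the matchers themselves;
	// it is hard-coded here to keep the sketch self-contained.
	labelMustBeSet := map[string]bool{"job": true}

	isSubtractingMatcher := func(m *labels.Matcher) bool {
		if !labelMustBeSet[m.Name] {
			return true
		}
		return (m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp) && m.Matches("")
	}

	// Stable sort, as in the hunk: intersecting matchers move to the front,
	// so the base of the later subtraction is built first.
	slices.SortStableFunc(ms, func(i, j *labels.Matcher) bool {
		return !isSubtractingMatcher(i) && isSubtractingMatcher(j)
	})

	for _, m := range ms {
		fmt.Printf("%s subtracting=%t\n", m, isSubtractingMatcher(m))
	}
}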
@@ -22,6 +22,7 @@ import (
 	"path/filepath"
 	"sort"
 	"strconv"
+	"sync"
 	"testing"
 	"time"
 
@@ -239,7 +240,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C
 			require.Equal(t, errExp, errRes)
 
 			require.Equal(t, len(chksExp), len(chksRes))
-			var exp, act [][]tsdbutil.Sample
+			var exp, act [][]chunks.Sample
 			for i := range chksExp {
 				samples, err := storage.ExpandSamples(chksExp[i].Chunk.Iterator(nil), nil)
 				require.NoError(t, err)
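The comparison in the hunk above works by expanding whole chunks back into sample slices. A hedged sketch of that pattern, assuming storage.ExpandSamples walks a chunkenc.Iterator and materializes every value (passing nil as the second argument is assumed to select the default sample constructor, as the hunk does):

// Expand both expected and actual chunks into []chunks.Sample so that
// require.Equal compares decoded values rather than chunk encodings.
expSamples, err := storage.ExpandSamples(chksExp[i].Chunk.Iterator(nil), nil)
require.NoError(t, err)
actSamples, err := storage.ExpandSamples(chksRes[i].Chunk.Iterator(nil), nil)
require.NoError(t, err)
require.Equal(t, expSamples, actSamples)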
@@ -291,24 +292,24 @@ func TestBlockQuerier(t *testing.T) {
 			ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
 			exp: newMockSeriesSet([]storage.Series{
 				storage.NewListSeries(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("b", "b"),
-					[]tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
+					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
 				),
 			}),
 			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
-					[]tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []tsdbutil.Sample{sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
+					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []chunks.Sample{sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
 				),
 			}),
 		},
@@ -318,18 +319,18 @@ func TestBlockQuerier(t *testing.T) {
 			ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
 			exp: newMockSeriesSet([]storage.Series{
 				storage.NewListSeries(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+					[]chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 			}),
 			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+					[]chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 			}),
 		},
@@ -342,20 +343,20 @@ func TestBlockQuerier(t *testing.T) {
 			ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
 			exp: newMockSeriesSet([]storage.Series{
 				storage.NewListSeries(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 			}),
 			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}},
-					[]tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}},
+					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
-					[]tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
+					[]chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 			}),
 		},
@@ -368,18 +369,18 @@ func TestBlockQuerier(t *testing.T) {
 			ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
 			exp: newMockSeriesSet([]storage.Series{
 				storage.NewListSeries(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 			}),
 			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 			}),
 		},
@@ -393,24 +394,24 @@ func TestBlockQuerier(t *testing.T) {
 			ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
 			exp: newMockSeriesSet([]storage.Series{
 				storage.NewListSeries(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("b", "b"),
-					[]tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
+					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
 				),
 			}),
 			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
-					[]tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []tsdbutil.Sample{sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
+					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []chunks.Sample{sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
 				),
 			}),
 		},
@@ -467,24 +468,24 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) {
 			ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
 			exp: newMockSeriesSet([]storage.Series{
 				storage.NewListSeries(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("b", "b"),
-					[]tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
+					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
 				),
 			}),
 			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+					[]chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
-					[]tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
+					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}, sample{6, 7, nil, nil}, sample{7, 2, nil, nil}},
 				),
 			}),
 		},
@@ -494,18 +495,18 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) {
 			ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
 			exp: newMockSeriesSet([]storage.Series{
 				storage.NewListSeries(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+					[]chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 			}),
 			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+					[]chunks.Sample{sample{2, 3, nil, nil}, sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{2, 2, nil, nil}, sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 			}),
 		},
@@ -549,18 +550,18 @@ func TestBlockQuerier_TrimmingDoesNotModifyOriginalTombstoneIntervals(t *testing
 			ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", "a")},
 			exp: newMockSeriesSet([]storage.Series{
 				storage.NewListSeries(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+					[]chunks.Sample{sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 			}),
 			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{3, 4, nil, nil}}, []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+					[]chunks.Sample{sample{3, 4, nil, nil}}, []chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
+					[]chunks.Sample{sample{3, 3, nil, nil}}, []chunks.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
 				),
 			}),
 		}
@@ -648,24 +649,24 @@ func TestBlockQuerierDelete(t *testing.T) {
 			ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", ".*")},
 			exp: newMockSeriesSet([]storage.Series{
 				storage.NewListSeries(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{5, 3, nil, nil}},
+					[]chunks.Sample{sample{5, 3, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("b", "b"),
-					[]tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}},
+					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}, sample{5, 1, nil, nil}},
 				),
 			}),
 			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
+					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}, sample{7, 4, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{5, 3, nil, nil}},
+					[]chunks.Sample{sample{5, 3, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("b", "b"),
-					[]tsdbutil.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []tsdbutil.Sample{sample{5, 1, nil, nil}},
+					[]chunks.Sample{sample{1, 3, nil, nil}, sample{2, 2, nil, nil}, sample{3, 6, nil, nil}}, []chunks.Sample{sample{5, 1, nil, nil}},
 				),
 			}),
 		},
@@ -675,18 +676,18 @@ func TestBlockQuerierDelete(t *testing.T) {
 			ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
 			exp: newMockSeriesSet([]storage.Series{
 				storage.NewListSeries(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
 				),
 				storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{5, 3, nil, nil}},
+					[]chunks.Sample{sample{5, 3, nil, nil}},
 				),
 			}),
 			expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
-					[]tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
+					[]chunks.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
 				),
 				storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
-					[]tsdbutil.Sample{sample{5, 3, nil, nil}},
+					[]chunks.Sample{sample{5, 3, nil, nil}},
 				),
 			}),
 		},
|
||||||
chks map[chunks.ChunkRef]chunkenc.Chunk
|
chks map[chunks.ChunkRef]chunkenc.Chunk
|
||||||
}
|
}
|
||||||
|
|
||||||
func createFakeReaderAndNotPopulatedChunks(s ...[]tsdbutil.Sample) (*fakeChunksReader, []chunks.Meta) {
|
func createFakeReaderAndNotPopulatedChunks(s ...[]chunks.Sample) (*fakeChunksReader, []chunks.Meta) {
|
||||||
f := &fakeChunksReader{
|
f := &fakeChunksReader{
|
||||||
chks: map[chunks.ChunkRef]chunkenc.Chunk{},
|
chks: map[chunks.ChunkRef]chunkenc.Chunk{},
|
||||||
}
|
}
|
||||||
chks := make([]chunks.Meta, 0, len(s))
|
chks := make([]chunks.Meta, 0, len(s))
|
||||||
|
|
||||||
for ref, samples := range s {
|
for ref, samples := range s {
|
||||||
chk, _ := tsdbutil.ChunkFromSamples(samples)
|
chk, _ := chunks.ChunkFromSamples(samples)
|
||||||
f.chks[chunks.ChunkRef(ref)] = chk.Chunk
|
f.chks[chunks.ChunkRef(ref)] = chk.Chunk
|
||||||
|
|
||||||
chks = append(chks, chunks.Meta{
|
chks = append(chks, chunks.Meta{
|
||||||
|
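The helper above now builds chunks through chunks.ChunkFromSamples, which, as used in this diff, returns a chunks.Meta carrying the encoded chunk plus an error. A self-contained sketch of that call follows; fakeSample is a hypothetical stand-in for the test file's unexported sample type, and the printed MinTime/MaxTime behaviour is an assumption based on how the helper is used here.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
)

// fakeSample implements chunks.Sample for float samples only; it mirrors
// what the test's own sample type is assumed to do.
type fakeSample struct {
	t int64
	f float64
}

func (s fakeSample) T() int64                      { return s.t }
func (s fakeSample) F() float64                    { return s.f }
func (s fakeSample) H() *histogram.Histogram       { return nil }
func (s fakeSample) FH() *histogram.FloatHistogram { return nil }
func (s fakeSample) Type() chunkenc.ValueType      { return chunkenc.ValFloat }

func main() {
	meta, err := chunks.ChunkFromSamples([]chunks.Sample{
		fakeSample{t: 1, f: 2},
		fakeSample{t: 2, f: 3},
	})
	if err != nil {
		panic(err)
	}
	// Expected (under the assumptions above): 1 2 2
	fmt.Println(meta.MinTime, meta.MaxTime, meta.Chunk.NumSamples())
}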
@@ -733,9 +734,9 @@ func (r *fakeChunksReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
 func TestPopulateWithTombSeriesIterators(t *testing.T) {
 	cases := []struct {
 		name string
-		chks [][]tsdbutil.Sample
+		chks [][]chunks.Sample
 
-		expected     []tsdbutil.Sample
+		expected     []chunks.Sample
 		expectedChks []chunks.Meta
 
 		intervals tombstones.Intervals
@@ -746,79 +747,79 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 	}{
 		{
 			name: "no chunk",
-			chks: [][]tsdbutil.Sample{},
+			chks: [][]chunks.Sample{},
 		},
 		{
 			name: "one empty chunk", // This should never happen.
-			chks: [][]tsdbutil.Sample{{}},
+			chks: [][]chunks.Sample{{}},
 
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{}),
+				assureChunkFromSamples(t, []chunks.Sample{}),
 			},
 		},
 		{
 			name: "three empty chunks", // This should never happen.
-			chks: [][]tsdbutil.Sample{{}, {}, {}},
+			chks: [][]chunks.Sample{{}, {}, {}},
 
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{}),
+				assureChunkFromSamples(t, []chunks.Sample{}),
-				assureChunkFromSamples(t, []tsdbutil.Sample{}),
+				assureChunkFromSamples(t, []chunks.Sample{}),
-				assureChunkFromSamples(t, []tsdbutil.Sample{}),
+				assureChunkFromSamples(t, []chunks.Sample{}),
 			},
 		},
 		{
 			name: "one chunk",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
 			},
 
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
 			},
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
 				}),
 			},
 		},
 		{
 			name: "two full chunks",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
 				{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
 			},
 
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
 			},
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
 				}),
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
 				}),
 			},
 		},
 		{
 			name: "three full chunks",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
 				{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
 				{sample{10, 22, nil, nil}, sample{203, 3493, nil, nil}},
 			},
 
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, sample{10, 22, nil, nil}, sample{203, 3493, nil, nil},
 			},
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
 				}),
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
 				}),
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{10, 22, nil, nil}, sample{203, 3493, nil, nil},
 				}),
 			},
@@ -826,14 +827,14 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 		// Seek cases.
 		{
 			name: "three empty chunks and seek", // This should never happen.
-			chks: [][]tsdbutil.Sample{{}, {}, {}},
+			chks: [][]chunks.Sample{{}, {}, {}},
 			seek: 1,
 
 			seekSuccess: false,
 		},
 		{
 			name: "two chunks and seek beyond chunks",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
 				{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
 			},
@@ -843,92 +844,92 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 		},
 		{
 			name: "two chunks and seek on middle of first chunk",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
 				{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
 			},
 			seek: 2,
 
 			seekSuccess: true,
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
 			},
 		},
 		{
 			name: "two chunks and seek before first chunk",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
 				{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
 			},
 			seek: -32,
 
 			seekSuccess: true,
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
 			},
 		},
 		// Deletion / Trim cases.
 		{
 			name: "no chunk with deletion interval",
-			chks: [][]tsdbutil.Sample{},
+			chks: [][]chunks.Sample{},
 			intervals: tombstones.Intervals{{Mint: 20, Maxt: 21}},
 		},
 		{
 			name: "two chunks with trimmed first and last samples from edge chunks",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
 				{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
 			},
 			intervals: tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 9, Maxt: math.MaxInt64}),
 
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil},
 			},
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
 				}),
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{7, 89, nil, nil},
 				}),
 			},
 		},
 		{
 			name: "two chunks with trimmed middle sample of first chunk",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
 				{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
 			},
 			intervals: tombstones.Intervals{{Mint: 2, Maxt: 3}},
 
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{1, 2, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
 			},
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{1, 2, nil, nil}, sample{6, 1, nil, nil},
 				}),
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
 				}),
 			},
 		},
 		{
 			name: "two chunks with deletion across two chunks",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
 				{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
 			},
 			intervals: tombstones.Intervals{{Mint: 6, Maxt: 7}},
 
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{9, 8, nil, nil},
 			},
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil},
 				}),
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{9, 8, nil, nil},
 				}),
 			},
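The deletion and trim cases above all reduce to one membership test: a sample is dropped when its timestamp falls inside any tombstone interval. A minimal sketch of that check, assuming Interval.InBounds is an inclusive Mint <= t <= Maxt test, as its use in the TSDB suggests:

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/tsdb/tombstones"
)

func main() {
	// Same intervals as the "trimmed first and last samples" case above.
	itvs := tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.
		Add(tombstones.Interval{Mint: 9, Maxt: math.MaxInt64})

	for _, ts := range []int64{1, 2, 3, 6, 7, 9} {
		deleted := false
		for _, iv := range itvs {
			if iv.InBounds(ts) { // assumed inclusive bounds check
				deleted = true
				break
			}
		}
		fmt.Println(ts, "deleted:", deleted) // expect 1, 2 and 9 to be deleted
	}
}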
@@ -936,7 +937,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 		// Deletion with seek.
 		{
 			name: "two chunks with trimmed first and last samples from edge chunks, seek from middle of first chunk",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
 				{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
 			},
@@ -944,13 +945,13 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 
 			seek: 3,
 			seekSuccess: true,
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil},
 			},
 		},
 		{
 			name: "one histogram chunk",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{
 					sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
 					sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
@@ -958,14 +959,14 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 					sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil},
 				},
 			},
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
 				sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
 				sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
 				sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
 			},
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
 					sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
 					sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
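A note on the SetHistogramNotCounterReset wrappers in the expectations above: when the iterator re-encodes consecutive histogram samples into a single chunk, it can rule out a counter reset between them, so every in-chunk sample after the first is expected to carry the NotCounterReset hint. A hedged sketch of what the helper is assumed to do (histogram.NotCounterReset is the real hint constant):

// Assumed behaviour of tsdbutil.SetHistogramNotCounterReset, inferred from
// the test expectations: stamp the counter-reset hint and return the histogram.
func setNotCounterReset(h *histogram.Histogram) *histogram.Histogram {
	h.CounterResetHint = histogram.NotCounterReset
	return h
}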
@@ -975,7 +976,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 		},
 		{
 			name: "one histogram chunk intersect with deletion interval",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{
 					sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
 					sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
@@ -984,13 +985,13 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 				},
 			},
 			intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
 				sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
 				sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
 			},
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
 					sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
 					sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
@@ -999,7 +1000,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 		},
 		{
 			name: "one float histogram chunk",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{
 					sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
 					sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
@@ -1007,14 +1008,14 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 					sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
 				},
 			},
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
 				sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
 				sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
 				sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
 			},
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
 					sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
 					sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
@@ -1024,7 +1025,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 		},
 		{
 			name: "one float histogram chunk intersect with deletion interval",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{
 					sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
 					sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
@@ -1033,13 +1034,13 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 				},
 			},
 			intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
 				sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
 				sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
 			},
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
 					sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
 					sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
@@ -1048,7 +1049,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 		},
 		{
 			name: "one gauge histogram chunk",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{
 					sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
 					sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
@@ -1056,14 +1057,14 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 					sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
 				},
 			},
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
 				sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
 				sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
 				sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
 			},
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
 					sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
 					sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
@@ -1073,7 +1074,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 		},
 		{
 			name: "one gauge histogram chunk intersect with deletion interval",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{
 					sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
 					sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
@@ -1082,13 +1083,13 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 				},
 			},
 			intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
-			expected: []tsdbutil.Sample{
+			expected: []chunks.Sample{
 				sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
 				sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
 				sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
 			},
 			expectedChks: []chunks.Meta{
-				assureChunkFromSamples(t, []tsdbutil.Sample{
+				assureChunkFromSamples(t, []chunks.Sample{
 					sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
 					sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
 					sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
@@ -1097,7 +1098,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
 		},
 		{
 			name: "one gauge float histogram",
-			chks: [][]tsdbutil.Sample{
+			chks: [][]chunks.Sample{
 				{
 					sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
 					sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
@ -1105,14 +1106,14 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
|
||||||
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
|
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expected: []tsdbutil.Sample{
|
expected: []chunks.Sample{
|
||||||
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
||||||
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
||||||
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
||||||
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
|
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
|
||||||
},
|
},
|
||||||
expectedChks: []chunks.Meta{
|
expectedChks: []chunks.Meta{
|
||||||
assureChunkFromSamples(t, []tsdbutil.Sample{
|
assureChunkFromSamples(t, []chunks.Sample{
|
||||||
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
||||||
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
||||||
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
||||||
|
@ -1122,7 +1123,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "one gauge float histogram chunk intersect with deletion interval",
|
name: "one gauge float histogram chunk intersect with deletion interval",
|
||||||
chks: [][]tsdbutil.Sample{
|
chks: [][]chunks.Sample{
|
||||||
{
|
{
|
||||||
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
||||||
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
||||||
|
@ -1131,13 +1132,13 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
|
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
|
||||||
expected: []tsdbutil.Sample{
|
expected: []chunks.Sample{
|
||||||
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
||||||
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
||||||
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
||||||
},
|
},
|
||||||
expectedChks: []chunks.Meta{
|
expectedChks: []chunks.Meta{
|
||||||
assureChunkFromSamples(t, []tsdbutil.Sample{
|
assureChunkFromSamples(t, []chunks.Sample{
|
||||||
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
|
||||||
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
|
||||||
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
|
||||||
|
@ -1152,7 +1153,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
|
||||||
it := &populateWithDelSeriesIterator{}
|
it := &populateWithDelSeriesIterator{}
|
||||||
it.reset(ulid.ULID{}, f, chkMetas, tc.intervals)
|
it.reset(ulid.ULID{}, f, chkMetas, tc.intervals)
|
||||||
|
|
||||||
var r []tsdbutil.Sample
|
var r []chunks.Sample
|
||||||
if tc.seek != 0 {
|
if tc.seek != 0 {
|
||||||
require.Equal(t, tc.seekSuccess, it.Seek(tc.seek) == chunkenc.ValFloat)
|
require.Equal(t, tc.seekSuccess, it.Seek(tc.seek) == chunkenc.ValFloat)
|
||||||
require.Equal(t, tc.seekSuccess, it.Seek(tc.seek) == chunkenc.ValFloat) // Next one should be noop.
|
require.Equal(t, tc.seekSuccess, it.Seek(tc.seek) == chunkenc.ValFloat) // Next one should be noop.
|
||||||
|
@ -1198,9 +1199,9 @@ func rmChunkRefs(chks []chunks.Meta) {
|
||||||
// Regression for: https://github.com/prometheus/tsdb/pull/97
|
// Regression for: https://github.com/prometheus/tsdb/pull/97
|
||||||
func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) {
|
func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) {
|
||||||
f, chkMetas := createFakeReaderAndNotPopulatedChunks(
|
f, chkMetas := createFakeReaderAndNotPopulatedChunks(
|
||||||
[]tsdbutil.Sample{},
|
[]chunks.Sample{},
|
||||||
[]tsdbutil.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
|
[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
|
||||||
[]tsdbutil.Sample{sample{4, 4, nil, nil}, sample{5, 5, nil, nil}},
|
[]chunks.Sample{sample{4, 4, nil, nil}, sample{5, 5, nil, nil}},
|
||||||
)
|
)
|
||||||
|
|
||||||
it := &populateWithDelSeriesIterator{}
|
it := &populateWithDelSeriesIterator{}
|
||||||
|
@ -1217,9 +1218,9 @@ func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) {
|
||||||
// skipped to the end when seeking a value in the current chunk.
|
// skipped to the end when seeking a value in the current chunk.
|
||||||
func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) {
|
func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) {
|
||||||
f, chkMetas := createFakeReaderAndNotPopulatedChunks(
|
f, chkMetas := createFakeReaderAndNotPopulatedChunks(
|
||||||
[]tsdbutil.Sample{},
|
[]chunks.Sample{},
|
||||||
[]tsdbutil.Sample{sample{1, 2, nil, nil}, sample{3, 4, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}},
|
[]chunks.Sample{sample{1, 2, nil, nil}, sample{3, 4, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}},
|
||||||
[]tsdbutil.Sample{},
|
[]chunks.Sample{},
|
||||||
)
|
)
|
||||||
|
|
||||||
it := &populateWithDelSeriesIterator{}
|
it := &populateWithDelSeriesIterator{}
|
||||||
|
@ -1237,7 +1238,7 @@ func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) {
|
||||||
|
|
||||||
func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) {
|
func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) {
|
||||||
f, chkMetas := createFakeReaderAndNotPopulatedChunks(
|
f, chkMetas := createFakeReaderAndNotPopulatedChunks(
|
||||||
[]tsdbutil.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{6, 8, nil, nil}},
|
[]chunks.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{6, 8, nil, nil}},
|
||||||
)
|
)
|
||||||
|
|
||||||
it := &populateWithDelSeriesIterator{}
|
it := &populateWithDelSeriesIterator{}
|
||||||
|
@ -1250,7 +1251,7 @@ func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) {
|
||||||
// Seek gets called and advances beyond the max time, which was just accepted as a valid sample.
|
// Seek gets called and advances beyond the max time, which was just accepted as a valid sample.
|
||||||
func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) {
|
func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) {
|
||||||
f, chkMetas := createFakeReaderAndNotPopulatedChunks(
|
f, chkMetas := createFakeReaderAndNotPopulatedChunks(
|
||||||
[]tsdbutil.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}},
|
[]chunks.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}},
|
||||||
)
|
)
|
||||||
|
|
||||||
it := &populateWithDelSeriesIterator{}
|
it := &populateWithDelSeriesIterator{}
|
||||||
|
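Note: the hunks above mechanically rename the test sample type from tsdbutil.Sample to chunks.Sample. A minimal sketch of the relocated interface, assuming it keeps the shape of the tsdbutil version that this commit deletes further down (the package path is an assumption; this diff only shows the call sites):

    package chunks

    import (
        "github.com/prometheus/prometheus/model/histogram"
        "github.com/prometheus/prometheus/tsdb/chunkenc"
    )

    // Sample is a single sample of any supported value type.
    type Sample interface {
        T() int64                      // timestamp in milliseconds
        F() float64                    // float value, when Type() == chunkenc.ValFloat
        H() *histogram.Histogram       // integer histogram, if any
        FH() *histogram.FloatHistogram // float histogram, if any
        Type() chunkenc.ValueType      // which of the values above is set
    }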
@@ -2213,6 +2214,71 @@ func TestPostingsForMatchers(t *testing.T) {
 	}
 }

+// TestQuerierIndexQueriesRace tests the index queries with racing appends.
+func TestQuerierIndexQueriesRace(t *testing.T) {
+	const testRepeats = 1000
+
+	testCases := []struct {
+		matchers []*labels.Matcher
+	}{
+		{
+			matchers: []*labels.Matcher{
+				// This matcher should involve the AllPostings posting list in calculating the posting lists.
+				labels.MustNewMatcher(labels.MatchNotEqual, labels.MetricName, "metric"),
+			},
+		},
+		{
+			matchers: []*labels.Matcher{
+				// The first matcher should be effectively the same as AllPostings, because all series have always_0=0.
+				// If it is evaluated first, then __name__=metric will contain more series than always_0=0.
+				labels.MustNewMatcher(labels.MatchNotEqual, "always_0", "0"),
+				labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "metric"),
+			},
+		},
+	}
+
+	for _, c := range testCases {
+		c := c
+		t.Run(fmt.Sprintf("%v", c.matchers), func(t *testing.T) {
+			db := openTestDB(t, DefaultOptions(), nil)
+			h := db.Head()
+			t.Cleanup(func() {
+				require.NoError(t, db.Close())
+			})
+			ctx, cancel := context.WithCancel(context.Background())
+			wg := &sync.WaitGroup{}
+			wg.Add(1)
+			go appendSeries(t, ctx, wg, h)
+			t.Cleanup(wg.Wait)
+			t.Cleanup(cancel)
+
+			for i := 0; i < testRepeats; i++ {
+				q, err := db.Querier(ctx, math.MinInt64, math.MaxInt64)
+				require.NoError(t, err)
+
+				values, _, err := q.LabelValues("seq", c.matchers...)
+				require.NoError(t, err)
+				require.Emptyf(t, values, `label values for label "seq" should be empty`)
+			}
+		})
+	}
+}
+
+func appendSeries(t *testing.T, ctx context.Context, wg *sync.WaitGroup, h *Head) {
+	defer wg.Done()
+
+	// Keep appending until the context is canceled by the test's cleanup.
+	for i := 0; ctx.Err() == nil; i++ {
+		app := h.Appender(context.Background())
+		_, err := app.Append(0, labels.FromStrings(labels.MetricName, "metric", "seq", strconv.Itoa(i), "always_0", "0"), 0, 0)
+		require.NoError(t, err)
+		err = app.Commit()
+		require.NoError(t, err)
+
+		// Throttle down the appends to keep the test somewhat nimble.
+		time.Sleep(time.Millisecond)
+	}
+}
+
 // TestClose ensures that calling Close more than once doesn't block and doesn't panic.
 func TestClose(t *testing.T) {
 	dir := t.TempDir()
@@ -1,159 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tsdbutil
-
-import (
-	"fmt"
-
-	"github.com/prometheus/prometheus/model/histogram"
-	"github.com/prometheus/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/prometheus/tsdb/chunks"
-)
-
-type Samples interface {
-	Get(i int) Sample
-	Len() int
-}
-
-type Sample interface {
-	T() int64
-	F() float64
-	H() *histogram.Histogram
-	FH() *histogram.FloatHistogram
-	Type() chunkenc.ValueType
-}
-
-type SampleSlice []Sample
-
-func (s SampleSlice) Get(i int) Sample { return s[i] }
-func (s SampleSlice) Len() int         { return len(s) }
-
-// ChunkFromSamples requires all samples to have the same type.
-func ChunkFromSamples(s []Sample) (chunks.Meta, error) {
-	return ChunkFromSamplesGeneric(SampleSlice(s))
-}
-
-// ChunkFromSamplesGeneric requires all samples to have the same type.
-func ChunkFromSamplesGeneric(s Samples) (chunks.Meta, error) {
-	emptyChunk := chunks.Meta{Chunk: chunkenc.NewXORChunk()}
-	mint, maxt := int64(0), int64(0)
-
-	if s.Len() > 0 {
-		mint, maxt = s.Get(0).T(), s.Get(s.Len()-1).T()
-	}
-
-	if s.Len() == 0 {
-		return emptyChunk, nil
-	}
-
-	sampleType := s.Get(0).Type()
-	c, err := chunkenc.NewEmptyChunk(sampleType.ChunkEncoding())
-	if err != nil {
-		return chunks.Meta{}, err
-	}
-
-	ca, _ := c.Appender()
-	var newChunk chunkenc.Chunk
-
-	for i := 0; i < s.Len(); i++ {
-		switch sampleType {
-		case chunkenc.ValFloat:
-			ca.Append(s.Get(i).T(), s.Get(i).F())
-		case chunkenc.ValHistogram:
-			newChunk, _, ca, err = ca.AppendHistogram(nil, s.Get(i).T(), s.Get(i).H(), false)
-			if err != nil {
-				return emptyChunk, err
-			}
-			if newChunk != nil {
-				return emptyChunk, fmt.Errorf("did not expect to start a second chunk")
-			}
-		case chunkenc.ValFloatHistogram:
-			newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false)
-			if err != nil {
-				return emptyChunk, err
-			}
-			if newChunk != nil {
-				return emptyChunk, fmt.Errorf("did not expect to start a second chunk")
-			}
-		default:
-			panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
-		}
-	}
-	return chunks.Meta{
-		MinTime: mint,
-		MaxTime: maxt,
-		Chunk:   c,
-	}, nil
-}
-
-type sample struct {
-	t  int64
-	f  float64
-	h  *histogram.Histogram
-	fh *histogram.FloatHistogram
-}
-
-func (s sample) T() int64 {
-	return s.t
-}
-
-func (s sample) F() float64 {
-	return s.f
-}
-
-func (s sample) H() *histogram.Histogram {
-	return s.h
-}
-
-func (s sample) FH() *histogram.FloatHistogram {
-	return s.fh
-}
-
-func (s sample) Type() chunkenc.ValueType {
-	switch {
-	case s.h != nil:
-		return chunkenc.ValHistogram
-	case s.fh != nil:
-		return chunkenc.ValFloatHistogram
-	default:
-		return chunkenc.ValFloat
-	}
-}
-
-// PopulatedChunk creates a chunk populated with samples every second starting at minTime
-func PopulatedChunk(numSamples int, minTime int64) (chunks.Meta, error) {
-	samples := make([]Sample, numSamples)
-	for i := 0; i < numSamples; i++ {
-		samples[i] = sample{t: minTime + int64(i*1000), f: 1.0}
-	}
-	return ChunkFromSamples(samples)
-}
-
-// GenerateSamples starting at start and counting up numSamples.
-func GenerateSamples(start, numSamples int) []Sample {
-	return generateSamples(start, numSamples, func(i int) Sample {
-		return sample{
-			t: int64(i),
-			f: float64(i),
-		}
-	})
-}
-
-func generateSamples(start, numSamples int, gen func(int) Sample) []Sample {
-	samples := make([]Sample, 0, numSamples)
-	for i := start; i < start+numSamples; i++ {
-		samples = append(samples, gen(i))
-	}
-	return samples
-}
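Note: the file deleted above carried the Sample and ChunkFromSamples test helpers. A self-contained usage sketch of the same API, assuming the helper keeps the signature shown above after its move into the chunks package (the destination is not shown in this diff):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
        "github.com/prometheus/prometheus/tsdb/chunkenc"
        "github.com/prometheus/prometheus/tsdb/chunks"
    )

    // floatSample is a minimal float-only implementation of chunks.Sample.
    type floatSample struct {
        t int64
        f float64
    }

    func (s floatSample) T() int64                      { return s.t }
    func (s floatSample) F() float64                    { return s.f }
    func (s floatSample) H() *histogram.Histogram       { return nil }
    func (s floatSample) FH() *histogram.FloatHistogram { return nil }
    func (s floatSample) Type() chunkenc.ValueType      { return chunkenc.ValFloat }

    func main() {
        // Encode two float samples into a single XOR chunk.
        meta, err := chunks.ChunkFromSamples([]chunks.Sample{
            floatSample{t: 0, f: 1.0},
            floatSample{t: 1000, f: 2.0},
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(meta.MinTime, meta.MaxTime) // 0 1000
    }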
@@ -14,7 +14,7 @@
 package tsdbutil

 import (
-	"math/rand"
+	"math"

 	"github.com/prometheus/prometheus/model/histogram"
 )
@@ -33,7 +33,7 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
 // GenerateTestHistogram but it is up to the user to set any known counter reset hint.
 func GenerateTestHistogram(i int) *histogram.Histogram {
 	return &histogram.Histogram{
-		Count:         10 + uint64(i*8),
+		Count:         12 + uint64(i*9),
 		ZeroCount:     2 + uint64(i),
 		ZeroThreshold: 0.001,
 		Sum:           18.4 * float64(i+1),
@@ -53,7 +53,8 @@ func GenerateTestHistogram(i int) *histogram.Histogram {

 func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) {
 	for x := 0; x < n; x++ {
-		r = append(r, GenerateTestGaugeHistogram(rand.Intn(n)))
+		i := int(math.Sin(float64(x))*100) + 100
+		r = append(r, GenerateTestGaugeHistogram(i))
 	}
 	return r
 }
@@ -78,7 +79,7 @@ func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) {
 // GenerateTestFloatHistogram but it is up to the user to set any known counter reset hint.
 func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram {
 	return &histogram.FloatHistogram{
-		Count:         10 + float64(i*8),
+		Count:         12 + float64(i*9),
 		ZeroCount:     2 + float64(i),
 		ZeroThreshold: 0.001,
 		Sum:           18.4 * float64(i+1),
@@ -98,7 +99,8 @@ func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram {

 func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) {
 	for x := 0; x < n; x++ {
-		r = append(r, GenerateTestGaugeFloatHistogram(rand.Intn(n)))
+		i := int(math.Sin(float64(x))*100) + 100
+		r = append(r, GenerateTestGaugeFloatHistogram(i))
 	}
 	return r
 }
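Note: the hunks above swap rand.Intn for a sine wave over the loop counter, so the generated gauge histograms are identical on every run. A standalone sketch of the index sequence this produces:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        for x := 0; x < 5; x++ {
            // Deterministic "pseudo-random" index in [0, 200].
            i := int(math.Sin(float64(x))*100) + 100
            fmt.Println(i) // 100, 184, 190, 114, 25 — the same on every run
        }
    }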
@@ -16,11 +16,12 @@ package wlog

 import (
 	"bytes"
+	"crypto/rand"
 	"encoding/binary"
 	"fmt"
 	"hash/crc32"
 	"io"
-	"math/rand"
+	"math/big"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -252,8 +253,11 @@ func generateRandomEntries(w *WL, records chan []byte) error {
 		default:
 			sz = pageSize * 8
 		}
-		rec := make([]byte, rand.Int63n(sz))
+		n, err := rand.Int(rand.Reader, big.NewInt(sz))
+		if err != nil {
+			return err
+		}
+		rec := make([]byte, n.Int64())
 		if _, err := rand.Read(rec); err != nil {
 			return err
 		}
@@ -262,7 +266,11 @@ func generateRandomEntries(w *WL, records chan []byte) error {

 		// Randomly batch up records.
 		recs = append(recs, rec)
-		if rand.Intn(4) < 3 {
+		n, err = rand.Int(rand.Reader, big.NewInt(int64(4)))
+		if err != nil {
+			return err
+		}
+		if int(n.Int64()) < 3 {
 			if err := w.Log(recs...); err != nil {
 				return err
 			}
@@ -16,9 +16,9 @@ package wlog

 import (
 	"bytes"
+	"crypto/rand"
 	"fmt"
 	"io"
-	"math/rand"
 	"os"
 	"path/filepath"
 	"testing"
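Note: the wlog test hunks above replace math/rand with crypto/rand, whose bounded-integer API goes through math/big and returns an error instead of panicking. A minimal standalone sketch of the pattern:

    package main

    import (
        "crypto/rand"
        "fmt"
        "math/big"
    )

    func main() {
        // Uniform integer in [0, 4) — the crypto/rand counterpart of rand.Intn(4).
        n, err := rand.Int(rand.Reader, big.NewInt(4))
        if err != nil {
            panic(err)
        }
        fmt.Println(n.Int64())

        // rand.Read fills a buffer with random bytes, same name as in math/rand.
        buf := make([]byte, 8)
        if _, err := rand.Read(buf); err != nil {
            panic(err)
        }
        fmt.Printf("%x\n", buf)
    }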
@@ -100,6 +100,7 @@ type ScrapePoolsRetriever interface {
 type TargetRetriever interface {
 	TargetsActive() map[string][]*scrape.Target
 	TargetsDropped() map[string][]*scrape.Target
+	TargetsDroppedCounts() map[string]int
 }

 // AlertmanagerRetriever provides a list of all/dropped AlertManager URLs.
@@ -898,8 +899,9 @@ type DroppedTarget struct {

 // TargetDiscovery has all the active targets.
 type TargetDiscovery struct {
 	ActiveTargets       []*Target        `json:"activeTargets"`
 	DroppedTargets      []*DroppedTarget `json:"droppedTargets"`
+	DroppedTargetCounts map[string]int   `json:"droppedTargetCounts"`
 }

 // GlobalURLOptions contains fields used for deriving the global URL for local targets.
@@ -1039,6 +1041,9 @@ func (api *API) targets(r *http.Request) apiFuncResult {
 		} else {
 			res.ActiveTargets = []*Target{}
 		}
+		if showDropped {
+			res.DroppedTargetCounts = api.targetRetriever(r.Context()).TargetsDroppedCounts()
+		}
 		if showDropped {
 			targetsDropped := api.targetRetriever(r.Context()).TargetsDropped()
 			droppedKeys, numTargets := sortKeys(targetsDropped)
@@ -56,6 +56,17 @@ import (
 	"github.com/prometheus/prometheus/util/teststorage"
 )

+var testEngine = promql.NewEngine(promql.EngineOpts{
+	Logger:                   nil,
+	Reg:                      nil,
+	MaxSamples:               10000,
+	Timeout:                  100 * time.Second,
+	NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 },
+	EnableAtModifier:         true,
+	EnableNegativeOffset:     true,
+	EnablePerStepStats:       true,
+})
+
 // testMetaStore satisfies the scrape.MetricMetadataStore interface.
 // It is used to inject specific metadata as part of a test case.
 type testMetaStore struct {
@@ -137,6 +148,14 @@ func (t testTargetRetriever) TargetsDropped() map[string][]*scrape.Target {
 	return t.droppedTargets
 }

+func (t testTargetRetriever) TargetsDroppedCounts() map[string]int {
+	r := make(map[string]int)
+	for k, v := range t.droppedTargets {
+		r[k] = len(v)
+	}
+	return r
+}
+
 func (t *testTargetRetriever) SetMetadataStoreForTargets(identifier string, metadata scrape.MetricMetadataStore) error {
 	targets, ok := t.activeTargets[identifier]

@@ -297,7 +316,7 @@ var sampleFlagMap = map[string]string{
 }

 func TestEndpoints(t *testing.T) {
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 1m
 			test_metric1{foo="bar"} 0+100x100
 			test_metric1{foo="boo"} 1+0x100
@@ -308,6 +327,7 @@ func TestEndpoints(t *testing.T) {
 			test_metric4{foo="boo", dup="1"} 1+0x100
 			test_metric4{foo="boo"} 1+0x100
 	`)
+	t.Cleanup(func() { storage.Close() })

 	start := time.Unix(0, 0)
 	exemplars := []exemplar.QueryResult{
@@ -353,15 +373,10 @@ func TestEndpoints(t *testing.T) {
 		},
 	}
 	for _, ed := range exemplars {
-		suite.ExemplarStorage().AppendExemplar(0, ed.SeriesLabels, ed.Exemplars[0])
+		_, err := storage.AppendExemplar(0, ed.SeriesLabels, ed.Exemplars[0])
 		require.NoError(t, err, "failed to add exemplar: %+v", ed.Exemplars[0])
 	}

-	require.NoError(t, err)
-	defer suite.Close()
-
-	require.NoError(t, suite.Run())
-
 	now := time.Now()

 	t.Run("local", func(t *testing.T) {
@@ -375,9 +390,9 @@ func TestEndpoints(t *testing.T) {
 		testTargetRetriever := setupTestTargetRetriever(t)

 		api := &API{
-			Queryable:             suite.Storage(),
-			QueryEngine:           suite.QueryEngine(),
-			ExemplarQueryable:     suite.ExemplarQueryable(),
+			Queryable:             storage,
+			QueryEngine:           testEngine,
+			ExemplarQueryable:     storage.ExemplarQueryable(),
 			targetRetriever:       testTargetRetriever.toFactory(),
 			alertmanagerRetriever: testAlertmanagerRetriever{}.toFactory(),
 			flagsMap:              sampleFlagMap,
@@ -386,14 +401,14 @@ func TestEndpoints(t *testing.T) {
 			ready:          func(f http.HandlerFunc) http.HandlerFunc { return f },
 			rulesRetriever: algr.toFactory(),
 		}
-		testEndpoints(t, api, testTargetRetriever, suite.ExemplarStorage(), true)
+		testEndpoints(t, api, testTargetRetriever, storage, true)
 	})

 	// Run all the API tests against a API that is wired to forward queries via
 	// the remote read client to a test server, which in turn sends them to the
-	// data from the test suite.
+	// data from the test storage.
 	t.Run("remote", func(t *testing.T) {
-		server := setupRemote(suite.Storage())
+		server := setupRemote(storage)
 		defer server.Close()

 		u, err := url.Parse(server.URL)
@@ -438,8 +453,8 @@ func TestEndpoints(t *testing.T) {

 		api := &API{
 			Queryable:             remote,
-			QueryEngine:           suite.QueryEngine(),
-			ExemplarQueryable:     suite.ExemplarQueryable(),
+			QueryEngine:           testEngine,
+			ExemplarQueryable:     storage.ExemplarQueryable(),
 			targetRetriever:       testTargetRetriever.toFactory(),
 			alertmanagerRetriever: testAlertmanagerRetriever{}.toFactory(),
 			flagsMap:              sampleFlagMap,
@@ -448,8 +463,7 @@ func TestEndpoints(t *testing.T) {
 			ready:          func(f http.HandlerFunc) http.HandlerFunc { return f },
 			rulesRetriever: algr.toFactory(),
 		}
-		testEndpoints(t, api, testTargetRetriever, suite.ExemplarStorage(), false)
+		testEndpoints(t, api, testTargetRetriever, storage, false)
 	})
 }

@@ -462,7 +476,7 @@ func (b byLabels) Less(i, j int) bool { return labels.Compare(b[i], b[j]) < 0 }
 func TestGetSeries(t *testing.T) {
 	// TestEndpoints doesn't have enough label names to test api.labelNames
 	// endpoint properly. Hence we test it separately.
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 1m
 			test_metric1{foo1="bar", baz="abc"} 0+100x100
 			test_metric1{foo2="boo"} 1+0x100
@@ -470,11 +484,9 @@ func TestGetSeries(t *testing.T) {
 			test_metric2{foo="boo", xyz="qwerty"} 1+0x100
 			test_metric2{foo="baz", abc="qwerty"} 1+0x100
 	`)
-	require.NoError(t, err)
-	defer suite.Close()
-	require.NoError(t, suite.Run())
+	t.Cleanup(func() { storage.Close() })
 	api := &API{
-		Queryable: suite.Storage(),
+		Queryable: storage,
 	}
 	request := func(method string, matchers ...string) (*http.Request, error) {
 		u, err := url.Parse("http://example.com")
@@ -568,7 +580,7 @@ func TestGetSeries(t *testing.T) {

 func TestQueryExemplars(t *testing.T) {
 	start := time.Unix(0, 0)
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 1m
 			test_metric1{foo="bar"} 0+100x100
 			test_metric1{foo="boo"} 1+0x100
@@ -579,15 +591,12 @@ func TestQueryExemplars(t *testing.T) {
 			test_metric4{foo="boo", dup="1"} 1+0x100
 			test_metric4{foo="boo"} 1+0x100
 	`)
-	require.NoError(t, err)
-	defer suite.Close()
-
-	require.NoError(t, suite.Run())
+	t.Cleanup(func() { storage.Close() })

 	api := &API{
-		Queryable:         suite.Storage(),
-		QueryEngine:       suite.QueryEngine(),
-		ExemplarQueryable: suite.ExemplarQueryable(),
+		Queryable:         storage,
+		QueryEngine:       testEngine,
+		ExemplarQueryable: storage.ExemplarQueryable(),
 	}

 	request := func(method string, qs url.Values) (*http.Request, error) {
@@ -665,7 +674,7 @@ func TestQueryExemplars(t *testing.T) {
 		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
-			es := suite.ExemplarStorage()
+			es := storage
 			ctx := context.Background()

 			for _, te := range tc.exemplars {
@@ -692,7 +701,7 @@ func TestQueryExemplars(t *testing.T) {
 func TestLabelNames(t *testing.T) {
 	// TestEndpoints doesn't have enough label names to test api.labelNames
 	// endpoint properly. Hence we test it separately.
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 1m
 			test_metric1{foo1="bar", baz="abc"} 0+100x100
 			test_metric1{foo2="boo"} 1+0x100
@@ -700,11 +709,9 @@ func TestLabelNames(t *testing.T) {
 			test_metric2{foo="boo", xyz="qwerty"} 1+0x100
 			test_metric2{foo="baz", abc="qwerty"} 1+0x100
 	`)
-	require.NoError(t, err)
-	defer suite.Close()
-	require.NoError(t, suite.Run())
+	t.Cleanup(func() { storage.Close() })
 	api := &API{
-		Queryable: suite.Storage(),
+		Queryable: storage,
 	}
 	request := func(method string, matchers ...string) (*http.Request, error) {
 		u, err := url.Parse("http://example.com")
@@ -793,14 +800,12 @@ func (testStats) Builtin() (_ stats.BuiltinStats) {
 }

 func TestStats(t *testing.T) {
-	suite, err := promql.NewTest(t, ``)
-	require.NoError(t, err)
-	defer suite.Close()
-	require.NoError(t, suite.Run())
+	storage := teststorage.New(t)
+	t.Cleanup(func() { storage.Close() })

 	api := &API{
-		Queryable:   suite.Storage(),
-		QueryEngine: suite.QueryEngine(),
+		Queryable:   storage,
+		QueryEngine: testEngine,
 		now: func() time.Time {
 			return time.Unix(123, 0)
 		},
@@ -1384,6 +1389,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 						},
 					},
 				},
+				DroppedTargetCounts: map[string]int{"blackbox": 1},
 			},
 		},
 		{
@@ -1436,6 +1442,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 						},
 					},
 				},
+				DroppedTargetCounts: map[string]int{"blackbox": 1},
 			},
 		},
 		{
@@ -1498,6 +1505,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 						},
 					},
 				},
+				DroppedTargetCounts: map[string]int{"blackbox": 1},
 			},
 		},
 		// With a matching metric.
@@ -3417,27 +3425,57 @@ func TestReturnAPIError(t *testing.T) {
 var testResponseWriter = httptest.ResponseRecorder{}

 func BenchmarkRespond(b *testing.B) {
-	b.ReportAllocs()
-	request, err := http.NewRequest(http.MethodGet, "/does-not-matter", nil)
-	require.NoError(b, err)
 	points := []promql.FPoint{}
 	for i := 0; i < 10000; i++ {
 		points = append(points, promql.FPoint{F: float64(i * 1000000), T: int64(i)})
 	}
-	response := &QueryData{
-		ResultType: parser.ValueTypeMatrix,
-		Result: promql.Matrix{
-			promql.Series{
-				Floats: points,
-				Metric: labels.EmptyLabels(),
-			},
-		},
-	}
-	b.ResetTimer()
-	api := API{}
-	api.InstallCodec(JSONCodec{})
-	for n := 0; n < b.N; n++ {
-		api.respond(&testResponseWriter, request, response, nil)
+	matrix := promql.Matrix{}
+	for i := 0; i < 1000; i++ {
+		matrix = append(matrix, promql.Series{
+			Metric: labels.FromStrings("__name__", fmt.Sprintf("series%v", i),
+				"label", fmt.Sprintf("series%v", i),
+				"label2", fmt.Sprintf("series%v", i)),
+			Floats: points[:10],
+		})
+	}
+	series := []labels.Labels{}
+	for i := 0; i < 1000; i++ {
+		series = append(series, labels.FromStrings("__name__", fmt.Sprintf("series%v", i),
+			"label", fmt.Sprintf("series%v", i),
+			"label2", fmt.Sprintf("series%v", i)))
+	}
+
+	cases := []struct {
+		name     string
+		response interface{}
+	}{
+		{name: "10000 points no labels", response: &QueryData{
+			ResultType: parser.ValueTypeMatrix,
+			Result: promql.Matrix{
+				promql.Series{
+					Floats: points,
+					Metric: labels.EmptyLabels(),
+				},
+			},
+		}},
+		{name: "1000 labels", response: series},
+		{name: "1000 series 10 points", response: &QueryData{
+			ResultType: parser.ValueTypeMatrix,
+			Result:     matrix,
+		}},
+	}
+	for _, c := range cases {
+		b.Run(c.name, func(b *testing.B) {
+			b.ReportAllocs()
+			request, err := http.NewRequest(http.MethodGet, "/does-not-matter", nil)
+			require.NoError(b, err)
+			b.ResetTimer()
+			api := API{}
+			api.InstallCodec(JSONCodec{})
+			for n := 0; n < b.N; n++ {
+				api.respond(&testResponseWriter, request, c.response, nil)
+			}
+		})
 	}
 }
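Note: the api_test.go hunks above converge on a single setup pattern — a shared testEngine plus a pre-loaded storage — replacing the per-test promql.NewTest/Run/Close dance. A hedged sketch of that pattern in isolation (promql.LoadedStorage is taken, from its uses above, to return a teststorage-backed storage with the series already appended; the test name and metric are hypothetical):

    func TestWithLoadedStorage(t *testing.T) {
        storage := promql.LoadedStorage(t, `
            load 1m
                my_metric{foo="bar"} 0+100x100
        `)
        t.Cleanup(func() { storage.Close() })

        api := &API{
            Queryable:   storage,
            QueryEngine: testEngine,
        }
        _ = api
    }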
@@ -229,6 +229,11 @@ func (DummyTargetRetriever) TargetsDropped() map[string][]*scrape.Target {
 	return map[string][]*scrape.Target{}
 }

+// TargetsDroppedCounts implements targetRetriever.
+func (DummyTargetRetriever) TargetsDroppedCounts() map[string]int {
+	return nil
+}
+
 // DummyAlertmanagerRetriever implements AlertmanagerRetriever.
 type DummyAlertmanagerRetriever struct{}
@@ -19,6 +19,7 @@ import (
 	jsoniter "github.com/json-iterator/go"

 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/util/jsonutil"
 )
@@ -29,6 +30,7 @@ func init() {
 	jsoniter.RegisterTypeEncoderFunc("promql.FPoint", marshalFPointJSON, marshalPointJSONIsEmpty)
 	jsoniter.RegisterTypeEncoderFunc("promql.HPoint", marshalHPointJSON, marshalPointJSONIsEmpty)
 	jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty)
+	jsoniter.RegisterTypeEncoderFunc("labels.Labels", unsafeMarshalLabelsJSON, labelsIsEmpty)
 }

 // JSONCodec is a Codec that encodes API responses as JSON.
@@ -68,12 +70,7 @@ func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 	s := *((*promql.Series)(ptr))
 	stream.WriteObjectStart()
 	stream.WriteObjectField(`metric`)
-	m, err := s.Metric.MarshalJSON()
-	if err != nil {
-		stream.Error = err
-		return
-	}
-	stream.SetBuffer(append(stream.Buffer(), m...))
+	marshalLabelsJSON(s.Metric, stream)

 	for i, p := range s.Floats {
 		stream.WriteMore()
@@ -129,12 +126,7 @@ func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 	s := *((*promql.Sample)(ptr))
 	stream.WriteObjectStart()
 	stream.WriteObjectField(`metric`)
-	m, err := s.Metric.MarshalJSON()
-	if err != nil {
-		stream.Error = err
-		return
-	}
-	stream.SetBuffer(append(stream.Buffer(), m...))
+	marshalLabelsJSON(s.Metric, stream)
 	stream.WriteMore()
 	if s.H == nil {
 		stream.WriteObjectField(`value`)
@@ -194,12 +186,7 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {

 	// "labels" key.
 	stream.WriteObjectField(`labels`)
-	lbls, err := p.Labels.MarshalJSON()
-	if err != nil {
-		stream.Error = err
-		return
-	}
-	stream.SetBuffer(append(stream.Buffer(), lbls...))
+	marshalLabelsJSON(p.Labels, stream)

 	// "value" key.
 	stream.WriteMore()
@@ -217,3 +204,28 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 func marshalExemplarJSONEmpty(unsafe.Pointer) bool {
 	return false
 }
+
+func unsafeMarshalLabelsJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
+	labelsPtr := (*labels.Labels)(ptr)
+	marshalLabelsJSON(*labelsPtr, stream)
+}
+
+func marshalLabelsJSON(lbls labels.Labels, stream *jsoniter.Stream) {
+	stream.WriteObjectStart()
+	i := 0
+	lbls.Range(func(v labels.Label) {
+		if i != 0 {
+			stream.WriteMore()
+		}
+		i++
+		stream.WriteString(v.Name)
+		stream.WriteRaw(`:`)
+		stream.WriteString(v.Value)
+	})
+	stream.WriteObjectEnd()
+}
+
+func labelsIsEmpty(ptr unsafe.Pointer) bool {
+	labelsPtr := (*labels.Labels)(ptr)
+	return labelsPtr.IsEmpty()
+}
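Note: the codec hunks above stop round-tripping labels through Metric.MarshalJSON (which allocates an intermediate []byte per series) and instead write each label pair straight into the jsoniter stream. A standalone sketch of the stream calls involved, with the label name and value chosen for illustration:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        cfg := jsoniter.ConfigCompatibleWithStandardLibrary
        stream := cfg.BorrowStream(nil)
        defer cfg.ReturnStream(stream)

        // Same call sequence as marshalLabelsJSON above, for one label pair.
        stream.WriteObjectStart()
        stream.WriteString("__name__") // WriteString emits a quoted JSON string
        stream.WriteRaw(`:`)
        stream.WriteString("up")
        stream.WriteObjectEnd()

        fmt.Println(string(stream.Buffer())) // {"__name__":"up"}
    }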
@@ -36,6 +36,7 @@ import (
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/util/teststorage"
 )

 var scenarios = map[string]struct {
@@ -199,7 +200,7 @@ test_metric_without_labels{instance="baz"} 1001 6000000
 }

 func TestFederation(t *testing.T) {
-	suite, err := promql.NewTest(t, `
+	storage := promql.LoadedStorage(t, `
 		load 1m
 			test_metric1{foo="bar",instance="i"} 0+100x100
 			test_metric1{foo="boo",instance="i"} 1+0x100
@@ -208,17 +209,10 @@ func TestFederation(t *testing.T) {
 			test_metric_stale 1+10x99 stale
 			test_metric_old 1+10x98
 	`)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer suite.Close()
-
-	if err := suite.Run(); err != nil {
-		t.Fatal(err)
-	}
+	t.Cleanup(func() { storage.Close() })

 	h := &Handler{
-		localStorage:  &dbAdapter{suite.TSDB()},
+		localStorage:  &dbAdapter{storage.DB},
 		lookbackDelta: 5 * time.Minute,
 		now:           func() model.Time { return 101 * 60 * 1000 }, // 101min after epoch.
 		config: &config.Config{
@@ -305,21 +299,14 @@ func normalizeBody(body *bytes.Buffer) string {
 }

 func TestFederationWithNativeHistograms(t *testing.T) {
-	suite, err := promql.NewTest(t, "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer suite.Close()
-
-	if err := suite.Run(); err != nil {
-		t.Fatal(err)
-	}
+	storage := teststorage.New(t)
+	t.Cleanup(func() { storage.Close() })

 	var expVec promql.Vector

-	db := suite.TSDB()
+	db := storage.DB
 	hist := &histogram.Histogram{
-		Count:         10,
+		Count:         12,
 		ZeroCount:     2,
 		ZeroThreshold: 0.001,
 		Sum:           39.4,
@@ -354,6 +341,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
 	for i := 0; i < 6; i++ {
 		l := labels.FromStrings("__name__", "test_metric", "foo", fmt.Sprintf("%d", i))
 		expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", fmt.Sprintf("%d", i))
+		var err error
 		switch i {
 		case 0, 3:
 			_, err = app.Append(0, l, 100*60*1000, float64(i*100))
@@ -371,6 +359,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
 			})
 		default:
 			hist.ZeroCount++
+			hist.Count++
 			_, err = app.AppendHistogram(0, l, 100*60*1000, hist.Copy(), nil)
 			expVec = append(expVec, promql.Sample{
 				T: 100 * 60 * 1000,
@@ -383,7 +372,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
 	require.NoError(t, app.Commit())

 	h := &Handler{
-		localStorage:  &dbAdapter{suite.TSDB()},
+		localStorage:  &dbAdapter{db},
 		lookbackDelta: 5 * time.Minute,
 		now:           func() model.Time { return 101 * 60 * 1000 }, // 101min after epoch.
 		config: &config.Config{
@@ -1 +1 @@
-v16.14.2
+v20.5.1
@@ -1,9 +1,9 @@
 ## Overview
 The `ui` directory contains static files and templates used in the web UI. For
 easier distribution they are compressed (c.f. Makefile) and statically compiled
 into the Prometheus binary using the embed package.

 During development it is more convenient to always use the files on disk to
 directly see changes without recompiling.
 To make this work, remove the `builtinassets` build tag in the `flags` entry
 in `.promu.yml`, and then `make build` (or build Prometheus using
@@ -26,7 +26,7 @@ react-app and also by others consumers (like Thanos)
 To be able to build the react application you need:

 * npm >= v7
-* node >= v16
+* node >= v20

 ### Installing npm dependencies
@@ -30,7 +30,7 @@
   "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
   "dependencies": {
     "@prometheus-io/lezer-promql": "0.46.0",
-    "lru-cache": "^6.0.0"
+    "lru-cache": "^7.18.3"
   },
   "devDependencies": {
     "@codemirror/autocomplete": "^6.7.1",
@@ -39,9 +39,8 @@
     "@codemirror/state": "^6.2.1",
     "@codemirror/view": "^6.13.0",
     "@lezer/common": "^1.0.3",
-    "@lezer/lr": "^1.3.6",
     "@lezer/highlight": "^1.1.6",
-    "@types/lru-cache": "^5.1.1",
+    "@lezer/lr": "^1.3.6",
     "isomorphic-fetch": "^3.0.0",
     "nock": "^13.3.1"
   },
@@ -281,7 +281,7 @@ class Cache {
   private flags: Record<string, string>;

   constructor(config?: CacheConfig) {
-    const maxAge = config && config.maxAge ? config.maxAge : 5 * 60 * 1000;
+    const maxAge: LRUCache.LimitedByTTL = { ttl: config && config.maxAge ? config.maxAge : 5 * 60 * 1000 };
     this.completeAssociation = new LRUCache<string, Map<string, Set<string>>>(maxAge);
     this.metricMetadata = {};
     this.labelValues = new LRUCache<string, string[]>(maxAge);
@@ -239,6 +239,18 @@ export const functionIdentifierTerms = [
     info: 'Return the sum of observations from a native histogram (experimental feature)',
     type: 'function',
   },
+  {
+    label: 'histogram_stddev',
+    detail: 'function',
+    info: 'Estimate the standard deviation of observations from a native histogram (experimental feature)',
+    type: 'function',
+  },
+  {
+    label: 'histogram_stdvar',
+    detail: 'function',
+    info: 'Estimate the standard variance of observations from a native histogram (experimental feature)',
+    type: 'function',
+  },
   {
     label: 'holt_winters',
     detail: 'function',
@@ -430,7 +442,7 @@ export const functionIdentifierTerms = [
   {
     label: 'stdvar_over_time',
     detail: 'function',
-    info: 'Calculate the standard variation within input series over time',
+    info: 'Calculate the standard variance within input series over time',
     type: 'function',
   },
   {
@@ -752,6 +752,30 @@ describe('promql operations', () => {
       expectedValueType: ValueType.vector,
       expectedDiag: [],
     },
+    {
+      expr:
+        'histogram_stddev( # Root of the query, final result, returns the standard deviation of observations.\n' +
+        '  sum by(method, path) ( # Argument to histogram_stddev(), an aggregated histogram.\n' +
+        '    rate( # Argument to sum(), the per-second increase of a histogram over 5m.\n' +
+        '      demo_api_request_duration_seconds{job="demo"}[5m] # Argument to rate(), a vector of sparse histogram series over the last 5m.\n' +
+        '    )\n' +
+        '  )\n' +
+        ')',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      expr:
+        'histogram_stdvar( # Root of the query, final result, returns the standard variance of observations.\n' +
+        '  sum by(method, path) ( # Argument to histogram_stdvar(), an aggregated histogram.\n' +
+        '    rate( # Argument to sum(), the per-second increase of a histogram over 5m.\n' +
+        '      demo_api_request_duration_seconds{job="demo"}[5m] # Argument to rate(), a vector of sparse histogram series over the last 5m.\n' +
+        '    )\n' +
+        '  )\n' +
+        ')',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
     {
       expr: '1 @ start()',
       expectedValueType: ValueType.scalar,
@@ -42,6 +42,8 @@ import {
   HistogramCount,
   HistogramFraction,
   HistogramQuantile,
+  HistogramStdDev,
+  HistogramStdVar,
   HistogramSum,
   HoltWinters,
   Hour,
@@ -282,6 +284,18 @@ const promqlFunctions: { [key: number]: PromQLFunction } = {
     variadic: 0,
     returnType: ValueType.vector,
   },
+  [HistogramStdDev]: {
+    name: 'histogram_stddev',
+    argTypes: [ValueType.vector],
+    variadic: 0,
+    returnType: ValueType.vector,
+  },
+  [HistogramStdVar]: {
+    name: 'histogram_stdvar',
+    argTypes: [ValueType.vector],
+    variadic: 0,
+    returnType: ValueType.vector,
+  },
   [HistogramSum]: {
     name: 'histogram_sum',
     argTypes: [ValueType.vector],
@@ -31,11 +31,11 @@
   },
   "devDependencies": {
     "@lezer/generator": "^1.2.3",
-    "@lezer/lr": "^1.3.6",
-    "@lezer/highlight": "^1.1.6"
+    "@lezer/highlight": "^1.1.6",
+    "@lezer/lr": "^1.3.6"
   },
   "peerDependencies": {
-    "@lezer/lr": "^1.2.3",
-    "@lezer/highlight": "^1.1.2"
+    "@lezer/highlight": "^1.1.2",
+    "@lezer/lr": "^1.2.3"
   }
 }
@@ -135,6 +135,8 @@ FunctionIdentifier {
   HistogramCount |
   HistogramFraction |
   HistogramQuantile |
+  HistogramStdDev |
+  HistogramStdVar |
   HistogramSum |
   HoltWinters |
   Hour |
@@ -362,6 +364,8 @@ NumberLiteral {
   HistogramCount { condFn<"histogram_count"> }
   HistogramFraction { condFn<"histogram_fraction"> }
   HistogramQuantile { condFn<"histogram_quantile"> }
+  HistogramStdDev { condFn<"histogram_stddev"> }
+  HistogramStdVar { condFn<"histogram_stdvar"> }
   HistogramSum { condFn<"histogram_sum"> }
   HoltWinters { condFn<"holt_winters"> }
   Hour { condFn<"hour"> }
Some files were not shown because too many files have changed in this diff.