Merge branch 'main' into 3.0-main-sync
Conflicts:
    promql/engine_test.go
        Resolved by picking main changes but adjusting total_samples for query
        "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]" to 312.
        Via https://github.com/prometheus/prometheus/pull/13662 this histogram now
        stores 13 values per timestamp, but via
        https://github.com/prometheus/prometheus/pull/13904 the range query is now
        left-open.
    promql/promqltest/testdata/functions.test
        Resolved by picking changes in main. See also
        https://github.com/prometheus/prometheus/pull/13662, but adjust some range
        selectors (`s/1m/2m/`) to account for
        https://github.com/prometheus/prometheus/pull/13904.
    promql/promqltest/testdata/histograms.test
        Resolved by picking changes in main. See also
        https://github.com/prometheus/prometheus/pull/13662, but adjust some range
        selectors (`s/5m/10m/`) to account for
        https://github.com/prometheus/prometheus/pull/13904.

Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
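For context on the selector widening mentioned above, here is a minimal, self-contained Go sketch (editorial, not part of the commit; the sample timestamps are hypothetical) of how a left-open range (t-r, t] drops the sample sitting exactly at t-r, which is why the conflict resolution widens the range selectors and adjusts total_samples:

package main

import "fmt"

// countInWindow counts sample timestamps falling into the window ending at t
// with range r, under closed [t-r, t] vs. left-open (t-r, t] semantics.
func countInWindow(samples []int64, t, r int64, leftOpen bool) int {
	n := 0
	for _, ts := range samples {
		if ts > t || (leftOpen && ts <= t-r) || (!leftOpen && ts < t-r) {
			continue
		}
		n++
	}
	return n
}

func main() {
	var samples []int64
	for ts := int64(0); ts <= 120; ts += 10 {
		samples = append(samples, ts) // one sample every 10s
	}
	fmt.Println(countInWindow(samples, 120, 60, false)) // 7: closed window includes the sample at t-r
	fmt.Println(countInWindow(samples, 120, 60, true))  // 6: left-open window drops the sample at t-r
}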
This commit is contained in commit b4152309a4.

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
@@ -174,7 +174,7 @@ jobs:
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.59.0
+          version: v1.59.1
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'

diff --git a/MAINTAINERS.md b/MAINTAINERS.md
@@ -16,7 +16,7 @@ Maintainers for specific parts of the codebase:
 * `model/histogram` and other code related to native histograms: Björn Rabenstein (<beorn@grafana.com> / @beorn7),
   George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `storage`
-  * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
+  * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos (<npazosmendez@gmail.com> / @npazosmendez), Alex Greenbank (<alex.greenbank@grafana.com> / @alexgreenbank)
 * `otlptranslator`: Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
diff --git a/Makefile b/Makefile
@@ -91,7 +91,7 @@ endif

 promql/parser/generated_parser.y.go: promql/parser/generated_parser.y
 	@echo ">> running goyacc to generate the .go file."
-	@goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+	@$(FIRST_GOPATH)/bin/goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y

 .PHONY: clean-parser
 clean-parser:

diff --git a/Makefile.common b/Makefile.common
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.0
+GOLANGCI_LINT_VERSION ?= v1.59.1
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))

diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
@@ -443,6 +443,9 @@ func main() {
 	serverOnlyFlag(a, "alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
 		Default("10000").IntVar(&cfg.notifier.QueueCapacity)

+	serverOnlyFlag(a, "alertmanager.drain-notification-queue-on-shutdown", "Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down.").
+		Default("true").BoolVar(&cfg.notifier.DrainOnShutdown)
+
 	// TODO: Remove in Prometheus 3.0.
 	alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String()
diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go
@@ -72,7 +72,7 @@ func (p *queryLogTest) waitForPrometheus() error {
 	var err error
 	for x := 0; x < 20; x++ {
 		var r *http.Response
-		if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == 200 {
+		if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == http.StatusOK {
 			break
 		}
 		time.Sleep(500 * time.Millisecond)
diff --git a/config/config_test.go b/config/config_test.go
@@ -998,6 +998,7 @@ var expectedConf = &Config{
 				HostNetworkingHost: "localhost",
 				RefreshInterval:    model.Duration(60 * time.Second),
 				HTTPClientConfig:   config.DefaultHTTPClientConfig,
+				MatchFirstNetwork:  true,
 			},
 		},
 	},
diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go
@@ -42,28 +42,29 @@ import (
 )

 const (
 	ec2Label                     = model.MetaLabelPrefix + "ec2_"
 	ec2LabelAMI                  = ec2Label + "ami"
 	ec2LabelAZ                   = ec2Label + "availability_zone"
 	ec2LabelAZID                 = ec2Label + "availability_zone_id"
 	ec2LabelArch                 = ec2Label + "architecture"
 	ec2LabelIPv6Addresses        = ec2Label + "ipv6_addresses"
 	ec2LabelInstanceID           = ec2Label + "instance_id"
 	ec2LabelInstanceLifecycle    = ec2Label + "instance_lifecycle"
 	ec2LabelInstanceState        = ec2Label + "instance_state"
 	ec2LabelInstanceType         = ec2Label + "instance_type"
 	ec2LabelOwnerID              = ec2Label + "owner_id"
 	ec2LabelPlatform             = ec2Label + "platform"
+	ec2LabelPrimaryIPv6Addresses = ec2Label + "primary_ipv6_addresses"
 	ec2LabelPrimarySubnetID      = ec2Label + "primary_subnet_id"
 	ec2LabelPrivateDNS           = ec2Label + "private_dns_name"
 	ec2LabelPrivateIP            = ec2Label + "private_ip"
 	ec2LabelPublicDNS            = ec2Label + "public_dns_name"
 	ec2LabelPublicIP             = ec2Label + "public_ip"
 	ec2LabelRegion               = ec2Label + "region"
 	ec2LabelSubnetID             = ec2Label + "subnet_id"
 	ec2LabelTag                  = ec2Label + "tag_"
 	ec2LabelVPCID                = ec2Label + "vpc_id"
 	ec2LabelSeparator            = ","
 )

 // DefaultEC2SDConfig is the default EC2 SD configuration.
@@ -317,6 +318,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error

 		var subnets []string
 		var ipv6addrs []string
+		var primaryipv6addrs []string
 		subnetsMap := make(map[string]struct{})
 		for _, eni := range inst.NetworkInterfaces {
 			if eni.SubnetId == nil {
@@ -330,6 +332,15 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error

 			for _, ipv6addr := range eni.Ipv6Addresses {
 				ipv6addrs = append(ipv6addrs, *ipv6addr.Ipv6Address)
+				if *ipv6addr.IsPrimaryIpv6 {
+					// we might have to extend the slice with more than one element
+					// that could leave empty strings in the list which is intentional
+					// to keep the position/device index information
+					for int64(len(primaryipv6addrs)) <= *eni.Attachment.DeviceIndex {
+						primaryipv6addrs = append(primaryipv6addrs, "")
+					}
+					primaryipv6addrs[*eni.Attachment.DeviceIndex] = *ipv6addr.Ipv6Address
+				}
 			}
 		}
 		labels[ec2LabelSubnetID] = model.LabelValue(
@@ -342,6 +353,12 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 				strings.Join(ipv6addrs, ec2LabelSeparator) +
 					ec2LabelSeparator)
 		}
+		if len(primaryipv6addrs) > 0 {
+			labels[ec2LabelPrimaryIPv6Addresses] = model.LabelValue(
+				ec2LabelSeparator +
+					strings.Join(primaryipv6addrs, ec2LabelSeparator) +
+					ec2LabelSeparator)
+		}
 	}

 	for _, t := range inst.Tags {
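As an aside on the loop above: a standalone Go sketch (the sample interface data is hypothetical, not from the diff) of the device-index padding that keeps each primary IPv6 address at its network interface's attachment position:

package main

import "fmt"

func main() {
	// Hypothetical interfaces, deliberately out of attachment order.
	type eni struct {
		deviceIndex int
		primaryIPv6 string
	}
	enis := []eni{
		{deviceIndex: 2, primaryIPv6: "2600:1f18::2"},
		{deviceIndex: 0, primaryIPv6: "2600:1f18::1"},
	}

	var primary []string
	for _, e := range enis {
		// Grow the slice with "" placeholders so the address lands at the
		// index of its interface's DeviceIndex, as the diff's comment explains.
		for len(primary) <= e.deviceIndex {
			primary = append(primary, "")
		}
		primary[e.deviceIndex] = e.primaryIPv6
	}
	fmt.Printf("%q\n", primary) // ["2600:1f18::1" "" "2600:1f18::2"]
}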
diff --git a/discovery/eureka/client.go b/discovery/eureka/client.go
@@ -97,6 +97,7 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic
 		resp.Body.Close()
 	}()

+	//nolint:usestdlibvars
 	if resp.StatusCode/100 != 2 {
 		return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode)
 	}
diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go
@@ -87,6 +87,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
 		resp.Body.Close()
 	}()

+	//nolint:usestdlibvars
 	if resp.StatusCode/100 != 2 {
 		return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode)
 	}
diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go
@@ -22,8 +22,10 @@ import (
 	"strconv"
 	"time"

+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/client"
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
@@ -58,6 +60,7 @@ var DefaultDockerSDConfig = DockerSDConfig{
 	Filters:            []Filter{},
 	HostNetworkingHost: "localhost",
 	HTTPClientConfig:   config.DefaultHTTPClientConfig,
+	MatchFirstNetwork:  true,
 }

 func init() {
@@ -73,7 +76,8 @@ type DockerSDConfig struct {
 	Filters            []Filter `yaml:"filters"`
 	HostNetworkingHost string   `yaml:"host_networking_host"`

 	RefreshInterval model.Duration `yaml:"refresh_interval"`
+	MatchFirstNetwork bool `yaml:"match_first_network"`
 }

 // NewDiscovererMetrics implements discovery.Config.
@@ -119,6 +123,7 @@ type DockerDiscovery struct {
 	port               int
 	hostNetworkingHost string
 	filters            filters.Args
+	matchFirstNetwork  bool
 }

 // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
@@ -131,6 +136,7 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discove
 	d := &DockerDiscovery{
 		port:               conf.Port,
 		hostNetworkingHost: conf.HostNetworkingHost,
+		matchFirstNetwork:  conf.MatchFirstNetwork,
 	}

 	hostURL, err := url.Parse(conf.Host)
@@ -202,6 +208,11 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 		return nil, fmt.Errorf("error while computing network labels: %w", err)
 	}

+	allContainers := make(map[string]types.Container)
+	for _, c := range containers {
+		allContainers[c.ID] = c
+	}
+
 	for _, c := range containers {
 		if len(c.Names) == 0 {
 			continue
@@ -218,7 +229,50 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 			commonLabels[dockerLabelContainerLabelPrefix+ln] = v
 		}

-		for _, n := range c.NetworkSettings.Networks {
+		networks := c.NetworkSettings.Networks
+		containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode)
+		if len(networks) == 0 {
+			// Try to lookup shared networks
+			for {
+				if containerNetworkMode.IsContainer() {
+					tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
+					if !exists {
+						break
+					}
+					networks = tmpContainer.NetworkSettings.Networks
+					containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode)
+					if len(networks) > 0 {
+						break
+					}
+				} else {
+					break
+				}
+			}
+		}
+
+		if d.matchFirstNetwork && len(networks) > 1 {
+			// Match user defined network
+			if containerNetworkMode.IsUserDefined() {
+				networkMode := string(containerNetworkMode)
+				networks = map[string]*network.EndpointSettings{networkMode: networks[networkMode]}
+			} else {
+				// Get first network if container network mode has "none" value.
+				// This case appears under certain condition:
+				// 1. Container created with network set to "--net=none".
+				// 2. Disconnect network "none".
+				// 3. Reconnect network with user defined networks.
+				var first string
+				for k, n := range networks {
+					if n != nil {
+						first = k
+						break
+					}
+				}
+				networks = map[string]*network.EndpointSettings{first: networks[first]}
+			}
+		}
+
+		for _, n := range networks {
 			var added bool

 			for _, p := range c.Ports {
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
"github.com/go-kit/log"
|
||||||
|
@ -59,7 +60,7 @@ host: %s
|
||||||
tg := tgs[0]
|
tg := tgs[0]
|
||||||
require.NotNil(t, tg)
|
require.NotNil(t, tg)
|
||||||
require.NotNil(t, tg.Targets)
|
require.NotNil(t, tg.Targets)
|
||||||
require.Len(t, tg.Targets, 3)
|
require.Len(t, tg.Targets, 6)
|
||||||
|
|
||||||
for i, lbls := range []model.LabelSet{
|
for i, lbls := range []model.LabelSet{
|
||||||
{
|
{
|
||||||
|
@ -113,9 +114,259 @@ host: %s
|
||||||
"__meta_docker_container_network_mode": "host",
|
"__meta_docker_container_network_mode": "host",
|
||||||
"__meta_docker_network_ip": "",
|
"__meta_docker_network_ip": "",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"__address__": "172.20.0.2:3306",
|
||||||
|
"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_service": "mysql",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
|
||||||
|
"__meta_docker_container_name": "/dockersd_mysql",
|
||||||
|
"__meta_docker_container_network_mode": "dockersd_private",
|
||||||
|
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
|
||||||
|
"__meta_docker_network_ingress": "false",
|
||||||
|
"__meta_docker_network_internal": "false",
|
||||||
|
"__meta_docker_network_ip": "172.20.0.2",
|
||||||
|
"__meta_docker_network_name": "dockersd_private",
|
||||||
|
"__meta_docker_network_scope": "local",
|
||||||
|
"__meta_docker_port_private": "3306",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "172.20.0.2:33060",
|
||||||
|
"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_service": "mysql",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
|
||||||
|
"__meta_docker_container_name": "/dockersd_mysql",
|
||||||
|
"__meta_docker_container_network_mode": "dockersd_private",
|
||||||
|
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
|
||||||
|
"__meta_docker_network_ingress": "false",
|
||||||
|
"__meta_docker_network_internal": "false",
|
||||||
|
"__meta_docker_network_ip": "172.20.0.2",
|
||||||
|
"__meta_docker_network_name": "dockersd_private",
|
||||||
|
"__meta_docker_network_scope": "local",
|
||||||
|
"__meta_docker_port_private": "33060",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "172.20.0.2:9104",
|
||||||
|
"__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_service": "mysqlexporter",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
|
||||||
|
"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
|
||||||
|
"__meta_docker_container_name": "/dockersd_mysql_exporter",
|
||||||
|
"__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
|
||||||
|
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
|
||||||
|
"__meta_docker_network_ingress": "false",
|
||||||
|
"__meta_docker_network_internal": "false",
|
||||||
|
"__meta_docker_network_ip": "172.20.0.2",
|
||||||
|
"__meta_docker_network_name": "dockersd_private",
|
||||||
|
"__meta_docker_network_scope": "local",
|
||||||
|
"__meta_docker_port_private": "9104",
|
||||||
|
},
|
||||||
} {
|
} {
|
||||||
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
|
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
|
||||||
require.Equal(t, lbls, tg.Targets[i])
|
require.Equal(t, lbls, tg.Targets[i])
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDockerSDRefreshMatchAllNetworks(t *testing.T) {
|
||||||
|
sdmock := NewSDMock(t, "dockerprom")
|
||||||
|
sdmock.Setup()
|
||||||
|
|
||||||
|
e := sdmock.Endpoint()
|
||||||
|
url := e[:len(e)-1]
|
||||||
|
cfgString := fmt.Sprintf(`
|
||||||
|
---
|
||||||
|
host: %s
|
||||||
|
`, url)
|
||||||
|
var cfg DockerSDConfig
|
||||||
|
require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
|
||||||
|
|
||||||
|
cfg.MatchFirstNetwork = false
|
||||||
|
reg := prometheus.NewRegistry()
|
||||||
|
refreshMetrics := discovery.NewRefreshMetrics(reg)
|
||||||
|
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
|
||||||
|
require.NoError(t, metrics.Register())
|
||||||
|
defer metrics.Unregister()
|
||||||
|
defer refreshMetrics.Unregister()
|
||||||
|
d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tgs, err := d.refresh(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Len(t, tgs, 1)
|
||||||
|
|
||||||
|
tg := tgs[0]
|
||||||
|
require.NotNil(t, tg)
|
||||||
|
require.NotNil(t, tg.Targets)
|
||||||
|
require.Len(t, tg.Targets, 9)
|
||||||
|
|
||||||
|
sortFunc := func(labelSets []model.LabelSet) {
|
||||||
|
sort.Slice(labelSets, func(i, j int) bool {
|
||||||
|
return labelSets[i]["__address__"] < labelSets[j]["__address__"]
|
||||||
|
})
|
||||||
|
}
|
||||||
|
expected := []model.LabelSet{
|
||||||
|
{
|
||||||
|
"__address__": "172.19.0.2:9100",
|
||||||
|
"__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_service": "node",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
|
||||||
|
"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
|
||||||
|
"__meta_docker_container_label_prometheus_job": "node",
|
||||||
|
"__meta_docker_container_name": "/dockersd_node_1",
|
||||||
|
"__meta_docker_container_network_mode": "dockersd_default",
|
||||||
|
"__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
|
||||||
|
"__meta_docker_network_ingress": "false",
|
||||||
|
"__meta_docker_network_internal": "false",
|
||||||
|
"__meta_docker_network_ip": "172.19.0.2",
|
||||||
|
"__meta_docker_network_label_com_docker_compose_network": "default",
|
||||||
|
"__meta_docker_network_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_network_label_com_docker_compose_version": "1.25.0",
|
||||||
|
"__meta_docker_network_name": "dockersd_default",
|
||||||
|
"__meta_docker_network_scope": "local",
|
||||||
|
"__meta_docker_port_private": "9100",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "172.19.0.3:80",
|
||||||
|
"__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_service": "noport",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
|
||||||
|
"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
|
||||||
|
"__meta_docker_container_label_prometheus_job": "noport",
|
||||||
|
"__meta_docker_container_name": "/dockersd_noport_1",
|
||||||
|
"__meta_docker_container_network_mode": "dockersd_default",
|
||||||
|
"__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
|
||||||
|
"__meta_docker_network_ingress": "false",
|
||||||
|
"__meta_docker_network_internal": "false",
|
||||||
|
"__meta_docker_network_ip": "172.19.0.3",
|
||||||
|
"__meta_docker_network_label_com_docker_compose_network": "default",
|
||||||
|
"__meta_docker_network_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_network_label_com_docker_compose_version": "1.25.0",
|
||||||
|
"__meta_docker_network_name": "dockersd_default",
|
||||||
|
"__meta_docker_network_scope": "local",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "localhost",
|
||||||
|
"__meta_docker_container_id": "54ed6cc5c0988260436cb0e739b7b6c9cad6c439a93b4c4fdbe9753e1c94b189",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_service": "host_networking",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
|
||||||
|
"__meta_docker_container_name": "/dockersd_host_networking_1",
|
||||||
|
"__meta_docker_container_network_mode": "host",
|
||||||
|
"__meta_docker_network_ip": "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "172.20.0.2:3306",
|
||||||
|
"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_service": "mysql",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
|
||||||
|
"__meta_docker_container_name": "/dockersd_mysql",
|
||||||
|
"__meta_docker_container_network_mode": "dockersd_private",
|
||||||
|
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
|
||||||
|
"__meta_docker_network_ingress": "false",
|
||||||
|
"__meta_docker_network_internal": "false",
|
||||||
|
"__meta_docker_network_ip": "172.20.0.2",
|
||||||
|
"__meta_docker_network_name": "dockersd_private",
|
||||||
|
"__meta_docker_network_scope": "local",
|
||||||
|
"__meta_docker_port_private": "3306",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "172.20.0.2:33060",
|
||||||
|
"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_service": "mysql",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
|
||||||
|
"__meta_docker_container_name": "/dockersd_mysql",
|
||||||
|
"__meta_docker_container_network_mode": "dockersd_private",
|
||||||
|
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
|
||||||
|
"__meta_docker_network_ingress": "false",
|
||||||
|
"__meta_docker_network_internal": "false",
|
||||||
|
"__meta_docker_network_ip": "172.20.0.2",
|
||||||
|
"__meta_docker_network_name": "dockersd_private",
|
||||||
|
"__meta_docker_network_scope": "local",
|
||||||
|
"__meta_docker_port_private": "33060",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "172.21.0.2:3306",
|
||||||
|
"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_service": "mysql",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
|
||||||
|
"__meta_docker_container_name": "/dockersd_mysql",
|
||||||
|
"__meta_docker_container_network_mode": "dockersd_private",
|
||||||
|
"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
|
||||||
|
"__meta_docker_network_ingress": "false",
|
||||||
|
"__meta_docker_network_internal": "false",
|
||||||
|
"__meta_docker_network_ip": "172.21.0.2",
|
||||||
|
"__meta_docker_network_name": "dockersd_private1",
|
||||||
|
"__meta_docker_network_scope": "local",
|
||||||
|
"__meta_docker_port_private": "3306",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "172.21.0.2:33060",
|
||||||
|
"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_service": "mysql",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
|
||||||
|
"__meta_docker_container_name": "/dockersd_mysql",
|
||||||
|
"__meta_docker_container_network_mode": "dockersd_private",
|
||||||
|
"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
|
||||||
|
"__meta_docker_network_ingress": "false",
|
||||||
|
"__meta_docker_network_internal": "false",
|
||||||
|
"__meta_docker_network_ip": "172.21.0.2",
|
||||||
|
"__meta_docker_network_name": "dockersd_private1",
|
||||||
|
"__meta_docker_network_scope": "local",
|
||||||
|
"__meta_docker_port_private": "33060",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "172.21.0.2:9104",
|
||||||
|
"__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_service": "mysqlexporter",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
|
||||||
|
"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
|
||||||
|
"__meta_docker_container_name": "/dockersd_mysql_exporter",
|
||||||
|
"__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
|
||||||
|
"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
|
||||||
|
"__meta_docker_network_ingress": "false",
|
||||||
|
"__meta_docker_network_internal": "false",
|
||||||
|
"__meta_docker_network_ip": "172.21.0.2",
|
||||||
|
"__meta_docker_network_name": "dockersd_private1",
|
||||||
|
"__meta_docker_network_scope": "local",
|
||||||
|
"__meta_docker_port_private": "9104",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "172.20.0.2:9104",
|
||||||
|
"__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_service": "mysqlexporter",
|
||||||
|
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
|
||||||
|
"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
|
||||||
|
"__meta_docker_container_name": "/dockersd_mysql_exporter",
|
||||||
|
"__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
|
||||||
|
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
|
||||||
|
"__meta_docker_network_ingress": "false",
|
||||||
|
"__meta_docker_network_internal": "false",
|
||||||
|
"__meta_docker_network_ip": "172.20.0.2",
|
||||||
|
"__meta_docker_network_name": "dockersd_private",
|
||||||
|
"__meta_docker_network_scope": "local",
|
||||||
|
"__meta_docker_port_private": "9104",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
sortFunc(expected)
|
||||||
|
sortFunc(tg.Targets)
|
||||||
|
|
||||||
|
for i, lbls := range expected {
|
||||||
|
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
|
||||||
|
require.Equal(t, lbls, tg.Targets[i])
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
diff --git a/discovery/moby/testdata/dockerprom/containers.json b/discovery/moby/testdata/dockerprom/containers.json
@@ -128,5 +128,105 @@
             }
         },
         "Mounts": []
+    },
+    {
+        "Id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+        "Names": [
+            "/dockersd_mysql"
+        ],
+        "Image": "mysql:5.7.29",
+        "ImageID": "sha256:5d9483f9a7b21c87e0f5b9776c3e06567603c28c0062013eda127c968175f5e8",
+        "Command": "mysqld",
+        "Created": 1616273136,
+        "Ports": [
+            {
+                "PrivatePort": 3306,
+                "Type": "tcp"
+            },
+            {
+                "PrivatePort": 33060,
+                "Type": "tcp"
+            }
+        ],
+        "Labels": {
+            "com.docker.compose.project": "dockersd",
+            "com.docker.compose.service": "mysql",
+            "com.docker.compose.version": "2.2.2"
+        },
+        "State": "running",
+        "Status": "Up 40 seconds",
+        "HostConfig": {
+            "NetworkMode": "dockersd_private"
+        },
+        "NetworkSettings": {
+            "Networks": {
+                "dockersd_private": {
+                    "IPAMConfig": null,
+                    "Links": null,
+                    "Aliases": null,
+                    "NetworkID": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+                    "EndpointID": "80f8a61b37701a9991bb98c75ddd23fd9b7c16b5575ca81343f6b44ff4a2a9d9",
+                    "Gateway": "172.20.0.1",
+                    "IPAddress": "172.20.0.2",
+                    "IPPrefixLen": 16,
+                    "IPv6Gateway": "",
+                    "GlobalIPv6Address": "",
+                    "GlobalIPv6PrefixLen": 0,
+                    "MacAddress": "02:42:ac:14:00:0a",
+                    "DriverOpts": null
+                },
+                "dockersd_private1": {
+                    "IPAMConfig": {},
+                    "Links": null,
+                    "Aliases": [
+                        "mysql",
+                        "mysql",
+                        "f9ade4b83199"
+                    ],
+                    "NetworkID": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+                    "EndpointID": "f80921d10e78c99a5907705aae75befea40c3d3e9f820e66ab392f7274be16b8",
+                    "Gateway": "172.21.0.1",
+                    "IPAddress": "172.21.0.2",
+                    "IPPrefixLen": 24,
+                    "IPv6Gateway": "",
+                    "GlobalIPv6Address": "",
+                    "GlobalIPv6PrefixLen": 0,
+                    "MacAddress": "02:42:ac:15:00:02",
+                    "DriverOpts": null
+                }
+            }
+        },
+        "Mounts": []
+    },
+    {
+        "Id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
+        "Names": [
+            "/dockersd_mysql_exporter"
+        ],
+        "Image": "prom/mysqld-exporter:latest",
+        "ImageID": "sha256:121b8a7cd0525dd89aaec58ad7d34c3bb3714740e5a67daf6510ccf71ab219a9",
+        "Command": "/bin/mysqld_exporter",
+        "Created": 1616273136,
+        "Ports": [
+            {
+                "PrivatePort": 9104,
+                "Type": "tcp"
+            }
+        ],
+        "Labels": {
+            "com.docker.compose.project": "dockersd",
+            "com.docker.compose.service": "mysqlexporter",
+            "com.docker.compose.version": "2.2.2",
+            "maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>"
+        },
+        "State": "running",
+        "Status": "Up 40 seconds",
+        "HostConfig": {
+            "NetworkMode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8"
+        },
+        "NetworkSettings": {
+            "Networks": {}
+        },
+        "Mounts": []
     }
 ]
diff --git a/discovery/moby/testdata/dockerprom/networks.json b/discovery/moby/testdata/dockerprom/networks.json
@@ -111,5 +111,59 @@
         "Containers": {},
         "Options": {},
         "Labels": {}
+    },
+    {
+        "Name": "dockersd_private",
+        "Id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+        "Created": "2022-03-25T09:21:17.718370976+08:00",
+        "Scope": "local",
+        "Driver": "bridge",
+        "EnableIPv6": false,
+        "IPAM": {
+            "Driver": "default",
+            "Options": null,
+            "Config": [
+                {
+                    "Subnet": "172.20.0.1/16"
+                }
+            ]
+        },
+        "Internal": false,
+        "Attachable": false,
+        "Ingress": false,
+        "ConfigFrom": {
+            "Network": ""
+        },
+        "ConfigOnly": false,
+        "Containers": {},
+        "Options": {},
+        "Labels": {}
+    },
+    {
+        "Name": "dockersd_private1",
+        "Id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+        "Created": "2022-03-25T09:21:17.718370976+08:00",
+        "Scope": "local",
+        "Driver": "bridge",
+        "EnableIPv6": false,
+        "IPAM": {
+            "Driver": "default",
+            "Options": null,
+            "Config": [
+                {
+                    "Subnet": "172.21.0.1/16"
+                }
+            ]
+        },
+        "Internal": false,
+        "Attachable": false,
+        "Ingress": false,
+        "ConfigFrom": {
+            "Network": ""
+        },
+        "ConfigOnly": false,
+        "Containers": {},
+        "Options": {},
+        "Labels": {}
     }
 ]
diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go
@@ -146,12 +146,18 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 			openstackLabelUserID: model.LabelValue(s.UserID),
 		}

-		flavorID, ok := s.Flavor["id"].(string)
-		if !ok {
-			level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string")
-			continue
+		flavorName, nameOk := s.Flavor["original_name"].(string)
+		// "original_name" is only available for microversion >= 2.47. It was added in favor of "id".
+		if !nameOk {
+			flavorID, idOk := s.Flavor["id"].(string)
+			if !idOk {
+				level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string")
+				continue
+			}
+			labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)
+		} else {
+			labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorName)
 		}
-		labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)

 		imageID, ok := s.Image["id"].(string)
 		if ok {
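A minimal standalone Go sketch of the fallback above: prefer the flavor's "original_name" (available from Nova microversion 2.47 on) and fall back to "id". The sample maps below are hypothetical, not from the diff:

package main

import "fmt"

// flavorLabel mirrors the two-step type-assertion fallback in the diff.
func flavorLabel(flavor map[string]interface{}) (string, bool) {
	if name, ok := flavor["original_name"].(string); ok {
		return name, true
	}
	if id, ok := flavor["id"].(string); ok {
		return id, true
	}
	return "", false
}

func main() {
	fmt.Println(flavorLabel(map[string]interface{}{"original_name": "m1.medium"})) // m1.medium true
	fmt.Println(flavorLabel(map[string]interface{}{"id": "4"}))                    // 4 true
	fmt.Println(flavorLabel(map[string]interface{}{"id": 4}))                      //  false (wrong type)
}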
diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go
@@ -84,7 +84,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
 		},
 		{
 			"__address__": model.LabelValue("10.0.0.31:0"),
-			"__meta_openstack_instance_flavor": model.LabelValue("1"),
+			"__meta_openstack_instance_flavor": model.LabelValue("m1.medium"),
 			"__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba"),
 			"__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"),
 			"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
@@ -96,7 +96,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
 		},
 		{
 			"__address__": model.LabelValue("10.0.0.33:0"),
-			"__meta_openstack_instance_flavor": model.LabelValue("4"),
+			"__meta_openstack_instance_flavor": model.LabelValue("m1.small"),
 			"__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"),
 			"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
 			"__meta_openstack_instance_name": model.LabelValue("merp"),
@@ -108,7 +108,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
 		},
 		{
 			"__address__": model.LabelValue("10.0.0.34:0"),
-			"__meta_openstack_instance_flavor": model.LabelValue("4"),
+			"__meta_openstack_instance_flavor": model.LabelValue("m1.small"),
 			"__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"),
 			"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
 			"__meta_openstack_instance_name": model.LabelValue("merp"),
@@ -427,13 +427,17 @@ const serverListBody = `
 	"OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
 	"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
 	"flavor": {
-		"id": "1",
-		"links": [
-			{
-				"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
-				"rel": "bookmark"
-			}
-		]
+		"vcpus": 2,
+		"ram": 4096,
+		"disk": 0,
+		"ephemeral": 0,
+		"swap": 0,
+		"original_name": "m1.medium",
+		"extra_specs": {
+			"aggregate_instance_extra_specs:general": "true",
+			"hw:mem_page_size": "large",
+			"hw:vif_multiqueue_enabled": "true"
+		}
 	},
 	"id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba",
 	"security_groups": [
@@ -498,13 +502,17 @@ const serverListBody = `
 	"OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
 	"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
 	"flavor": {
-		"id": "4",
-		"links": [
-			{
-				"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
-				"rel": "bookmark"
-			}
-		]
+		"vcpus": 2,
+		"ram": 4096,
+		"disk": 0,
+		"ephemeral": 0,
+		"swap": 0,
+		"original_name": "m1.small",
+		"extra_specs": {
+			"aggregate_instance_extra_specs:general": "true",
+			"hw:mem_page_size": "large",
+			"hw:vif_multiqueue_enabled": "true"
+		}
 	},
 	"id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb",
 	"security_groups": [
diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md
@@ -50,6 +50,7 @@ The Prometheus monitoring server
 | <code class="text-nowrap">--rules.alert.resend-delay</code> | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` |
 | <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. When set, "query.max-concurrency" may need to be adjusted accordingly. Use with server mode only. | `4` |
 | <code class="text-nowrap">--alertmanager.notification-queue-capacity</code> | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` |
+| <code class="text-nowrap">--alertmanager.drain-notification-queue-on-shutdown</code> | Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down. Use with server mode only. | `true` |
 | <code class="text-nowrap">--query.lookback-delta</code> | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` |
 | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
 | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
@@ -941,6 +941,9 @@ tls_config:
 # The host to use if the container is in host networking mode.
 [ host_networking_host: <string> | default = "localhost" ]

+# Match the first network if the container has multiple networks defined, thus avoiding collecting duplicate targets.
+[ match_first_network: <boolean> | default = true ]
+
 # Optional filters to limit the discovery process to a subset of available
 # resources.
 # The available filters are listed in the upstream documentation:
@@ -1229,6 +1232,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
 * `__meta_ec2_ipv6_addresses`: comma separated list of IPv6 addresses assigned to the instance's network interfaces, if present
 * `__meta_ec2_owner_id`: the ID of the AWS account that owns the EC2 instance
 * `__meta_ec2_platform`: the Operating System platform, set to 'windows' on Windows servers, absent otherwise
+* `__meta_ec2_primary_ipv6_addresses`: comma separated list of the Primary IPv6 addresses of the instance, if present. The list is ordered based on the position of each corresponding network interface in the attachment order.
 * `__meta_ec2_primary_subnet_id`: the subnet ID of the primary network interface, if available
 * `__meta_ec2_private_dns_name`: the private DNS name of the instance, if available
 * `__meta_ec2_private_ip`: the private IP address of the instance, if present
@@ -1359,7 +1363,7 @@ interface.
 The following meta labels are available on targets during [relabeling](#relabel_config):

 * `__meta_openstack_address_pool`: the pool of the private IP.
-* `__meta_openstack_instance_flavor`: the flavor ID of the OpenStack instance.
+* `__meta_openstack_instance_flavor`: the flavor name of the OpenStack instance, or the flavor ID if the flavor name isn't available.
 * `__meta_openstack_instance_id`: the OpenStack instance ID.
 * `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using.
 * `__meta_openstack_instance_name`: the OpenStack instance name.
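To make the new match_first_network option concrete, a minimal Go sketch (a trimmed-down, hypothetical copy of the DockerSDConfig struct from the diff; only the two fields shown are assumed) of unmarshalling a docker_sd_config snippet that disables it, as the new test in this commit does via cfg.MatchFirstNetwork = false:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// dockerSDConfig keeps just the fields relevant to this example; the real
// struct lives in discovery/moby/docker.go.
type dockerSDConfig struct {
	Host              string `yaml:"host"`
	MatchFirstNetwork bool   `yaml:"match_first_network"`
}

func main() {
	cfgString := `
host: unix:///var/run/docker.sock
match_first_network: false
`
	var cfg dockerSDConfig
	if err := yaml.Unmarshal([]byte(cfgString), &cfg); err != nil {
		panic(err)
	}
	// With match_first_network disabled, every network a container is
	// attached to yields its own target, as exercised by
	// TestDockerSDRefreshMatchAllNetworks above.
	fmt.Printf("%+v\n", cfg)
}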
diff --git a/docs/querying/functions.md b/docs/querying/functions.md
@@ -79,7 +79,12 @@ labels of the 1-element output vector from the input vector.
 ## `ceil()`

 `ceil(v instant-vector)` rounds the sample values of all elements in `v` up to
-the nearest integer.
+the nearest integer value greater than or equal to v.
+
+* `ceil(+Inf) = +Inf`
+* `ceil(±0) = ±0`
+* `ceil(1.49) = 2.0`
+* `ceil(1.78) = 2.0`

 ## `changes()`

@@ -173,7 +178,12 @@ Special cases are:
 ## `floor()`

 `floor(v instant-vector)` rounds the sample values of all elements in `v` down
-to the nearest integer.
+to the nearest integer value smaller than or equal to v.
+
+* `floor(+Inf) = +Inf`
+* `floor(±0) = ±0`
+* `floor(1.49) = 1.0`
+* `floor(1.78) = 1.0`

 ## `histogram_avg()`
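The new special-case lists match the behavior of Go's math.Ceil and math.Floor, which, to the best of our reading, back PromQL's ceil() and floor() for float samples; a quick standalone check of the documented cases:

package main

import (
	"fmt"
	"math"
)

func main() {
	fmt.Println(math.Ceil(1.49), math.Ceil(1.78))   // 2 2
	fmt.Println(math.Floor(1.49), math.Floor(1.78)) // 1 1
	fmt.Println(math.Ceil(math.Inf(1)))             // +Inf: ceil(+Inf) = +Inf
	fmt.Println(math.Floor(math.Inf(1)))            // +Inf: floor(+Inf) = +Inf
	fmt.Println(math.Ceil(math.Copysign(0, -1)))    // -0: the sign of zero is preserved
}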
diff --git a/go.mod b/go.mod
@@ -146,10 +146,10 @@ require (
 	github.com/hashicorp/cronexpr v1.1.2 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-	github.com/hashicorp/go-hclog v1.5.0 // indirect
+	github.com/hashicorp/go-hclog v1.6.3 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
 	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
 	github.com/hashicorp/serf v0.10.1 // indirect
diff --git a/go.sum b/go.sum
@@ -369,9 +369,8 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
-github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
 github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@@ -383,8 +382,8 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
-github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
@@ -30,11 +30,12 @@ import (
 type FloatHistogram struct {
 	// Counter reset information.
 	CounterResetHint CounterResetHint
-	// Currently valid schema numbers are -4 <= n <= 8. They are all for
-	// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
-	// then each power of two is divided into 2^n logarithmic buckets. Or
-	// in other words, each bucket boundary is the previous boundary times
-	// 2^(2^-n).
+	// Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
+	// They are all for base-2 bucket schemas, where 1 is a bucket boundary in
+	// each case, and then each power of two is divided into 2^n logarithmic buckets.
+	// Or in other words, each bucket boundary is the previous boundary times
+	// 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by
+	// the CustomValues field.
 	Schema int32
 	// Width of the zero bucket.
 	ZeroThreshold float64
@@ -49,6 +50,16 @@ type FloatHistogram struct {
 	// Observation counts in buckets. Each represents an absolute count and
 	// must be zero or positive.
 	PositiveBuckets, NegativeBuckets []float64
+	// Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
+	// This slice is interned, to be treated as immutable and copied by reference.
+	// These numbers should be strictly increasing. This field is only used when the
+	// schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans
+	// and NegativeBuckets fields are not used in that case.
+	CustomValues []float64
 }
 
+func (h *FloatHistogram) UsesCustomBuckets() bool {
+	return IsCustomBucketsSchema(h.Schema)
+}
+
 // Copy returns a deep copy of the Histogram.
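For illustration only, not part of this diff: a minimal sketch of how caller code might build and validate a FloatHistogram that uses the new -53 schema. The literal values here are assumptions.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Buckets (-Inf, 1], (1, 2.5] and (2.5, +Inf]: the +Inf bound is implicit,
	// so CustomValues carries one entry fewer than the number of buckets.
	h := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema, // -53
		Count:           10,
		Sum:             17.5,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		PositiveBuckets: []float64{4, 5, 1},
		CustomValues:    []float64{1, 2.5},
	}
	fmt.Println(h.UsesCustomBuckets()) // true
	fmt.Println(h.Validate())          // <nil>; zero bucket and negative side stay unset
}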
@@ -56,28 +67,37 @@ func (h *FloatHistogram) Copy() *FloatHistogram {
 	c := FloatHistogram{
 		CounterResetHint: h.CounterResetHint,
 		Schema:           h.Schema,
-		ZeroThreshold:    h.ZeroThreshold,
-		ZeroCount:        h.ZeroCount,
 		Count:            h.Count,
 		Sum:              h.Sum,
 	}
 
+	if h.UsesCustomBuckets() {
+		if len(h.CustomValues) != 0 {
+			c.CustomValues = make([]float64, len(h.CustomValues))
+			copy(c.CustomValues, h.CustomValues)
+		}
+	} else {
+		c.ZeroThreshold = h.ZeroThreshold
+		c.ZeroCount = h.ZeroCount
+
+		if len(h.NegativeSpans) != 0 {
+			c.NegativeSpans = make([]Span, len(h.NegativeSpans))
+			copy(c.NegativeSpans, h.NegativeSpans)
+		}
+		if len(h.NegativeBuckets) != 0 {
+			c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
+			copy(c.NegativeBuckets, h.NegativeBuckets)
+		}
+	}
+
 	if len(h.PositiveSpans) != 0 {
 		c.PositiveSpans = make([]Span, len(h.PositiveSpans))
 		copy(c.PositiveSpans, h.PositiveSpans)
 	}
-	if len(h.NegativeSpans) != 0 {
-		c.NegativeSpans = make([]Span, len(h.NegativeSpans))
-		copy(c.NegativeSpans, h.NegativeSpans)
-	}
 	if len(h.PositiveBuckets) != 0 {
 		c.PositiveBuckets = make([]float64, len(h.PositiveBuckets))
 		copy(c.PositiveBuckets, h.PositiveBuckets)
 	}
-	if len(h.NegativeBuckets) != 0 {
-		c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
-		copy(c.NegativeBuckets, h.NegativeBuckets)
-	}
 
 	return &c
 }
@@ -87,32 +107,53 @@ func (h *FloatHistogram) Copy() *FloatHistogram {
 func (h *FloatHistogram) CopyTo(to *FloatHistogram) {
 	to.CounterResetHint = h.CounterResetHint
 	to.Schema = h.Schema
-	to.ZeroThreshold = h.ZeroThreshold
-	to.ZeroCount = h.ZeroCount
 	to.Count = h.Count
 	to.Sum = h.Sum
 
+	if h.UsesCustomBuckets() {
+		to.ZeroThreshold = 0
+		to.ZeroCount = 0
+
+		to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
+		to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
+
+		to.CustomValues = resize(to.CustomValues, len(h.CustomValues))
+		copy(to.CustomValues, h.CustomValues)
+	} else {
+		to.ZeroThreshold = h.ZeroThreshold
+		to.ZeroCount = h.ZeroCount
+
+		to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
+		copy(to.NegativeSpans, h.NegativeSpans)
+
+		to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
+		copy(to.NegativeBuckets, h.NegativeBuckets)
+
+		to.CustomValues = clearIfNotNil(to.CustomValues)
+	}
+
 	to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
 	copy(to.PositiveSpans, h.PositiveSpans)
 
-	to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
-	copy(to.NegativeSpans, h.NegativeSpans)
-
 	to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
 	copy(to.PositiveBuckets, h.PositiveBuckets)
-
-	to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
-	copy(to.NegativeBuckets, h.NegativeBuckets)
 }
 
 // CopyToSchema works like Copy, but the returned deep copy has the provided
 // target schema, which must be ≤ the original schema (i.e. it must have a lower
-// resolution).
+// resolution). This method panics if a custom buckets schema is used in the
+// receiving FloatHistogram or as the provided targetSchema.
 func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram {
 	if targetSchema == h.Schema {
 		// Fast path.
 		return h.Copy()
 	}
+	if h.UsesCustomBuckets() {
+		panic(fmt.Errorf("cannot reduce resolution to %d when there are custom buckets", targetSchema))
+	}
+	if IsCustomBucketsSchema(targetSchema) {
+		panic("cannot reduce resolution to custom buckets schema")
+	}
 	if targetSchema > h.Schema {
 		panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema))
 	}
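CopyTo now also clears whatever the destination's schema class does not use (clearIfNotNil for slices, zeroes for the zero bucket), so a reused destination cannot carry state across schema kinds. A hedged sketch of the intended reuse pattern; inputs and process are stand-ins for caller code, not names from this patch:

package main

import "github.com/prometheus/prometheus/model/histogram"

// consume copies every input into a single scratch histogram. resize and
// clearIfNotNil inside CopyTo keep slice capacity across iterations instead
// of re-allocating, which is the point of CopyTo over Copy.
func consume(inputs []*histogram.FloatHistogram, process func(*histogram.FloatHistogram)) {
	var scratch histogram.FloatHistogram
	for _, h := range inputs {
		h.CopyTo(&scratch)
		process(&scratch)
	}
}

func main() {}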
@@ -185,6 +226,9 @@ func (h *FloatHistogram) TestExpression() string {
 	if m.ZeroThreshold != 0 {
 		res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold))
 	}
+	if m.UsesCustomBuckets() {
+		res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues))
+	}
 
 	addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string {
 		if len(spans) > 1 {
@@ -210,14 +254,18 @@ func (h *FloatHistogram) TestExpression() string {
 	return "{{" + strings.Join(res, " ") + "}}"
 }
 
-// ZeroBucket returns the zero bucket.
+// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets.
 func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
+	if h.UsesCustomBuckets() {
+		panic("histograms with custom buckets have no zero bucket")
+	}
 	return Bucket[float64]{
 		Lower:          -h.ZeroThreshold,
 		Upper:          h.ZeroThreshold,
 		LowerInclusive: true,
 		UpperInclusive: true,
 		Count:          h.ZeroCount,
+		// Index is irrelevant for the zero bucket.
 	}
 }
 
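Since ZeroBucket now panics for the -53 schema, code that can see both schema classes needs a guard first; a small sketch (zeroCountOf is hypothetical):

package main

import "github.com/prometheus/prometheus/model/histogram"

// zeroCountOf returns the zero-bucket count, or 0 for custom-buckets
// histograms, which have no zero bucket at all.
func zeroCountOf(h *histogram.FloatHistogram) float64 {
	if h.UsesCustomBuckets() {
		return 0
	}
	return h.ZeroBucket().Count
}

func main() {}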
@@ -263,9 +311,18 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
 //
 // The method reconciles differences in the zero threshold and in the schema, and
 // changes them if needed. The other histogram will not be modified in any case.
+// Adding is currently only supported between 2 exponential histograms, or between
+// 2 custom buckets histograms with the exact same custom bounds.
 //
 // This method returns a pointer to the receiving histogram for convenience.
-func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
+func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
+	if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
+		return nil, ErrHistogramsIncompatibleSchema
+	}
+	if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
+		return nil, ErrHistogramsIncompatibleBounds
+	}
+
 	switch {
 	case other.CounterResetHint == h.CounterResetHint:
 		// Adding apples to apples, all good. No need to change anything.
@@ -290,19 +347,28 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
 		// TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
 	}
 
-	otherZeroCount := h.reconcileZeroBuckets(other)
-	h.ZeroCount += otherZeroCount
+	if !h.UsesCustomBuckets() {
+		otherZeroCount := h.reconcileZeroBuckets(other)
+		h.ZeroCount += otherZeroCount
+	}
 	h.Count += other.Count
 	h.Sum += other.Sum
 
 	var (
 		hPositiveSpans       = h.PositiveSpans
 		hPositiveBuckets     = h.PositiveBuckets
-		hNegativeSpans       = h.NegativeSpans
-		hNegativeBuckets     = h.NegativeBuckets
-
 		otherPositiveSpans   = other.PositiveSpans
 		otherPositiveBuckets = other.PositiveBuckets
+	)
+
+	if h.UsesCustomBuckets() {
+		h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
+		return h, nil
+	}
+
+	var (
+		hNegativeSpans       = h.NegativeSpans
+		hNegativeBuckets     = h.NegativeBuckets
 		otherNegativeSpans   = other.NegativeSpans
 		otherNegativeBuckets = other.NegativeBuckets
 	)
@@ -321,24 +387,40 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
 	h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
 	h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
 
-	return h
+	return h, nil
 }
 
 // Sub works like Add but subtracts the other histogram.
-func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
-	otherZeroCount := h.reconcileZeroBuckets(other)
-	h.ZeroCount -= otherZeroCount
+func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) {
+	if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
+		return nil, ErrHistogramsIncompatibleSchema
+	}
+	if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
+		return nil, ErrHistogramsIncompatibleBounds
+	}
+
+	if !h.UsesCustomBuckets() {
+		otherZeroCount := h.reconcileZeroBuckets(other)
+		h.ZeroCount -= otherZeroCount
+	}
 	h.Count -= other.Count
 	h.Sum -= other.Sum
 
 	var (
 		hPositiveSpans       = h.PositiveSpans
 		hPositiveBuckets     = h.PositiveBuckets
-		hNegativeSpans       = h.NegativeSpans
-		hNegativeBuckets     = h.NegativeBuckets
-
 		otherPositiveSpans   = other.PositiveSpans
 		otherPositiveBuckets = other.PositiveBuckets
+	)
+
+	if h.UsesCustomBuckets() {
+		h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
+		return h, nil
+	}
+
+	var (
+		hNegativeSpans       = h.NegativeSpans
+		hNegativeBuckets     = h.NegativeBuckets
 		otherNegativeSpans   = other.NegativeSpans
 		otherNegativeBuckets = other.NegativeBuckets
 	)
@@ -356,7 +438,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
 	h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
 	h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
 
-	return h
+	return h, nil
 }
 
 // Equals returns true if the given float histogram matches exactly.
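Add and Sub now return (*FloatHistogram, error) instead of silently assuming compatible layouts, so aggregating callers have to branch on the two new sentinel errors. A sketch of the calling pattern (sum is hypothetical, not from this patch):

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// sum folds hs into acc in place. acc and all hs are assumed to share a
// schema class (and, for custom buckets, the same bounds); otherwise the
// sentinel errors below are surfaced to the caller.
func sum(acc *histogram.FloatHistogram, hs ...*histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
	for _, h := range hs {
		var err error
		acc, err = acc.Add(h)
		switch {
		case errors.Is(err, histogram.ErrHistogramsIncompatibleSchema):
			return nil, fmt.Errorf("mixing exponential and custom buckets: %w", err)
		case errors.Is(err, histogram.ErrHistogramsIncompatibleBounds):
			return nil, fmt.Errorf("custom bounds differ: %w", err)
		case err != nil:
			return nil, err
		}
	}
	return acc, nil
}

func main() {}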
@@ -365,29 +447,42 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
 // but they must represent the same bucket layout to match.
 // Sum, Count, ZeroCount and bucket values are compared based on their bit patterns
 // because this method is about data equality rather than mathematical equality.
+// We ignore fields that are not used based on the exponential / custom buckets schema,
+// but check fields where differences may cause unintended behaviour even if they are not
+// supposed to be used according to the schema.
 func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
 	if h2 == nil {
 		return false
 	}
 
-	if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
-		math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) ||
+	if h.Schema != h2.Schema ||
 		math.Float64bits(h.Count) != math.Float64bits(h2.Count) ||
 		math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
 		return false
 	}
 
+	if h.UsesCustomBuckets() {
+		if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
+			return false
+		}
+	}
+
+	if h.ZeroThreshold != h2.ZeroThreshold ||
+		math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) {
+		return false
+	}
+
+	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
+		return false
+	}
+	if !FloatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
+		return false
+	}
+
 	if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
 		return false
 	}
-	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
-		return false
-	}
-
-	if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
-		return false
-	}
-	if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
+	if !FloatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
 		return false
 	}
 
@@ -403,6 +498,7 @@ func (h *FloatHistogram) Size() int {
 	negSpanSize := len(h.NegativeSpans) * 8     // 8 bytes (int32 + uint32).
 	posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64).
 	negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64).
+	customBoundSize := len(h.CustomValues) * 8  // 8 bytes (float64).
 
 	// Total size of the struct.
 
@@ -417,9 +513,10 @@ func (h *FloatHistogram) Size() int {
 	// fh.NegativeSpans is 24 bytes.
 	// fh.PositiveBuckets is 24 bytes.
 	// fh.NegativeBuckets is 24 bytes.
-	structSize := 144
+	// fh.CustomValues is 24 bytes.
+	structSize := 168
 
-	return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize
+	return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize
 }
 
 // Compact eliminates empty buckets at the beginning and end of each span, then
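Worked example of the new accounting: a custom-buckets histogram with one positive span, three positive buckets and two custom bounds reports 168 + 1*8 + 0 + 3*8 + 0 + 2*8 = 216 bytes; the 24-byte CustomValues slice header is why the struct constant grows from 144 to 168.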
@@ -504,6 +601,12 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
 	if h.Count < previous.Count {
 		return true
 	}
+	if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, previous.CustomValues)) {
+		// Mark that something has changed or that the application has been restarted. However, this does
+		// not matter so much since the change in schema will be handled directly in the chunks and PromQL
+		// functions.
+		return true
+	}
 	if h.Schema > previous.Schema {
 		return true
 	}
@@ -609,7 +712,7 @@ func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] {
 // positive buckets in descending order (starting at the highest bucket and
 // going down towards the zero bucket).
 func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] {
-	it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
+	it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues)
 	return &it
 }
 
@@ -617,7 +720,7 @@ func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64]
 // negative buckets in ascending order (starting at the lowest bucket and going
 // up towards the zero bucket).
 func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] {
-	it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
+	it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
 	return &it
 }
 
@@ -629,7 +732,7 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64]
 func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
 	return &allFloatBucketIterator{
 		h:         h,
-		leftIter:  newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false),
+		leftIter:  newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil),
 		rightIter: h.floatBucketIterator(true, 0, h.Schema),
 		state:     -1,
 	}
@@ -643,30 +746,52 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
 func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] {
 	return &allFloatBucketIterator{
 		h:         h,
-		leftIter:  newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true),
+		leftIter:  newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues),
 		rightIter: h.floatBucketIterator(false, 0, h.Schema),
 		state:     -1,
 	}
 }
 
 // Validate validates consistency between span and bucket slices. Also, buckets are checked
-// against negative values.
+// against negative values. We check to make sure there are no unexpected fields or field values
+// based on the exponential / custom buckets schema.
 // We do not check for h.Count being at least as large as the sum of the
 // counts in the buckets because floating point precision issues can
 // create false positives here.
 func (h *FloatHistogram) Validate() error {
-	if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
-		return fmt.Errorf("negative side: %w", err)
-	}
-	if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
-		return fmt.Errorf("positive side: %w", err)
-	}
 	var nCount, pCount float64
-	err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false)
-	if err != nil {
-		return fmt.Errorf("negative side: %w", err)
+	if h.UsesCustomBuckets() {
+		if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+			return fmt.Errorf("custom buckets: %w", err)
+		}
+		if h.ZeroCount != 0 {
+			return fmt.Errorf("custom buckets: must have zero count of 0")
+		}
+		if h.ZeroThreshold != 0 {
+			return fmt.Errorf("custom buckets: must have zero threshold of 0")
+		}
+		if len(h.NegativeSpans) > 0 {
+			return fmt.Errorf("custom buckets: must not have negative spans")
+		}
+		if len(h.NegativeBuckets) > 0 {
+			return fmt.Errorf("custom buckets: must not have negative buckets")
+		}
+	} else {
+		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+			return fmt.Errorf("positive side: %w", err)
+		}
+		if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
+			return fmt.Errorf("negative side: %w", err)
+		}
+		err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false)
+		if err != nil {
+			return fmt.Errorf("negative side: %w", err)
+		}
+		if h.CustomValues != nil {
+			return fmt.Errorf("histogram with exponential schema must not have custom bounds")
+		}
 	}
-	err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
+	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
 	if err != nil {
 		return fmt.Errorf("positive side: %w", err)
 	}
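A sketch of the stricter validation (values are illustrative): any zero-bucket state or negative side on a -53 histogram is now rejected outright.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           3,
		ZeroCount:       1, // forbidden together with custom buckets
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{2, 1},
		CustomValues:    []float64{0.5},
	}
	fmt.Println(h.Validate()) // custom buckets: must have zero count of 0
}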
@@ -790,17 +915,25 @@ func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
 // If positive is true, the returned iterator iterates through the positive
 // buckets, otherwise through the negative buckets.
 //
-// If absoluteStartValue is < the lowest absolute value of any upper bucket
-// boundary, the iterator starts with the first bucket. Otherwise, it will skip
-// all buckets with an absolute value of their upper boundary ≤
-// absoluteStartValue.
+// Only for exponential schemas, if absoluteStartValue is < the lowest absolute
+// value of any upper bucket boundary, the iterator starts with the first bucket.
+// Otherwise, it will skip all buckets with an absolute value of their upper boundary ≤
+// absoluteStartValue. For custom bucket schemas, absoluteStartValue is ignored and
+// no buckets are skipped.
 //
 // targetSchema must be ≤ the schema of FloatHistogram (and of course within the
 // legal values for schemas in general). The buckets are merged to match the
-// targetSchema prior to iterating (without mutating FloatHistogram).
+// targetSchema prior to iterating (without mutating FloatHistogram), but custom buckets
+// schemas cannot be merged with other schemas.
 func (h *FloatHistogram) floatBucketIterator(
 	positive bool, absoluteStartValue float64, targetSchema int32,
 ) floatBucketIterator {
+	if h.UsesCustomBuckets() && targetSchema != h.Schema {
+		panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
+	}
+	if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
+		panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
+	}
 	if targetSchema > h.Schema {
 		panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
 	}
@@ -816,6 +949,7 @@ func (h *FloatHistogram) floatBucketIterator(
 	if positive {
 		i.spans = h.PositiveSpans
 		i.buckets = h.PositiveBuckets
+		i.customValues = h.CustomValues
 	} else {
 		i.spans = h.NegativeSpans
 		i.buckets = h.NegativeBuckets
@@ -825,14 +959,15 @@ func (h *FloatHistogram) floatBucketIterator(
 
 // reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators.
 func newReverseFloatBucketIterator(
-	spans []Span, buckets []float64, schema int32, positive bool,
+	spans []Span, buckets []float64, schema int32, positive bool, customValues []float64,
 ) reverseFloatBucketIterator {
 	r := reverseFloatBucketIterator{
 		baseBucketIterator: baseBucketIterator[float64, float64]{
 			schema:       schema,
 			spans:        spans,
 			buckets:      buckets,
 			positive:     positive,
+			customValues: customValues,
 		},
 	}
 
@@ -946,9 +1081,9 @@ func (i *floatBucketIterator) Next() bool {
 		}
 	}
 
-	// Skip buckets before absoluteStartValue.
+	// Skip buckets before absoluteStartValue for exponential schemas.
 	// TODO(beorn7): Maybe do something more efficient than this recursive call.
-	if !i.boundReachedStartValue && getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
+	if !i.boundReachedStartValue && IsExponentialSchema(i.targetSchema) && getBoundExponential(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
 		return i.Next()
 	}
 	i.boundReachedStartValue = true
@@ -1010,14 +1145,7 @@ func (i *allFloatBucketIterator) Next() bool {
 	case 0:
 		i.state = 1
 		if i.h.ZeroCount > 0 {
-			i.currBucket = Bucket[float64]{
-				Lower:          -i.h.ZeroThreshold,
-				Upper:          i.h.ZeroThreshold,
-				LowerInclusive: true,
-				UpperInclusive: true,
-				Count:          i.h.ZeroCount,
-				// Index is irrelevant for the zero bucket.
-			}
+			i.currBucket = i.h.ZeroBucket()
 			return true
 		}
 		return i.Next()
@@ -1076,7 +1204,7 @@ func addBuckets(
 	for _, spanB := range spansB {
 		indexB += spanB.Offset
 		for j := 0; j < int(spanB.Length); j++ {
-			if lowerThanThreshold && getBound(indexB, schema) <= threshold {
+			if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold {
 				goto nextLoop
 			}
 			lowerThanThreshold = false
@@ -1177,7 +1305,7 @@ func addBuckets(
 	return spansA, bucketsA
 }
 
-func floatBucketsMatch(b1, b2 []float64) bool {
+func FloatBucketsMatch(b1, b2 []float64) bool {
 	if len(b1) != len(b2) {
 		return false
 	}
@@ -1191,7 +1319,15 @@ func FloatBucketsMatch(b1, b2 []float64) bool {
 
 // ReduceResolution reduces the float histogram's spans, buckets into target schema.
 // The target schema must be smaller than the current float histogram's schema.
+// This will panic if the histogram has custom buckets or if the target schema is
+// a custom buckets schema.
 func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram {
+	if h.UsesCustomBuckets() {
+		panic("cannot reduce resolution when there are custom buckets")
+	}
+	if IsCustomBucketsSchema(targetSchema) {
+		panic("cannot reduce resolution to custom buckets schema")
+	}
 	if targetSchema >= h.Schema {
 		panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
 	}

File diff suppressed because it is too large
@@ -20,14 +20,33 @@ import (
 	"strings"
 )
 
+const (
+	ExponentialSchemaMax int32 = 8
+	ExponentialSchemaMin int32 = -4
+	CustomBucketsSchema  int32 = -53
+)
+
 var (
 	ErrHistogramCountNotBigEnough    = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
 	ErrHistogramCountMismatch        = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")
 	ErrHistogramNegativeBucketCount  = errors.New("histogram has a bucket whose observation count is negative")
 	ErrHistogramSpanNegativeOffset   = errors.New("histogram has a span whose offset is negative")
 	ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided")
+	ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few")
+	ErrHistogramCustomBucketsInvalid  = errors.New("histogram custom bounds must be in strictly increasing order")
+	ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite")
+	ErrHistogramsIncompatibleSchema   = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas")
+	ErrHistogramsIncompatibleBounds   = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds")
 )
 
+func IsCustomBucketsSchema(s int32) bool {
+	return s == CustomBucketsSchema
+}
+
+func IsExponentialSchema(s int32) bool {
+	return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax
+}
+
 // BucketCount is a type constraint for the count in a bucket, which can be
 // float64 (for type FloatHistogram) or uint64 (for type Histogram).
 type BucketCount interface {
@@ -115,6 +134,8 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct {
 
 	currCount IBC   // Count in the current bucket.
 	currIdx   int32 // The actual bucket index.
+
+	customValues []float64 // Bounds (usually upper) for histograms with custom buckets.
 }
 
 func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] {
@@ -128,14 +149,19 @@ func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] {
 		Index: b.currIdx,
 	}
 	if b.positive {
-		bucket.Upper = getBound(b.currIdx, schema)
-		bucket.Lower = getBound(b.currIdx-1, schema)
+		bucket.Upper = getBound(b.currIdx, schema, b.customValues)
+		bucket.Lower = getBound(b.currIdx-1, schema, b.customValues)
 	} else {
-		bucket.Lower = -getBound(b.currIdx, schema)
-		bucket.Upper = -getBound(b.currIdx-1, schema)
+		bucket.Lower = -getBound(b.currIdx, schema, b.customValues)
+		bucket.Upper = -getBound(b.currIdx-1, schema, b.customValues)
+	}
+	if IsCustomBucketsSchema(schema) {
+		bucket.LowerInclusive = b.currIdx == 0
+		bucket.UpperInclusive = true
+	} else {
+		bucket.LowerInclusive = bucket.Lower < 0
+		bucket.UpperInclusive = bucket.Upper > 0
 	}
-	bucket.LowerInclusive = bucket.Lower < 0
-	bucket.UpperInclusive = bucket.Upper > 0
 	return bucket
 }
 
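The schema-dependent inclusivity in at() gives custom buckets the classic-histogram convention (upper bound inclusive, first bucket closed below) while exponential buckets keep their zero-centred one. A sketch iterating a -53 histogram through the public iterator; the expected lines in the comments are derived from the logic above, not captured output:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           10,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		PositiveBuckets: []float64{4, 5, 1},
		CustomValues:    []float64{1, 2.5},
	}
	it := h.PositiveBucketIterator()
	for it.Next() {
		b := it.At()
		fmt.Printf("lower=%g upper=%g lowerIncl=%v upperIncl=%v count=%g\n",
			b.Lower, b.Upper, b.LowerInclusive, b.UpperInclusive, b.Count)
	}
	// lower=-Inf upper=1 lowerIncl=true upperIncl=true count=4
	// lower=1 upper=2.5 lowerIncl=false upperIncl=true count=5
	// lower=2.5 upper=+Inf lowerIncl=false upperIncl=true count=1
}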
@@ -393,7 +419,55 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB
 	return nil
 }
 
-func getBound(idx, schema int32) float64 {
+func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error {
+	prev := math.Inf(-1)
+	for _, curr := range bounds {
+		if curr <= prev {
+			return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid)
+		}
+		prev = curr
+	}
+	if prev == math.Inf(1) {
+		return fmt.Errorf("last +Inf bound must not be explicitly defined: %w", ErrHistogramCustomBucketsInfinite)
+	}
+
+	var spanBuckets int
+	var totalSpanLength int
+	for n, span := range spans {
+		if span.Offset < 0 {
+			return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset)
+		}
+		spanBuckets += int(span.Length)
+		totalSpanLength += int(span.Length) + int(span.Offset)
+	}
+	if spanBuckets != numBuckets {
+		return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch)
+	}
+	if (len(bounds) + 1) < totalSpanLength {
+		return fmt.Errorf("only %d custom bounds defined which is insufficient to cover total span length of %d: %w", len(bounds), totalSpanLength, ErrHistogramCustomBucketsMismatch)
+	}
+
+	return nil
+}
+
+func getBound(idx, schema int32, customValues []float64) float64 {
+	if IsCustomBucketsSchema(schema) {
+		length := int32(len(customValues))
+		switch {
+		case idx > length || idx < -1:
+			panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length))
+		case idx == length:
+			return math.Inf(1)
+		case idx == -1:
+			return math.Inf(-1)
+		default:
+			return customValues[idx]
+		}
+	}
+	return getBoundExponential(idx, schema)
+}
+
+func getBoundExponential(idx, schema int32) float64 {
 	// Here a bit of context about the behavior for the last bucket counting
 	// regular numbers (called simply "last bucket" below) and the bucket
 	// counting observations of ±Inf (called "inf bucket" below, with an idx
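getBound is unexported, so its custom-schema contract (index -1 maps to -Inf, index == len(customValues) to the implicit +Inf bucket) is easiest to pin down with an in-package test; a hedged sketch, not part of this diff:

package histogram

import (
	"math"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestGetBoundCustom exercises the custom-schema branch of getBound
// with two explicit bounds.
func TestGetBoundCustom(t *testing.T) {
	cv := []float64{1, 2.5}
	require.Equal(t, math.Inf(-1), getBound(-1, CustomBucketsSchema, cv))
	require.Equal(t, 1.0, getBound(0, CustomBucketsSchema, cv))
	require.Equal(t, 2.5, getBound(1, CustomBucketsSchema, cv))
	require.Equal(t, math.Inf(1), getBound(2, CustomBucketsSchema, cv))
}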
@@ -703,3 +777,10 @@ func reduceResolution[IBC InternalBucketCount](
 
 	return targetSpans, targetBuckets
 }
+
+func clearIfNotNil[T any](items []T) []T {
+	if items == nil {
+		return nil
+	}
+	return items[:0]
+}
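clearIfNotNil is deliberately nil-preserving: clearIfNotNil([]float64(nil)) stays nil, so "field unused" remains distinguishable from "used but currently empty", while a non-nil slice is truncated to length 0 with its capacity kept for the next CopyTo.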
@@ -21,7 +21,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-func TestGetBound(t *testing.T) {
+func TestGetBoundExponential(t *testing.T) {
 	scenarios := []struct {
 		idx    int32
 		schema int32
@@ -105,7 +105,7 @@ func TestGetBound(t *testing.T) {
 	}
 
 	for _, s := range scenarios {
-		got := getBound(s.idx, s.schema)
+		got := getBoundExponential(s.idx, s.schema)
 		if s.want != got {
 			require.Equal(t, s.want, got, "idx %d, schema %d", s.idx, s.schema)
 		}
@@ -49,11 +49,12 @@ const (
 type Histogram struct {
 	// Counter reset information.
 	CounterResetHint CounterResetHint
-	// Currently valid schema numbers are -4 <= n <= 8. They are all for
-	// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
-	// then each power of two is divided into 2^n logarithmic buckets. Or
-	// in other words, each bucket boundary is the previous boundary times
-	// 2^(2^-n).
+	// Currently valid schema numbers are -4 <= n <= 8 for exponential buckets,
+	// They are all for base-2 bucket schemas, where 1 is a bucket boundary in
+	// each case, and then each power of two is divided into 2^n logarithmic buckets.
+	// Or in other words, each bucket boundary is the previous boundary times
+	// 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by
+	// the CustomValues field.
 	Schema int32
 	// Width of the zero bucket.
 	ZeroThreshold float64
@@ -69,6 +70,12 @@ type Histogram struct {
 	// count. All following ones are deltas relative to the previous
 	// element.
 	PositiveBuckets, NegativeBuckets []int64
+	// Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
+	// This slice is interned, to be treated as immutable and copied by reference.
+	// These numbers should be strictly increasing. This field is only used when the
+	// schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans
+	// and NegativeBuckets fields are not used in that case.
+	CustomValues []float64
 }
 
 // A Span defines a continuous sequence of buckets.
@@ -80,33 +87,46 @@ type Span struct {
 	Length uint32
 }
 
+func (h *Histogram) UsesCustomBuckets() bool {
+	return IsCustomBucketsSchema(h.Schema)
+}
+
 // Copy returns a deep copy of the Histogram.
 func (h *Histogram) Copy() *Histogram {
 	c := Histogram{
 		CounterResetHint: h.CounterResetHint,
 		Schema:           h.Schema,
-		ZeroThreshold:    h.ZeroThreshold,
-		ZeroCount:        h.ZeroCount,
 		Count:            h.Count,
 		Sum:              h.Sum,
 	}
 
+	if h.UsesCustomBuckets() {
+		if len(h.CustomValues) != 0 {
+			c.CustomValues = make([]float64, len(h.CustomValues))
+			copy(c.CustomValues, h.CustomValues)
+		}
+	} else {
+		c.ZeroThreshold = h.ZeroThreshold
+		c.ZeroCount = h.ZeroCount
+
+		if len(h.NegativeSpans) != 0 {
+			c.NegativeSpans = make([]Span, len(h.NegativeSpans))
+			copy(c.NegativeSpans, h.NegativeSpans)
+		}
+		if len(h.NegativeBuckets) != 0 {
+			c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
+			copy(c.NegativeBuckets, h.NegativeBuckets)
+		}
+	}
+
 	if len(h.PositiveSpans) != 0 {
 		c.PositiveSpans = make([]Span, len(h.PositiveSpans))
 		copy(c.PositiveSpans, h.PositiveSpans)
 	}
-	if len(h.NegativeSpans) != 0 {
-		c.NegativeSpans = make([]Span, len(h.NegativeSpans))
-		copy(c.NegativeSpans, h.NegativeSpans)
-	}
 	if len(h.PositiveBuckets) != 0 {
 		c.PositiveBuckets = make([]int64, len(h.PositiveBuckets))
 		copy(c.PositiveBuckets, h.PositiveBuckets)
 	}
-	if len(h.NegativeBuckets) != 0 {
-		c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
-		copy(c.NegativeBuckets, h.NegativeBuckets)
-	}
 
 	return &c
 }
@@ -116,22 +136,36 @@ func (h *Histogram) Copy() *Histogram {
 func (h *Histogram) CopyTo(to *Histogram) {
 	to.CounterResetHint = h.CounterResetHint
 	to.Schema = h.Schema
-	to.ZeroThreshold = h.ZeroThreshold
-	to.ZeroCount = h.ZeroCount
 	to.Count = h.Count
 	to.Sum = h.Sum
 
+	if h.UsesCustomBuckets() {
+		to.ZeroThreshold = 0
+		to.ZeroCount = 0
+
+		to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
+		to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
+
+		to.CustomValues = resize(to.CustomValues, len(h.CustomValues))
+		copy(to.CustomValues, h.CustomValues)
+	} else {
+		to.ZeroThreshold = h.ZeroThreshold
+		to.ZeroCount = h.ZeroCount
+
+		to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
+		copy(to.NegativeSpans, h.NegativeSpans)
+
+		to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
+		copy(to.NegativeBuckets, h.NegativeBuckets)
+
+		to.CustomValues = clearIfNotNil(to.CustomValues)
+	}
+
 	to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
 	copy(to.PositiveSpans, h.PositiveSpans)
 
-	to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
-	copy(to.NegativeSpans, h.NegativeSpans)
-
 	to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
 	copy(to.PositiveBuckets, h.PositiveBuckets)
-
-	to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
-	copy(to.NegativeBuckets, h.NegativeBuckets)
 }
 
 // String returns a string representation of the Histogram.
@@ -165,8 +199,11 @@ func (h *Histogram) String() string {
 	return sb.String()
 }
 
-// ZeroBucket returns the zero bucket.
+// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets.
 func (h *Histogram) ZeroBucket() Bucket[uint64] {
+	if h.UsesCustomBuckets() {
+		panic("histograms with custom buckets have no zero bucket")
+	}
 	return Bucket[uint64]{
 		Lower: -h.ZeroThreshold,
 		Upper: h.ZeroThreshold,
@@ -179,14 +216,14 @@ func (h *Histogram) ZeroBucket() Bucket[uint64] {
 // PositiveBucketIterator returns a BucketIterator to iterate over all positive
 // buckets in ascending order (starting next to the zero bucket and going up).
 func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] {
-	it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
+	it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues)
 	return &it
 }
 
 // NegativeBucketIterator returns a BucketIterator to iterate over all negative
 // buckets in descending order (starting next to the zero bucket and going down).
 func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] {
-	it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
+	it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
 	return &it
 }
 
@@ -207,30 +244,42 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
 // but they must represent the same bucket layout to match.
 // Sum is compared based on its bit pattern because this method
 // is about data equality rather than mathematical equality.
+// We ignore fields that are not used based on the exponential / custom buckets schema,
+// but check fields where differences may cause unintended behaviour even if they are not
+// supposed to be used according to the schema.
 func (h *Histogram) Equals(h2 *Histogram) bool {
 	if h2 == nil {
 		return false
 	}
 
-	if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
-		h.ZeroCount != h2.ZeroCount || h.Count != h2.Count ||
+	if h.Schema != h2.Schema || h.Count != h2.Count ||
 		math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
 		return false
 	}
 
+	if h.UsesCustomBuckets() {
+		if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
+			return false
+		}
+	}
+
+	if h.ZeroThreshold != h2.ZeroThreshold || h.ZeroCount != h2.ZeroCount {
+		return false
+	}
+
+	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
+		return false
+	}
+	if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
+		return false
+	}
+
 	if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
 		return false
 	}
-	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
-		return false
-	}
-
 	if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) {
 		return false
 	}
-	if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
-		return false
-	}
 
 	return true
 }
@ -321,17 +370,36 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram {
|
||||||
}
|
}
|
||||||
fh.CounterResetHint = h.CounterResetHint
|
fh.CounterResetHint = h.CounterResetHint
|
||||||
fh.Schema = h.Schema
|
fh.Schema = h.Schema
|
||||||
fh.ZeroThreshold = h.ZeroThreshold
|
|
||||||
fh.ZeroCount = float64(h.ZeroCount)
|
|
||||||
fh.Count = float64(h.Count)
|
fh.Count = float64(h.Count)
|
||||||
fh.Sum = h.Sum
|
fh.Sum = h.Sum
|
||||||
|
|
||||||
|
if h.UsesCustomBuckets() {
|
||||||
|
fh.ZeroThreshold = 0
|
||||||
|
fh.ZeroCount = 0
|
||||||
|
fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans)
|
||||||
|
fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets)
|
||||||
|
|
||||||
|
fh.CustomValues = resize(fh.CustomValues, len(h.CustomValues))
|
||||||
|
copy(fh.CustomValues, h.CustomValues)
|
||||||
|
} else {
|
||||||
|
fh.ZeroThreshold = h.ZeroThreshold
|
||||||
|
fh.ZeroCount = float64(h.ZeroCount)
|
||||||
|
|
||||||
|
fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans))
|
||||||
|
copy(fh.NegativeSpans, h.NegativeSpans)
|
||||||
|
|
||||||
|
fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets))
|
||||||
|
var currentNegative float64
|
||||||
|
for i, b := range h.NegativeBuckets {
|
||||||
|
currentNegative += float64(b)
|
||||||
|
fh.NegativeBuckets[i] = currentNegative
|
||||||
|
}
|
||||||
|
fh.CustomValues = clearIfNotNil(fh.CustomValues)
|
||||||
|
}
|
||||||
|
|
||||||
fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans))
|
fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans))
|
||||||
copy(fh.PositiveSpans, h.PositiveSpans)
|
copy(fh.PositiveSpans, h.PositiveSpans)
|
||||||
|
|
||||||
fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans))
|
|
||||||
copy(fh.NegativeSpans, h.NegativeSpans)
|
|
||||||
|
|
||||||
fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets))
|
fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets))
|
||||||
var currentPositive float64
|
var currentPositive float64
|
||||||
for i, b := range h.PositiveBuckets {
|
for i, b := range h.PositiveBuckets {
|
||||||
@@ -339,13 +407,6 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram {
 		fh.PositiveBuckets[i] = currentPositive
 	}
 
-	fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets))
-	var currentNegative float64
-	for i, b := range h.NegativeBuckets {
-		currentNegative += float64(b)
-		fh.NegativeBuckets[i] = currentNegative
-	}
-
 	return fh
 }
 
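A hedged usage sketch of the reworked conversion (field values are illustrative, not taken from the commit): ToFloat(nil) allocates a fresh FloatHistogram, while passing the previous result lets the method reuse its span and bucket slices; with a custom buckets schema the zero bucket and negative side are cleared and CustomValues is copied instead.

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        h := &histogram.Histogram{
            Schema:          histogram.CustomBucketsSchema,
            Count:           5,
            Sum:             10,
            PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
            PositiveBuckets: []int64{2, -1, 1}, // deltas for absolute counts 2, 1, 2
            CustomValues:    []float64{1, 5},   // bounds: (-Inf,1], (1,5], (5,+Inf]
        }

        // ToFloat(nil) allocates; passing the previous result reuses its slices,
        // which matters when converting many histograms in a loop.
        var fh *histogram.FloatHistogram
        for i := 0; i < 3; i++ {
            fh = h.ToFloat(fh)
        }
        fmt.Println(fh.String())
    }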
@@ -357,25 +418,47 @@ func resize[T any](items []T, n int) []T {
 }
 
 // Validate validates consistency between span and bucket slices. Also, buckets are checked
-// against negative values.
+// against negative values. We check to make sure there are no unexpected fields or field values
+// based on the exponential / custom buckets schema.
 // For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a
 // strict h.Count = nCount + pCount + h.ZeroCount check is performed.
 // Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount),
 // because NaN observations do not increment the values of buckets (but they do increment
 // the total h.Count).
 func (h *Histogram) Validate() error {
-	if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
-		return fmt.Errorf("negative side: %w", err)
-	}
-	if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
-		return fmt.Errorf("positive side: %w", err)
-	}
 	var nCount, pCount uint64
-	err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true)
-	if err != nil {
-		return fmt.Errorf("negative side: %w", err)
+	if h.UsesCustomBuckets() {
+		if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+			return fmt.Errorf("custom buckets: %w", err)
+		}
+		if h.ZeroCount != 0 {
+			return fmt.Errorf("custom buckets: must have zero count of 0")
+		}
+		if h.ZeroThreshold != 0 {
+			return fmt.Errorf("custom buckets: must have zero threshold of 0")
+		}
+		if len(h.NegativeSpans) > 0 {
+			return fmt.Errorf("custom buckets: must not have negative spans")
+		}
+		if len(h.NegativeBuckets) > 0 {
+			return fmt.Errorf("custom buckets: must not have negative buckets")
+		}
+	} else {
+		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+			return fmt.Errorf("positive side: %w", err)
+		}
+		if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
+			return fmt.Errorf("negative side: %w", err)
+		}
+		err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true)
+		if err != nil {
+			return fmt.Errorf("negative side: %w", err)
+		}
+		if h.CustomValues != nil {
+			return fmt.Errorf("histogram with exponential schema must not have custom bounds")
+		}
 	}
-	err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
+	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
 	if err != nil {
 		return fmt.Errorf("positive side: %w", err)
 	}
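The new rules in one small sketch (it mirrors the "valid custom buckets histogram" test case added further down): a custom-buckets histogram keeps everything on the positive side, and any zero-bucket or negative-side data fails validation.

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        h := &histogram.Histogram{
            Schema:          histogram.CustomBucketsSchema,
            Count:           5,
            Sum:             19.4,
            PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}},
            PositiveBuckets: []int64{1, 1, -1, 0},
            CustomValues:    []float64{1, 2, 3, 4}, // upper bounds; the +Inf bucket is implicit
        }
        fmt.Println(h.Validate()) // <nil>

        h.ZeroCount = 1           // forbidden for custom buckets
        fmt.Println(h.Validate()) // custom buckets: must have zero count of 0
    }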
@@ -398,12 +481,13 @@ type regularBucketIterator struct {
 	baseBucketIterator[uint64, int64]
 }
 
-func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) regularBucketIterator {
+func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customValues []float64) regularBucketIterator {
 	i := baseBucketIterator[uint64, int64]{
 		schema:   schema,
 		spans:    spans,
 		buckets:  buckets,
 		positive: positive,
+		customValues: customValues,
 	}
 	return regularBucketIterator{i}
 }
@@ -477,7 +561,7 @@ func (c *cumulativeBucketIterator) Next() bool {
 
 	if c.emptyBucketCount > 0 {
 		// We are traversing through empty buckets at the moment.
-		c.currUpper = getBound(c.currIdx, c.h.Schema)
+		c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
 		c.currIdx++
 		c.emptyBucketCount--
 		return true
@@ -494,7 +578,7 @@ func (c *cumulativeBucketIterator) Next() bool {
 
 	c.currCount += c.h.PositiveBuckets[c.posBucketsIdx]
 	c.currCumulativeCount += uint64(c.currCount)
-	c.currUpper = getBound(c.currIdx, c.h.Schema)
+	c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
 
 	c.posBucketsIdx++
 	c.idxInSpan++
@@ -524,7 +608,15 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] {
 
 // ReduceResolution reduces the histogram's spans, buckets into target schema.
 // The target schema must be smaller than the current histogram's schema.
+// This will panic if the histogram has custom buckets or if the target schema is
+// a custom buckets schema.
 func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram {
+	if h.UsesCustomBuckets() {
+		panic("cannot reduce resolution when there are custom buckets")
+	}
+	if IsCustomBucketsSchema(targetSchema) {
+		panic("cannot reduce resolution to custom buckets schema")
+	}
 	if targetSchema >= h.Schema {
 		panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
 	}
 
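For orientation, a hedged usage sketch (values are illustrative): reducing resolution merges exponential buckets in place, and the new guards turn the operation into a panic for custom-buckets histograms, which have no exponential layout to merge.

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        h := &histogram.Histogram{
            Schema:          1, // two buckets per power of two
            Count:           4,
            Sum:             6,
            PositiveSpans:   []histogram.Span{{Offset: 0, Length: 4}},
            PositiveBuckets: []int64{1, 0, 0, 0}, // deltas: one observation per bucket
        }
        h = h.ReduceResolution(0) // merge pairs of schema-1 buckets into schema-0 buckets
        fmt.Println(h.Schema, len(h.PositiveBuckets)) // 0 2
    }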
model/histogram/histogram_test.go
@@ -69,6 +69,21 @@ func TestHistogramString(t *testing.T) {
 			},
 			expectedString: "{count:19, sum:2.7, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, (0.5,1]:1, (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:1, (16,32]:1, (32,64]:1}",
 		},
+		{
+			histogram: Histogram{
+				Schema: CustomBucketsSchema,
+				Count:  19,
+				Sum:    2.7,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 4},
+					{Offset: 0, Length: 0},
+					{Offset: 0, Length: 3},
+				},
+				PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
+				CustomValues:    []float64{1, 2, 5, 10, 15, 20, 25, 50},
+			},
+			expectedString: "{count:19, sum:2.7, [-Inf,1]:1, (1,2]:3, (2,5]:1, (5,10]:2, (10,15]:1, (15,20]:1, (20,25]:1}",
+		},
 	}
 
 	for i, c := range cases {
@@ -208,6 +223,26 @@ func TestCumulativeBucketIterator(t *testing.T) {
 				{Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2},
 			},
 		},
+		{
+			histogram: Histogram{
+				Schema: CustomBucketsSchema,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				PositiveBuckets: []int64{1, 1, -1, 0},
+				CustomValues:    []float64{5, 10, 20, 50},
+			},
+			expectedBuckets: []Bucket[uint64]{
+				{Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
+				{Lower: math.Inf(-1), Upper: 10, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 1},
+				{Lower: math.Inf(-1), Upper: 20, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 2},
+				{Lower: math.Inf(-1), Upper: 50, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 3},
+				{Lower: math.Inf(-1), Upper: math.Inf(1), Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 4},
+			},
+		},
 	}
 
 	for i, c := range cases {
@@ -368,6 +403,62 @@ func TestRegularBucketIterator(t *testing.T) {
 			},
 			expectedNegativeBuckets: []Bucket[uint64]{},
 		},
+		{
+			histogram: Histogram{
+				Schema: CustomBucketsSchema,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				PositiveBuckets: []int64{1, 1, -1, 0},
+				CustomValues:    []float64{5, 10, 20, 50},
+			},
+			expectedPositiveBuckets: []Bucket[uint64]{
+				{Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
+				{Lower: 5, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
+				{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
+				{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
+			},
+			expectedNegativeBuckets: []Bucket[uint64]{},
+		},
+		{
+			histogram: Histogram{
+				Schema: CustomBucketsSchema,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				PositiveBuckets: []int64{1, 1, -1, 0},
+				CustomValues:    []float64{0, 10, 20, 50},
+			},
+			expectedPositiveBuckets: []Bucket[uint64]{
+				{Lower: math.Inf(-1), Upper: 0, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
+				{Lower: 0, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
+				{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
+				{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
+			},
+			expectedNegativeBuckets: []Bucket[uint64]{},
+		},
+		{
+			histogram: Histogram{
+				Schema: CustomBucketsSchema,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 5},
+				},
+				PositiveBuckets: []int64{1, 1, 0, -1, 0},
+				CustomValues:    []float64{-5, 0, 20, 50},
+			},
+			expectedPositiveBuckets: []Bucket[uint64]{
+				{Lower: math.Inf(-1), Upper: -5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
+				{Lower: -5, Upper: 0, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
+				{Lower: 0, Upper: 20, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 2},
+				{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
+				{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
+			},
+			expectedNegativeBuckets: []Bucket[uint64]{},
+		},
 	}
 
 	for i, c := range cases {
@@ -461,11 +552,81 @@ func TestHistogramToFloat(t *testing.T) {
 		}
 	}
 }
 
+func TestCustomBucketsHistogramToFloat(t *testing.T) {
+	h := Histogram{
+		Schema: CustomBucketsSchema,
+		Count:  10,
+		Sum:    2.7,
+		PositiveSpans: []Span{
+			{Offset: 0, Length: 4},
+			{Offset: 0, Length: 0},
+			{Offset: 0, Length: 3},
+		},
+		PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
+		CustomValues:    []float64{5, 10, 20, 50, 100, 500},
+	}
+	cases := []struct {
+		name string
+		fh   *FloatHistogram
+	}{
+		{name: "without prior float histogram"},
+		{name: "prior float histogram with more buckets", fh: &FloatHistogram{
+			Schema:        2,
+			Count:         3,
+			Sum:           5,
+			ZeroThreshold: 4,
+			ZeroCount:     1,
+			PositiveSpans: []Span{
+				{Offset: 1, Length: 2},
+				{Offset: 1, Length: 2},
+				{Offset: 1, Length: 2},
+			},
+			PositiveBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9},
+			NegativeSpans: []Span{
+				{Offset: 20, Length: 6},
+				{Offset: 12, Length: 7},
+				{Offset: 33, Length: 10},
+			},
+			NegativeBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9},
+		}},
+		{name: "prior float histogram with fewer buckets", fh: &FloatHistogram{
+			Schema:        2,
+			Count:         3,
+			Sum:           5,
+			ZeroThreshold: 4,
+			ZeroCount:     1,
+			PositiveSpans: []Span{
+				{Offset: 1, Length: 2},
+				{Offset: 1, Length: 2},
+				{Offset: 1, Length: 2},
+			},
+			PositiveBuckets: []float64{1, 2},
+			NegativeSpans: []Span{
+				{Offset: 20, Length: 6},
+				{Offset: 12, Length: 7},
+				{Offset: 33, Length: 10},
+			},
+			NegativeBuckets: []float64{1, 2},
+		}},
+	}
+
+	require.NoError(t, h.Validate())
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			hStr := h.String()
+			fh := h.ToFloat(c.fh)
+			require.NoError(t, fh.Validate())
+			require.Equal(t, hStr, h.String())
+			require.Equal(t, hStr, fh.String())
+		})
+	}
+}
+
 // TestHistogramEquals tests both Histogram and FloatHistogram.
 func TestHistogramEquals(t *testing.T) {
 	h1 := Histogram{
 		Schema:        3,
-		Count:         61,
+		Count:         62,
 		Sum:           2.7,
 		ZeroThreshold: 0.1,
 		ZeroCount:     42,
@@ -495,6 +656,15 @@ func TestHistogramEquals(t *testing.T) {
 		require.False(t, h1f.Equals(h2f))
 		require.False(t, h2f.Equals(h1f))
 	}
+	notEqualsUntilFloatConv := func(h1, h2 Histogram) {
+		require.False(t, h1.Equals(&h2))
+		require.False(t, h2.Equals(&h1))
+		h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil)
+		require.True(t, h1f.Equals(h2f))
+		require.True(t, h2f.Equals(h1f))
+	}
+
+	require.NoError(t, h1.Validate())
 
 	h2 := h1.Copy()
 	equals(h1, *h2)
@@ -602,6 +772,45 @@ func TestHistogramEquals(t *testing.T) {
 
 	// Sum StaleNaN vs regular NaN.
 	notEquals(*hStale, *hNaN)
+
+	// Has non-empty custom bounds for exponential schema.
+	hCustom := h1.Copy()
+	hCustom.CustomValues = []float64{1, 2, 3}
+	equals(h1, *hCustom)
+
+	cbh1 := Histogram{
+		Schema: CustomBucketsSchema,
+		Count:  10,
+		Sum:    2.7,
+		PositiveSpans: []Span{
+			{Offset: 0, Length: 4},
+			{Offset: 10, Length: 3},
+		},
+		PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
+		CustomValues:    []float64{0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 200, 250, 500, 1000},
+	}
+
+	require.NoError(t, cbh1.Validate())
+
+	cbh2 := cbh1.Copy()
+	equals(cbh1, *cbh2)
+
+	// Has different custom bounds for custom buckets schema.
+	cbh2 = cbh1.Copy()
+	cbh2.CustomValues = []float64{0.1, 0.2, 0.5}
+	notEquals(cbh1, *cbh2)
+
+	// Has non-empty negative spans and buckets for custom buckets schema.
+	cbh2 = cbh1.Copy()
+	cbh2.NegativeSpans = []Span{{Offset: 0, Length: 1}}
+	cbh2.NegativeBuckets = []int64{1}
+	notEqualsUntilFloatConv(cbh1, *cbh2)
+
+	// Has non-zero zero count and threshold for custom buckets schema.
+	cbh2 = cbh1.Copy()
+	cbh2.ZeroThreshold = 0.1
+	cbh2.ZeroCount = 10
+	notEqualsUntilFloatConv(cbh1, *cbh2)
 }
 
 func TestHistogramCopy(t *testing.T) {
@@ -640,6 +849,21 @@ func TestHistogramCopy(t *testing.T) {
 			},
 			expected: &Histogram{},
 		},
+		{
+			name: "with custom buckets",
+			orig: &Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 1}},
+				PositiveBuckets: []int64{1, 3, -3, 42},
+				CustomValues:    []float64{5, 10, 15},
+			},
+			expected: &Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 1}},
+				PositiveBuckets: []int64{1, 3, -3, 42},
+				CustomValues:    []float64{5, 10, 15},
+			},
+		},
 	}
 
 	for _, tcase := range cases {
@@ -690,6 +914,21 @@ func TestHistogramCopyTo(t *testing.T) {
 			},
 			expected: &Histogram{},
 		},
+		{
+			name: "with custom buckets",
+			orig: &Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 1}},
+				PositiveBuckets: []int64{1, 3, -3, 42},
+				CustomValues:    []float64{5, 10, 15},
+			},
+			expected: &Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 1}},
+				PositiveBuckets: []int64{1, 3, -3, 42},
+				CustomValues:    []float64{5, 10, 15},
+			},
+		},
 	}
 
 	for _, tcase := range cases {
@@ -971,6 +1210,86 @@ func TestHistogramCompact(t *testing.T) {
 				NegativeBuckets: []int64{2, 3},
 			},
 		},
+		{
+			"nothing should happen with custom buckets",
+			&Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 1}, {2, 3}},
+				PositiveBuckets: []int64{1, 3, -3, 42},
+				CustomValues:    []float64{5, 10, 15},
+			},
+			0,
+			&Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 1}, {2, 3}},
+				PositiveBuckets: []int64{1, 3, -3, 42},
+				CustomValues:    []float64{5, 10, 15},
+			},
+		},
+		{
+			"eliminate zero offsets with custom buckets",
+			&Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 1}, {0, 3}, {0, 1}},
+				PositiveBuckets: []int64{1, 3, -3, 42, 3},
+				CustomValues:    []float64{5, 10, 15, 20},
+			},
+			0,
+			&Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 5}},
+				PositiveBuckets: []int64{1, 3, -3, 42, 3},
+				CustomValues:    []float64{5, 10, 15, 20},
+			},
+		},
+		{
+			"eliminate zero length with custom buckets",
+			&Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 2}, {2, 0}, {3, 3}},
+				PositiveBuckets: []int64{1, 3, -3, 42, 3},
+				CustomValues:    []float64{5, 10, 15, 20},
+			},
+			0,
+			&Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 2}, {5, 3}},
+				PositiveBuckets: []int64{1, 3, -3, 42, 3},
+				CustomValues:    []float64{5, 10, 15, 20},
+			},
+		},
+		{
+			"eliminate multiple zero length spans with custom buckets",
+			&Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}},
+				PositiveBuckets: []int64{1, 3, -3, 42, 3},
+				CustomValues:    []float64{5, 10, 15, 20},
+			},
+			0,
+			&Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 2}, {9, 3}},
+				PositiveBuckets: []int64{1, 3, -3, 42, 3},
+				CustomValues:    []float64{5, 10, 15, 20},
+			},
+		},
+		{
+			"cut empty buckets at start or end of spans, even in the middle, with custom buckets",
+			&Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-4, 6}, {3, 6}},
+				PositiveBuckets: []int64{0, 0, 1, 3, -4, 0, 1, 42, 3, -46, 0, 0},
+				CustomValues:    []float64{5, 10, 15, 20},
+			},
+			0,
+			&Histogram{
+				Schema:          CustomBucketsSchema,
+				PositiveSpans:   []Span{{-2, 2}, {5, 3}},
+				PositiveBuckets: []int64{1, 3, -3, 42, 3},
+				CustomValues:    []float64{5, 10, 15, 20},
+			},
+		},
 	}
 
 	for _, c := range cases {
@@ -1107,6 +1426,145 @@ func TestHistogramValidation(t *testing.T) {
 			errMsg:    `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`,
 			skipFloat: true,
 		},
+		"rejects an exponential histogram with custom buckets schema": {
+			h: &Histogram{
+				Count:         12,
+				ZeroCount:     2,
+				ZeroThreshold: 0.001,
+				Sum:           19.4,
+				Schema:        CustomBucketsSchema,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				PositiveBuckets: []int64{1, 1, -1, 0},
+				NegativeSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				NegativeBuckets: []int64{1, 1, -1, 0},
+			},
+			errMsg: `custom buckets: only 0 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`,
+		},
+		"rejects a custom buckets histogram with exponential schema": {
+			h: &Histogram{
+				Count:  5,
+				Sum:    19.4,
+				Schema: 0,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				PositiveBuckets: []int64{1, 1, -1, 0},
+				CustomValues:    []float64{1, 2, 3, 4},
+			},
+			errMsg:    `histogram with exponential schema must not have custom bounds`,
+			skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation
+		},
+		"rejects a custom buckets histogram with zero/negative buckets": {
+			h: &Histogram{
+				Count:         12,
+				ZeroCount:     2,
+				ZeroThreshold: 0.001,
+				Sum:           19.4,
+				Schema:        CustomBucketsSchema,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				PositiveBuckets: []int64{1, 1, -1, 0},
+				NegativeSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				NegativeBuckets: []int64{1, 1, -1, 0},
+				CustomValues:    []float64{1, 2, 3, 4},
+			},
+			errMsg:    `custom buckets: must have zero count of 0`,
+			skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation
+		},
+		"rejects a custom buckets histogram with negative offset in first span": {
+			h: &Histogram{
+				Count:  5,
+				Sum:    19.4,
+				Schema: CustomBucketsSchema,
+				PositiveSpans: []Span{
+					{Offset: -1, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				PositiveBuckets: []int64{1, 1, -1, 0},
+				CustomValues:    []float64{1, 2, 3, 4},
+			},
+			errMsg: `custom buckets: span number 1 with offset -1: histogram has a span whose offset is negative`,
+		},
+		"rejects a custom buckets histogram with negative offset in subsequent spans": {
+			h: &Histogram{
+				Count:  5,
+				Sum:    19.4,
+				Schema: CustomBucketsSchema,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: -1, Length: 2},
+				},
+				PositiveBuckets: []int64{1, 1, -1, 0},
+				CustomValues:    []float64{1, 2, 3, 4},
+			},
+			errMsg: `custom buckets: span number 2 with offset -1: histogram has a span whose offset is negative`,
+		},
+		"rejects a custom buckets histogram with non-matching bucket counts": {
+			h: &Histogram{
+				Count:  5,
+				Sum:    19.4,
+				Schema: CustomBucketsSchema,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				PositiveBuckets: []int64{1, 1, -1},
+				CustomValues:    []float64{1, 2, 3, 4},
+			},
+			errMsg: `custom buckets: spans need 4 buckets, have 3 buckets: histogram spans specify different number of buckets than provided`,
+		},
+		"rejects a custom buckets histogram with too few bounds": {
+			h: &Histogram{
+				Count:  5,
+				Sum:    19.4,
+				Schema: CustomBucketsSchema,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				PositiveBuckets: []int64{1, 1, -1, 0},
+				CustomValues:    []float64{1, 2, 3},
+			},
+			errMsg: `custom buckets: only 3 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`,
+		},
+		"valid custom buckets histogram": {
+			h: &Histogram{
+				Count:  5,
+				Sum:    19.4,
+				Schema: CustomBucketsSchema,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				PositiveBuckets: []int64{1, 1, -1, 0},
+				CustomValues:    []float64{1, 2, 3, 4},
+			},
+		},
+		"valid custom buckets histogram with extra bounds": {
+			h: &Histogram{
+				Count:  5,
+				Sum:    19.4,
+				Schema: CustomBucketsSchema,
+				PositiveSpans: []Span{
+					{Offset: 0, Length: 2},
+					{Offset: 1, Length: 2},
+				},
+				PositiveBuckets: []int64{1, 1, -1, 0},
+				CustomValues:    []float64{1, 2, 3, 4, 5, 6, 7, 8},
+			},
+		},
 	}
 
 	for testName, tc := range tests {
model/labels/labels_dedupelabels.go
@@ -104,30 +104,39 @@ func (t *nameTable) ToName(num int) string {
 	return t.byNum[num]
 }
 
+// "Varint" in this file is non-standard: we encode small numbers (up to 32767) in 2 bytes,
+// because we expect most Prometheus to have more than 127 unique strings.
+// And we don't encode numbers larger than 4 bytes because we don't expect more than 536,870,912 unique strings.
 func decodeVarint(data string, index int) (int, int) {
-	// Fast-path for common case of a single byte, value 0..127.
-	b := data[index]
-	index++
-	if b < 0x80 {
-		return int(b), index
-	}
-	value := int(b & 0x7F)
-	for shift := uint(7); ; shift += 7 {
-		// Just panic if we go of the end of data, since all Labels strings are constructed internally and
-		// malformed data indicates a bug, or memory corruption.
-		b := data[index]
-		index++
-		value |= int(b&0x7F) << shift
-		if b < 0x80 {
-			break
-		}
-	}
-	return value, index
+	b := int(data[index]) + int(data[index+1])<<8
+	index += 2
+	if b < 0x8000 {
+		return b, index
+	}
+	return decodeVarintRest(b, data, index)
+}
+
+func decodeVarintRest(b int, data string, index int) (int, int) {
+	value := int(b & 0x7FFF)
+	b = int(data[index])
+	index++
+	if b < 0x80 {
+		return value | (b << 15), index
+	}
+
+	value |= (b & 0x7f) << 15
+	b = int(data[index])
+	index++
+	return value | (b << 22), index
 }
 
 func decodeString(t *nameTable, data string, index int) (string, int) {
-	var num int
-	num, index = decodeVarint(data, index)
+	// Copy decodeVarint here, because the Go compiler says it's too big to inline.
+	num := int(data[index]) + int(data[index+1])<<8
+	index += 2
+	if num >= 0x8000 {
+		num, index = decodeVarintRest(num, data, index)
+	}
 	return t.ToName(num), index
 }
 
@@ -321,7 +330,12 @@ func (ls Labels) Get(name string) string {
 		} else if lName[0] > name[0] { // Stop looking if we've gone past.
 			break
 		}
-		_, i = decodeVarint(ls.data, i)
+		// Copy decodeVarint here, because the Go compiler says it's too big to inline.
+		num := int(ls.data[i]) + int(ls.data[i+1])<<8
+		i += 2
+		if num >= 0x8000 {
+			_, i = decodeVarintRest(num, ls.data, i)
+		}
 	}
 	return ""
 }
@@ -339,7 +353,12 @@ func (ls Labels) Has(name string) bool {
 		} else if lName[0] > name[0] { // Stop looking if we've gone past.
 			break
 		}
-		_, i = decodeVarint(ls.data, i)
+		// Copy decodeVarint here, because the Go compiler says it's too big to inline.
+		num := int(ls.data[i]) + int(ls.data[i+1])<<8
+		i += 2
+		if num >= 0x8000 {
+			_, i = decodeVarintRest(num, ls.data, i)
+		}
 	}
 	return false
 }
@@ -641,29 +660,24 @@ func marshalNumbersToSizedBuffer(nums []int, data []byte) int {
 
 func sizeVarint(x uint64) (n int) {
 	// Most common case first
-	if x < 1<<7 {
-		return 1
+	if x < 1<<15 {
+		return 2
 	}
-	if x >= 1<<56 {
-		return 9
+	if x < 1<<22 {
+		return 3
 	}
-	if x >= 1<<28 {
-		x >>= 28
-		n = 4
+	if x >= 1<<29 {
+		panic("Number too large to represent")
 	}
-	if x >= 1<<14 {
-		x >>= 14
-		n += 2
-	}
-	if x >= 1<<7 {
-		n++
-	}
-	return n + 1
+	return 4
 }
 
 func encodeVarintSlow(data []byte, offset int, v uint64) int {
 	offset -= sizeVarint(v)
 	base := offset
+	data[offset] = uint8(v)
+	v >>= 8
+	offset++
 	for v >= 1<<7 {
 		data[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
@@ -673,11 +687,12 @@ func encodeVarintSlow(data []byte, offset int, v uint64) int {
 	return base
 }
 
-// Special code for the common case that a value is less than 128
+// Special code for the common case that a value is less than 32768
 func encodeVarint(data []byte, offset, v int) int {
-	if v < 1<<7 {
-		offset--
+	if v < 1<<15 {
+		offset -= 2
 		data[offset] = uint8(v)
+		data[offset+1] = uint8(v >> 8)
 		return offset
 	}
 	return encodeVarintSlow(data, offset, uint64(v))
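To make the new wire format concrete, here is an editor's sketch of the encoder (it mirrors encodeVarint/encodeVarintSlow above rather than importing them). The low 15 bits of a value live in the first two bytes, little-endian; bit 15 doubles as a continuation flag, and any higher bits follow as ordinary 7-bit varint groups, capped at 4 bytes (values below 1<<29):

    package main

    import "fmt"

    // encode mirrors the scheme in the diff: 2 bytes for values below 32768,
    // 3 bytes below 1<<22, 4 bytes below 1<<29.
    func encode(v int) []byte {
        if v >= 1<<29 {
            panic("number too large to represent") // sizeVarint panics likewise
        }
        if v < 1<<15 {
            return []byte{byte(v), byte(v >> 8)} // bit 15 clear: no continuation
        }
        out := []byte{byte(v), byte(v>>8) | 0x80} // set the continuation bit
        for rest := v >> 15; ; rest >>= 7 {
            if rest < 1<<7 {
                return append(out, byte(rest))
            }
            out = append(out, byte(rest&0x7f|0x80))
        }
    }

    func main() {
        fmt.Printf("%x\n", encode(300)) // 2c01 (2 bytes)
        // The next two byte layouts match TestVarint cases in the new test
        // file just below.
        fmt.Printf("%x\n", encode(0x8000))   // 008001 (3 bytes)
        fmt.Printf("%x\n", encode(0x400000)) // 00808001 (4 bytes)
    }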
model/labels/labels_dedupelabels_test.go (new file, 50 lines)
@@ -0,0 +1,50 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build dedupelabels
+
+package labels
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestVarint(t *testing.T) {
+	cases := []struct {
+		v        int
+		expected []byte
+	}{
+		{0, []byte{0, 0}},
+		{1, []byte{1, 0}},
+		{2, []byte{2, 0}},
+		{0x7FFF, []byte{0xFF, 0x7F}},
+		{0x8000, []byte{0x00, 0x80, 0x01}},
+		{0x8001, []byte{0x01, 0x80, 0x01}},
+		{0x3FFFFF, []byte{0xFF, 0xFF, 0x7F}},
+		{0x400000, []byte{0x00, 0x80, 0x80, 0x01}},
+		{0x400001, []byte{0x01, 0x80, 0x80, 0x01}},
+		{0x1FFFFFFF, []byte{0xFF, 0xFF, 0xFF, 0x7F}},
+	}
+	var buf [16]byte
+	for _, c := range cases {
+		n := encodeVarint(buf[:], len(buf), c.v)
+		require.Equal(t, len(c.expected), len(buf)-n)
+		require.Equal(t, c.expected, buf[n:])
+		got, m := decodeVarint(string(buf[:]), n)
+		require.Equal(t, c.v, got)
+		require.Equal(t, len(buf), m)
+	}
+	require.Panics(t, func() { encodeVarint(buf[:], len(buf), 1<<29) })
+}
model/labels/labels_test.go
@@ -466,6 +466,38 @@ func TestLabels_DropMetricName(t *testing.T) {
 	require.True(t, Equal(original, check))
 }
 
+func ScratchBuilderForBenchmark() ScratchBuilder {
+	// (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.)
+	b := NewScratchBuilder(256)
+	for i := 0; i < 256; i++ {
+		b.Add(fmt.Sprintf("name%d", i), fmt.Sprintf("value%d", i))
+	}
+	b.Labels()
+	b.Reset()
+	return b
+}
+
+func NewForBenchmark(ls ...Label) Labels {
+	b := ScratchBuilderForBenchmark()
+	for _, l := range ls {
+		b.Add(l.Name, l.Value)
+	}
+	b.Sort()
+	return b.Labels()
+}
+
+func FromStringsForBenchmark(ss ...string) Labels {
+	if len(ss)%2 != 0 {
+		panic("invalid number of strings")
+	}
+	b := ScratchBuilderForBenchmark()
+	for i := 0; i < len(ss); i += 2 {
+		b.Add(ss[i], ss[i+1])
+	}
+	b.Sort()
+	return b.Labels()
+}
+
 // BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation
 // The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels.
 // In the following list, `old` is the linear search while `new` is the binary search implementation (without calling sort.Search, which performs even worse here)
@@ -488,7 +520,7 @@ func BenchmarkLabels_Get(b *testing.B) {
 	}
 	for _, size := range []int{5, 10, maxLabels} {
 		b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) {
-			labels := New(allLabels[:size]...)
+			labels := NewForBenchmark(allLabels[:size]...)
 			for _, scenario := range []struct {
 				desc, label string
 			}{
@@ -520,33 +552,33 @@ var comparisonBenchmarkScenarios = []struct {
 }{
 	{
 		"equal",
-		FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
-		FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
+		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
+		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
 	},
 	{
 		"not equal",
-		FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
-		FromStrings("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"),
+		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
+		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"),
 	},
 	{
 		"different sizes",
-		FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
-		FromStrings("a_label_name", "a_label_value"),
+		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
+		FromStringsForBenchmark("a_label_name", "a_label_value"),
 	},
 	{
 		"lots",
-		FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"),
-		FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"),
+		FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"),
+		FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"),
 	},
 	{
 		"real long equal",
-		FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
-		FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
+		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
+		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
 	},
 	{
 		"real long different end",
-		FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
-		FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"),
+		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
+		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"),
 	},
 }
 
@@ -834,7 +866,7 @@ func BenchmarkBuilder(b *testing.B) {
 }
 
 func BenchmarkLabels_Copy(b *testing.B) {
-	l := New(benchmarkLabels...)
+	l := NewForBenchmark(benchmarkLabels...)
 
 	for i := 0; i < b.N; i++ {
 		l = l.Copy()
model/relabel/relabel.go
@@ -206,6 +206,11 @@ func (re Regexp) MarshalYAML() (interface{}, error) {
 	return nil, nil
 }
 
+// IsZero implements the yaml.IsZeroer interface.
+func (re Regexp) IsZero() bool {
+	return re.Regexp == DefaultRelabelConfig.Regex.Regexp
+}
+
 // String returns the original string used to compile the regular expression.
 func (re Regexp) String() string {
 	str := re.Regexp.String()
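The point of IsZero: with an `omitempty` struct tag, the YAML encoder consults the IsZeroer interface, so a regex equal to the default is now omitted on marshal instead of being written back out, which is what the round-trip test that follows exercises. A toy, self-contained sketch of the mechanism, using gopkg.in/yaml.v3 for illustration (the types and default value here are invented, not Prometheus code):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    // Version stands in for relabel.Regexp: it reports itself as zero when it
    // holds the default value, so `omitempty` drops it when marshalling.
    type Version struct{ s string }

    func (v Version) IsZero() bool { return v.s == "v1" } // "v1" plays the role of the default

    func (v Version) MarshalYAML() (interface{}, error) { return v.s, nil }

    type Config struct {
        Version Version `yaml:"version,omitempty"`
    }

    func main() {
        out, _ := yaml.Marshal(Config{Version: Version{"v1"}})
        fmt.Print(string(out)) // {} : the default is omitted
        out, _ = yaml.Marshal(Config{Version: Version{"v2"}})
        fmt.Print(string(out)) // version: v2
    }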
model/relabel/relabel_test.go
@@ -851,3 +851,52 @@ func BenchmarkRelabel(b *testing.B) {
 		})
 	}
 }
+
+func TestConfig_UnmarshalThenMarshal(t *testing.T) {
+	tests := []struct {
+		name      string
+		inputYaml string
+	}{
+		{
+			name: "Values provided",
+			inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port]
+separator: ;
+regex: \\d+
+target_label: __meta_kubernetes_pod_container_port_number
+replacement: $1
+action: replace
+`,
+		},
+		{
+			name: "No regex provided",
+			inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port]
+separator: ;
+target_label: __meta_kubernetes_pod_container_port_number
+replacement: $1
+action: keepequal
+`,
+		},
+		{
+			name: "Default regex provided",
+			inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port]
+separator: ;
+regex: (.*)
+target_label: __meta_kubernetes_pod_container_port_number
+replacement: $1
+action: replace
+`,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			unmarshalled := Config{}
+			err := yaml.Unmarshal([]byte(test.inputYaml), &unmarshalled)
+			require.NoError(t, err)
+
+			marshalled, err := yaml.Marshal(&unmarshalled)
+			require.NoError(t, err)
+
+			require.Equal(t, test.inputYaml, string(marshalled))
+		})
+	}
+}
notifier/notifier.go
@@ -110,10 +110,11 @@ type Manager struct {
 
 	metrics *alertMetrics
 
 	more chan struct{}
 	mtx  sync.RWMutex
-	ctx    context.Context
-	cancel func()
+
+	stopOnce      *sync.Once
+	stopRequested chan struct{}
 
 	alertmanagers map[string]*alertmanagerSet
 	logger        log.Logger
@@ -121,9 +122,10 @@ type Manager struct {
 
 // Options are the configurable parameters of a Handler.
 type Options struct {
 	QueueCapacity   int
+	DrainOnShutdown bool
 	ExternalLabels  labels.Labels
 	RelabelConfigs  []*relabel.Config
 	// Used for sending HTTP requests to the Alertmanager.
 	Do func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error)
@@ -217,8 +219,6 @@ func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
 
 // NewManager is the manager constructor.
 func NewManager(o *Options, logger log.Logger) *Manager {
-	ctx, cancel := context.WithCancel(context.Background())
-
 	if o.Do == nil {
 		o.Do = do
 	}
@@ -227,12 +227,12 @@ func NewManager(o *Options, logger log.Logger) *Manager {
 	}
 
 	n := &Manager{
 		queue:         make([]*Alert, 0, o.QueueCapacity),
-		ctx:           ctx,
-		cancel:        cancel,
 		more:          make(chan struct{}, 1),
+		stopRequested: make(chan struct{}),
+		stopOnce:      &sync.Once{},
 		opts:          o,
 		logger:        logger,
 	}
 
 	queueLenFunc := func() float64 { return float64(n.queueLen()) }
@@ -298,42 +298,100 @@ func (n *Manager) nextBatch() []*Alert {
 	return alerts
 }
 
+// Run dispatches notifications continuously, returning once Stop has been called and all
+// pending notifications have been drained from the queue (if draining is enabled).
+//
+// Dispatching of notifications occurs in parallel to processing target updates to avoid one starving the other.
+// Refer to https://github.com/prometheus/prometheus/issues/13676 for more details.
+func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {
+	wg := sync.WaitGroup{}
+	wg.Add(2)
+
+	go func() {
+		defer wg.Done()
+		n.targetUpdateLoop(tsets)
+	}()
+
+	go func() {
+		defer wg.Done()
+		n.sendLoop()
+		n.drainQueue()
+	}()
+
+	wg.Wait()
+	level.Info(n.logger).Log("msg", "Notification manager stopped")
+}
+
 // sendLoop continuously consumes the notifications queue and sends alerts to
 // the configured Alertmanagers.
 func (n *Manager) sendLoop() {
 	for {
+		// If we've been asked to stop, that takes priority over sending any further notifications.
 		select {
-		case <-n.ctx.Done():
+		case <-n.stopRequested:
 			return
-		case <-n.more:
-		}
-		alerts := n.nextBatch()
-
-		if !n.sendAll(alerts...) {
-			n.metrics.dropped.Add(float64(len(alerts)))
-		}
-		// If the queue still has items left, kick off the next iteration.
-		if n.queueLen() > 0 {
-			n.setMore()
+		default:
+			select {
+			case <-n.stopRequested:
+				return
+			case <-n.more:
+				n.sendOneBatch()
+
+				// If the queue still has items left, kick off the next iteration.
+				if n.queueLen() > 0 {
+					n.setMore()
+				}
+			}
 		}
 	}
 }
 
-// Run receives updates of target groups and triggers a reload.
-// The dispatching of notifications occurs in the background to prevent blocking the receipt of target updates.
-// Refer to https://github.com/prometheus/prometheus/issues/13676 for more details.
-func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {
-	go n.sendLoop()
+// targetUpdateLoop receives updates of target groups and triggers a reload.
+func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group) {
 	for {
+		// If we've been asked to stop, that takes priority over processing any further target group updates.
 		select {
-		case <-n.ctx.Done():
+		case <-n.stopRequested:
 			return
-		case ts := <-tsets:
-			n.reload(ts)
+		default:
+			select {
+			case <-n.stopRequested:
+				return
+			case ts := <-tsets:
+				n.reload(ts)
+			}
 		}
 	}
 }
 
+func (n *Manager) sendOneBatch() {
+	alerts := n.nextBatch()
+
+	if !n.sendAll(alerts...) {
+		n.metrics.dropped.Add(float64(len(alerts)))
+	}
+}
+
+func (n *Manager) drainQueue() {
+	if !n.opts.DrainOnShutdown {
+		if n.queueLen() > 0 {
+			level.Warn(n.logger).Log("msg", "Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen())
+			n.metrics.dropped.Add(float64(n.queueLen()))
+		}
+
+		return
+	}
+
+	level.Info(n.logger).Log("msg", "Draining any remaining notifications...")
+
+	for n.queueLen() > 0 {
+		n.sendOneBatch()
+	}
+
+	level.Info(n.logger).Log("msg", "Remaining notifications drained")
+}
+
 func (n *Manager) reload(tgs map[string][]*targetgroup.Group) {
 	n.mtx.Lock()
 	defer n.mtx.Unlock()
|
||||||
for _, am := range ams.ams {
|
for _, am := range ams.ams {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(n.ctx, time.Duration(ams.cfg.Timeout))
|
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ams.cfg.Timeout))
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) {
|
go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) {
|
||||||
|
@ -616,6 +674,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Any HTTP status 2xx is OK.
|
// Any HTTP status 2xx is OK.
|
||||||
|
//nolint:usestdlibvars
|
||||||
if resp.StatusCode/100 != 2 {
|
if resp.StatusCode/100 != 2 {
|
||||||
return fmt.Errorf("bad response status %s", resp.Status)
|
return fmt.Errorf("bad response status %s", resp.Status)
|
||||||
}
|
}
|
||||||
|
@ -623,10 +682,19 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop shuts down the notification handler.
|
// Stop signals the notification manager to shut down and immediately returns.
|
||||||
|
//
|
||||||
|
// Run will return once the notification manager has successfully shut down.
|
||||||
|
//
|
||||||
|
// The manager will optionally drain any queued notifications before shutting down.
|
||||||
|
//
|
||||||
|
// Stop is safe to call multiple times.
|
||||||
func (n *Manager) Stop() {
|
func (n *Manager) Stop() {
|
||||||
level.Info(n.logger).Log("msg", "Stopping notification manager...")
|
level.Info(n.logger).Log("msg", "Stopping notification manager...")
|
||||||
n.cancel()
|
|
||||||
|
n.stopOnce.Do(func() {
|
||||||
|
close(n.stopRequested)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Alertmanager holds Alertmanager endpoint information.
|
// Alertmanager holds Alertmanager endpoint information.
|
||||||
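
The stopOnce.Do guard above is what makes "Stop is safe to call multiple times" true: closing an already-closed channel panics in Go, so the close must run at most once. A minimal sketch of that idiom with illustrative names:

	package main

	import (
		"fmt"
		"sync"
	)

	type worker struct {
		stopRequested chan struct{}
		stopOnce      sync.Once
	}

	func newWorker() *worker {
		return &worker{stopRequested: make(chan struct{})}
	}

	// Stop signals shutdown and returns immediately. Without the sync.Once,
	// a second call would panic with "close of closed channel".
	func (w *worker) Stop() {
		w.stopOnce.Do(func() {
			close(w.stopRequested)
		})
	}

	func main() {
		w := newWorker()
		w.Stop()
		w.Stop() // Safe: the second call is a no-op.
		<-w.stopRequested // Receiving from a closed channel returns immediately.
		fmt.Println("stopped")
	}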

@@ -847,3 +847,173 @@ loop2:
 		}
 	}
 }
+
+func TestStop_DrainingDisabled(t *testing.T) {
+	releaseReceiver := make(chan struct{})
+	receiverReceivedRequest := make(chan struct{}, 2)
+	alertsReceived := atomic.NewInt64(0)
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Let the test know we've received a request.
+		receiverReceivedRequest <- struct{}{}
+
+		var alerts []*Alert
+
+		b, err := io.ReadAll(r.Body)
+		require.NoError(t, err)
+
+		err = json.Unmarshal(b, &alerts)
+		require.NoError(t, err)
+
+		alertsReceived.Add(int64(len(alerts)))
+
+		// Wait for the test to release us.
+		<-releaseReceiver
+
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer func() {
+		server.Close()
+	}()
+
+	m := NewManager(
+		&Options{
+			QueueCapacity:   10,
+			DrainOnShutdown: false,
+		},
+		nil,
+	)
+
+	m.alertmanagers = make(map[string]*alertmanagerSet)
+
+	am1Cfg := config.DefaultAlertmanagerConfig
+	am1Cfg.Timeout = model.Duration(time.Second)
+
+	m.alertmanagers["1"] = &alertmanagerSet{
+		ams: []alertmanager{
+			alertmanagerMock{
+				urlf: func() string { return server.URL },
+			},
+		},
+		cfg: &am1Cfg,
+	}
+
+	notificationManagerStopped := make(chan struct{})
+
+	go func() {
+		defer close(notificationManagerStopped)
+		m.Run(nil)
+	}()
+
+	// Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later.
+	m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
+
+	select {
+	case <-receiverReceivedRequest:
+		// Nothing more to do.
+	case <-time.After(time.Second):
+		require.FailNow(t, "gave up waiting for receiver to receive notification of first alert")
+	}
+
+	m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
+
+	// Stop the notification manager, pause to allow the shutdown to be observed, and then allow the receiver to proceed.
+	m.Stop()
+	time.Sleep(time.Second)
+	close(releaseReceiver)
+
+	// Wait for the notification manager to stop and confirm only the first notification was sent.
+	// The second notification should be dropped.
+	select {
+	case <-notificationManagerStopped:
+		// Nothing more to do.
+	case <-time.After(time.Second):
+		require.FailNow(t, "gave up waiting for notification manager to stop")
+	}
+
+	require.Equal(t, int64(1), alertsReceived.Load())
+}
+
+func TestStop_DrainingEnabled(t *testing.T) {
+	releaseReceiver := make(chan struct{})
+	receiverReceivedRequest := make(chan struct{}, 2)
+	alertsReceived := atomic.NewInt64(0)
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Let the test know we've received a request.
+		receiverReceivedRequest <- struct{}{}
+
+		var alerts []*Alert
+
+		b, err := io.ReadAll(r.Body)
+		require.NoError(t, err)
+
+		err = json.Unmarshal(b, &alerts)
+		require.NoError(t, err)
+
+		alertsReceived.Add(int64(len(alerts)))
+
+		// Wait for the test to release us.
+		<-releaseReceiver
+
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer func() {
+		server.Close()
+	}()
+
+	m := NewManager(
+		&Options{
+			QueueCapacity:   10,
+			DrainOnShutdown: true,
+		},
+		nil,
+	)
+
+	m.alertmanagers = make(map[string]*alertmanagerSet)
+
+	am1Cfg := config.DefaultAlertmanagerConfig
+	am1Cfg.Timeout = model.Duration(time.Second)
+
+	m.alertmanagers["1"] = &alertmanagerSet{
+		ams: []alertmanager{
+			alertmanagerMock{
+				urlf: func() string { return server.URL },
+			},
+		},
+		cfg: &am1Cfg,
+	}
+
+	notificationManagerStopped := make(chan struct{})
+
+	go func() {
+		defer close(notificationManagerStopped)
+		m.Run(nil)
+	}()
+
+	// Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later.
+	m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
+
+	select {
+	case <-receiverReceivedRequest:
+		// Nothing more to do.
+	case <-time.After(time.Second):
+		require.FailNow(t, "gave up waiting for receiver to receive notification of first alert")
+	}
+
+	m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
+
+	// Stop the notification manager and allow the receiver to proceed.
+	m.Stop()
+	close(releaseReceiver)
+
+	// Wait for the notification manager to stop and confirm both notifications were sent.
+	select {
+	case <-notificationManagerStopped:
+		// Nothing more to do.
+	case <-time.After(200 * time.Millisecond):
+		require.FailNow(t, "gave up waiting for notification manager to stop")
+	}
+
+	require.Equal(t, int64(2), alertsReceived.Load())
+}

130	promql/engine.go
@@ -1799,18 +1799,21 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 			}, e.LHS, e.RHS)
 		default:
 			return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-				return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil
+				vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh)
+				return vec, handleVectorBinopError(err, e)
 			}, e.LHS, e.RHS)
 		}

 	case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar:
 		return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-			return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil
+			vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh)
+			return vec, handleVectorBinopError(err, e)
 		}, e.LHS, e.RHS)

 	case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector:
 		return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-			return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil
+			vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh)
+			return vec, handleVectorBinopError(err, e)
 		}, e.LHS, e.RHS)
 	}

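The same shape repeats across all three binop cases above: the operation now returns (Vector, error), and the closure converts the error into a PromQL warning annotation via handleVectorBinopError (defined further down in this diff) rather than aborting the query. A reduced sketch of that convert-don't-abort pattern, using hypothetical stand-ins for the engine types:

	package main

	import (
		"errors"
		"fmt"
	)

	// errIncompatible is a stand-in for sentinel errors such as
	// histogram.ErrHistogramsIncompatibleSchema.
	var errIncompatible = errors.New("incompatible operands")

	// binop is a stand-in for VectorBinop: it returns a partial result plus
	// the last error it encountered, instead of failing outright.
	func binop(lhs, rhs []float64) ([]float64, error) {
		out := make([]float64, 0, len(lhs))
		var lastErr error
		for i := range lhs {
			if i >= len(rhs) {
				lastErr = errIncompatible
				continue // Skip the bad pair, keep evaluating.
			}
			out = append(out, lhs[i]+rhs[i])
		}
		return out, lastErr
	}

	// handleBinopError maps an evaluation error to a warning string, the
	// way the engine maps it to an annotation.
	func handleBinopError(err error) []string {
		if err == nil {
			return nil
		}
		if errors.Is(err, errIncompatible) {
			return []string{"warning: incompatible operands skipped"}
		}
		return nil
	}

	func main() {
		vec, err := binop([]float64{1, 2, 3}, []float64{10, 20})
		fmt.Println(vec, handleBinopError(err)) // [11 22] [warning: ...]
	}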

@@ -2443,12 +2446,12 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi
 }

 // VectorBinop evaluates a binary operation between two Vectors, excluding set operators.
-func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
+func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) {
 	if matching.Card == parser.CardManyToMany {
 		panic("many-to-many only allowed for set operators")
 	}
 	if len(lhs) == 0 || len(rhs) == 0 {
-		return nil // Short-circuit: nothing is going to match.
+		return nil, nil // Short-circuit: nothing is going to match.
 	}

 	// The control flow below handles one-to-one or many-to-one matching.
@@ -2501,6 +2504,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *

 	// For all lhs samples find a respective rhs sample and perform
 	// the binary operation.
+	var lastErr error
 	for i, ls := range lhs {
 		sig := lhsh[i].signature

@@ -2516,7 +2520,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
 			fl, fr = fr, fl
 			hl, hr = hr, hl
 		}
-		floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr)
+		floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr)
+		if err != nil {
+			lastErr = err
+		}
 		switch {
 		case returnBool:
 			if keep {
@@ -2558,7 +2565,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
 			H: histogramValue,
 		})
 	}
-	return enh.Out
+	return enh.Out, lastErr
 }

 func signatureFunc(on bool, b []byte, names ...string) func(labels.Labels) string {
@@ -2621,7 +2628,8 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V
 }

 // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar.
-func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector {
+func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) {
+	var lastErr error
 	for _, lhsSample := range lhs {
 		lf, rf := lhsSample.F, rhs.V
 		var rh *histogram.FloatHistogram
@@ -2632,7 +2640,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
 			lf, rf = rf, lf
 			lh, rh = rh, lh
 		}
-		float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh)
+		float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh)
+		if err != nil {
+			lastErr = err
+		}
 		// Catch cases where the scalar is the LHS in a scalar-vector comparison operation.
 		// We want to always keep the vector element value as the output value, even if it's on the RHS.
 		if op.IsComparisonOperator() && swap {
@@ -2656,7 +2667,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
 			enh.Out = append(enh.Out, lhsSample)
 		}
 	}
-	return enh.Out
+	return enh.Out, lastErr
 }

 // scalarBinop evaluates a binary operation between two Scalars.
@@ -2693,49 +2704,57 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 {
 }

 // vectorElemBinop evaluates a binary operation between two Vector elements.
-func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool) {
+func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) {
 	switch op {
 	case parser.ADD:
 		if hlhs != nil && hrhs != nil {
-			return 0, hlhs.Copy().Add(hrhs).Compact(0), true
+			res, err := hlhs.Copy().Add(hrhs)
+			if err != nil {
+				return 0, nil, false, err
+			}
+			return 0, res.Compact(0), true, nil
 		}
-		return lhs + rhs, nil, true
+		return lhs + rhs, nil, true, nil
 	case parser.SUB:
 		if hlhs != nil && hrhs != nil {
-			return 0, hlhs.Copy().Sub(hrhs).Compact(0), true
+			res, err := hlhs.Copy().Sub(hrhs)
+			if err != nil {
+				return 0, nil, false, err
+			}
+			return 0, res.Compact(0), true, nil
 		}
-		return lhs - rhs, nil, true
+		return lhs - rhs, nil, true, nil
 	case parser.MUL:
 		if hlhs != nil && hrhs == nil {
-			return 0, hlhs.Copy().Mul(rhs), true
+			return 0, hlhs.Copy().Mul(rhs), true, nil
 		}
 		if hlhs == nil && hrhs != nil {
-			return 0, hrhs.Copy().Mul(lhs), true
+			return 0, hrhs.Copy().Mul(lhs), true, nil
 		}
-		return lhs * rhs, nil, true
+		return lhs * rhs, nil, true, nil
 	case parser.DIV:
 		if hlhs != nil && hrhs == nil {
-			return 0, hlhs.Copy().Div(rhs), true
+			return 0, hlhs.Copy().Div(rhs), true, nil
 		}
-		return lhs / rhs, nil, true
+		return lhs / rhs, nil, true, nil
 	case parser.POW:
-		return math.Pow(lhs, rhs), nil, true
+		return math.Pow(lhs, rhs), nil, true, nil
 	case parser.MOD:
-		return math.Mod(lhs, rhs), nil, true
+		return math.Mod(lhs, rhs), nil, true, nil
 	case parser.EQLC:
-		return lhs, nil, lhs == rhs
+		return lhs, nil, lhs == rhs, nil
 	case parser.NEQ:
-		return lhs, nil, lhs != rhs
+		return lhs, nil, lhs != rhs, nil
 	case parser.GTR:
-		return lhs, nil, lhs > rhs
+		return lhs, nil, lhs > rhs, nil
 	case parser.LSS:
-		return lhs, nil, lhs < rhs
+		return lhs, nil, lhs < rhs, nil
 	case parser.GTE:
-		return lhs, nil, lhs >= rhs
+		return lhs, nil, lhs >= rhs, nil
 	case parser.LTE:
-		return lhs, nil, lhs <= rhs
+		return lhs, nil, lhs <= rhs, nil
 	case parser.ATAN2:
-		return math.Atan2(lhs, rhs), nil, true
+		return math.Atan2(lhs, rhs), nil, true, nil
 	}
 	panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op))
 }
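
In vectorElemBinop above, the third return value (keep) is what implements PromQL's filtering comparison operators: for GTR, LSS and friends the sample value passes through unchanged and keep decides whether the element survives. A small sketch of that filter-by-comparison idea (standalone, not the engine's actual types):

	package main

	import "fmt"

	// elemCompare mirrors the comparison arms of vectorElemBinop: the value
	// is returned untouched, and keep says whether to retain the sample.
	func elemCompare(op string, lhs, rhs float64) (value float64, keep bool) {
		switch op {
		case ">":
			return lhs, lhs > rhs
		case "<":
			return lhs, lhs < rhs
		case "==":
			return lhs, lhs == rhs
		}
		panic("unsupported operator " + op)
	}

	func main() {
		samples := []float64{1, 5, 9}
		var out []float64
		for _, s := range samples {
			if v, keep := elemCompare(">", s, 4); keep {
				out = append(out, v)
			}
		}
		fmt.Println(out) // [5 9]
	}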

@@ -2746,7 +2765,7 @@ type groupedAggregation struct {
 	hasHistogram   bool // Has at least 1 histogram sample aggregated.
 	floatValue     float64
 	histogramValue *histogram.FloatHistogram
-	floatMean      float64
+	floatMean      float64 // Mean, or "compensating value" for Kahan summation.
 	groupCount     int
 	heap           vectorByValueHeap
 }
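
The repurposed floatMean field above doubles as the compensation term for Kahan (compensated) summation, which the sum aggregation now uses via kahanSumInc. A self-contained sketch of compensated summation with the same increment-function shape as the helper referenced in this diff (the exact implementation in the codebase may differ):

	package main

	import (
		"fmt"
		"math"
	)

	// kahanSumInc performs one step of Kahan/Neumaier compensated summation:
	// it adds inc to sum while accumulating, in c, the low-order bits that
	// plain float64 addition would discard.
	func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
		t := sum + inc
		if math.Abs(sum) >= math.Abs(inc) {
			c += (sum - t) + inc
		} else {
			c += (inc - t) + sum
		}
		return t, c
	}

	func main() {
		var naive, sum, c float64
		for i := 0; i < 10_000_000; i++ {
			naive += 0.1
			sum, c = kahanSumInc(0.1, sum, c)
		}
		// The compensated result (sum + c) is much closer to 1e6 than naive.
		fmt.Println(naive, sum+c)
	}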

@@ -2774,11 +2793,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			*group = groupedAggregation{
 				seen:       true,
 				floatValue: f,
-				floatMean:  f,
 				groupCount: 1,
 			}
 			switch op {
-			case parser.SUM, parser.AVG:
+			case parser.AVG:
+				group.floatMean = f
+				fallthrough
+			case parser.SUM:
 				if h == nil {
 					group.hasFloat = true
 				} else {
@@ -2786,6 +2807,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 					group.hasHistogram = true
 				}
 			case parser.STDVAR, parser.STDDEV:
+				group.floatMean = f
 				group.floatValue = 0
 			case parser.QUANTILE:
 				group.heap = make(vectorByValueHeap, 1)
@@ -2801,14 +2823,17 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			if h != nil {
 				group.hasHistogram = true
 				if group.histogramValue != nil {
-					group.histogramValue.Add(h)
+					_, err := group.histogramValue.Add(h)
+					if err != nil {
+						handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+					}
 				}
 				// Otherwise the aggregation contained floats
 				// previously and will be invalid anyway. No
 				// point in copying the histogram in that case.
 			} else {
 				group.hasFloat = true
-				group.floatValue += f
+				group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean)
 			}

 		case parser.AVG:
@@ -2818,8 +2843,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 				if group.histogramValue != nil {
 					left := h.Copy().Div(float64(group.groupCount))
 					right := group.histogramValue.Copy().Div(float64(group.groupCount))
-					toAdd := left.Sub(right)
-					group.histogramValue.Add(toAdd)
+					toAdd, err := left.Sub(right)
+					if err != nil {
+						handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+					}
+					_, err = group.histogramValue.Add(toAdd)
+					if err != nil {
+						handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+					}
 				}
 				// Otherwise the aggregation contained floats
 				// previously and will be invalid anyway. No
@@ -2919,6 +2950,8 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			}
 			if aggr.hasHistogram {
 				aggr.histogramValue.Compact(0)
+			} else {
+				aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term.
 			}
 		default:
 			// For other aggregations, we already have the right value.
@@ -3116,6 +3149,31 @@ func (ev *evaluator) nextValues(ts int64, series *Series) (f float64, h *histogr
 	return f, h, true
 }

+// handleAggregationError adds the appropriate annotation based on the aggregation error.
+func handleAggregationError(err error, e *parser.AggregateExpr, metricName string, annos *annotations.Annotations) {
+	pos := e.Expr.PositionRange()
+	if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+		annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+	} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+		annos.Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
+	}
+}
+
+// handleVectorBinopError returns the appropriate annotation based on the vector binary operation error.
+func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotations {
+	if err == nil {
+		return nil
+	}
+	metricName := ""
+	pos := e.PositionRange()
+	if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+		return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+	} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+		return annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
+	}
+	return nil
+}
+
 // groupingKey builds and returns the grouping key for the given metric and
 // grouping labels.
 func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) {
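
Both helpers above rely on errors.Is, so the histogram code can wrap its sentinel errors with extra context and the mapping still matches. A small sketch of sentinel wrapping and matching, with a hypothetical sentinel name standing in for the real ones:

	package main

	import (
		"errors"
		"fmt"
	)

	var errIncompatibleSchema = errors.New("histograms have incompatible schemas")

	func subtract() error {
		// Wrapping with %w preserves the sentinel for errors.Is.
		return fmt.Errorf("combining histograms: %w", errIncompatibleSchema)
	}

	func main() {
		err := subtract()
		if errors.Is(err, errIncompatibleSchema) {
			fmt.Println("emit mixed-schema warning annotation")
		}
	}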

@@ -834,10 +834,10 @@ load 10s
 			{
 				Query: "metricWith1HistogramEvery10Seconds",
 				Start: time.Unix(21, 0),
-				PeakSamples:  12,
-				TotalSamples: 12, // 1 histogram sample of size 12 / 10 seconds
+				PeakSamples:  13,
+				TotalSamples: 13, // 1 histogram HPoint of size 13 / 10 seconds
 				TotalSamplesPerStep: stats.TotalSamplesPerStep{
-					21000: 12,
+					21000: 13,
 				},
 			},
 			{
@@ -934,10 +934,10 @@ load 10s
 			{
 				Query: "metricWith1HistogramEvery10Seconds[60s]",
 				Start: time.Unix(201, 0),
-				PeakSamples:  72,
-				TotalSamples: 72, // 1 histogram (size 12) / 10 seconds * 60 seconds
+				PeakSamples:  78,
+				TotalSamples: 78, // 1 histogram (size 13 HPoint) / 10 seconds * 60 seconds
 				TotalSamplesPerStep: stats.TotalSamplesPerStep{
-					201000: 72,
+					201000: 78,
 				},
 			},
 			{
@@ -962,10 +962,10 @@ load 10s
 			{
 				Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]",
 				Start: time.Unix(201, 0),
-				PeakSamples:  72,
-				TotalSamples: 288, // (1 histogram (size 12) / 10 seconds * 60 seconds) * 4
+				PeakSamples:  78,
+				TotalSamples: 312, // (1 histogram (size 13) / 10 seconds * 60 seconds) * 4
 				TotalSamplesPerStep: stats.TotalSamplesPerStep{
-					201000: 288,
+					201000: 312,
 				},
 			},
 			{
@@ -980,10 +980,10 @@ load 10s
 			{
 				Query: "metricWith1HistogramEvery10Seconds[60s] @ 30",
 				Start: time.Unix(201, 0),
-				PeakSamples:  48,
-				TotalSamples: 48, // @ modifier force the evaluation to at 30 seconds - So it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series
+				PeakSamples:  52,
+				TotalSamples: 52, // @ modifier force the evaluation to at 30 seconds - So it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series
 				TotalSamplesPerStep: stats.TotalSamplesPerStep{
-					201000: 48,
+					201000: 52,
 				},
 			},
 			{
@@ -1118,13 +1118,13 @@ load 10s
 				Start:    time.Unix(204, 0),
 				End:      time.Unix(223, 0),
 				Interval: 5 * time.Second,
-				PeakSamples:  48,
-				TotalSamples: 48, // 1 histogram (size 12) per query * 4 steps
+				PeakSamples:  52,
+				TotalSamples: 52, // 1 histogram (size 13 HPoint) per query * 4 steps
 				TotalSamplesPerStep: stats.TotalSamplesPerStep{
-					204000: 12, // aligned to the step time, not the sample time
-					209000: 13,
-					214000: 12,
-					219000: 12,
+					204000: 13, // aligned to the step time, not the sample time
+					209000: 13,
+					214000: 13,
+					219000: 13,
 				},
 			},
 			{
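
All of the updated expectations above follow from one change: each native histogram sample now counts as 13 values instead of 12. An instant selection of one histogram is therefore 13 samples; a 60s range at one sample per 10s selects 6 points, and 6 × 13 = 78; the [20s:5s] subquery evaluates that 60s range at 4 steps, so 78 × 4 = 312; and the @ 30 case selects 4 points, 4 × 13 = 52.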

@@ -14,6 +14,7 @@
 package promql

 import (
+	"errors"
 	"fmt"
 	"math"
 	"slices"
@@ -210,14 +211,28 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
 	}

 	h := last.CopyToSchema(minSchema)
-	h.Sub(prev)
+	_, err := h.Sub(prev)
+	if err != nil {
+		if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+			return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+		} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+			return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
+		}
+	}

 	if isCounter {
 		// Second iteration to deal with counter resets.
 		for _, currPoint := range points[1:] {
 			curr := currPoint.H
 			if curr.DetectReset(prev) {
-				h.Add(prev)
+				_, err := h.Add(prev)
+				if err != nil {
+					if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+						return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+					} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+						return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
+					}
+				}
 			}
 			prev = curr
 		}
@@ -513,10 +528,11 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series)
 	return append(enh.Out, Sample{F: aggrFn(el)})
 }

-func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector {
+func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) {
 	el := vals[0].(Matrix)[0]
-	return append(enh.Out, Sample{H: aggrFn(el)})
+	res, err := aggrFn(el)
+
+	return append(enh.Out, Sample{H: res}), err
 }

 // === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
@@ -528,18 +544,33 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
 	}
 	if len(firstSeries.Floats) == 0 {
 		// The passed values only contain histograms.
-		return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
+		vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) {
 			count := 1
 			mean := s.Histograms[0].H.Copy()
 			for _, h := range s.Histograms[1:] {
 				count++
 				left := h.H.Copy().Div(float64(count))
 				right := mean.Copy().Div(float64(count))
-				toAdd := left.Sub(right)
-				mean.Add(toAdd)
+				toAdd, err := left.Sub(right)
+				if err != nil {
+					return mean, err
+				}
+				_, err = mean.Add(toAdd)
+				if err != nil {
+					return mean, err
+				}
 			}
-			return mean
-		}), nil
+			return mean, nil
+		})
+		if err != nil {
+			metricName := firstSeries.Metric.Get(labels.MetricName)
+			if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+				return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
+			} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+				return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
+			}
+		}
+		return vec, nil
 	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
 		var mean, count, c float64
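
The histogram loop in funcAvgOverTime maintains a running mean without summing first: each step applies mean += h/count - mean/count, which is the incremental-mean identity mean_new = mean_old + (x - mean_old)/count rearranged so that both operands are divided before subtraction (useful for histograms, where Div and Sub are explicit operations). A float sketch of the same update:

	package main

	import "fmt"

	func main() {
		xs := []float64{2, 4, 6, 8}
		count := 1
		mean := xs[0]
		for _, x := range xs[1:] {
			count++
			// Same shape as the histogram code: divide both operands by
			// count, then add the difference to the running mean.
			left := x / float64(count)
			right := mean / float64(count)
			mean += left - right
		}
		fmt.Println(mean) // 5
	}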

@@ -673,13 +704,25 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
 	}
 	if len(firstSeries.Floats) == 0 {
 		// The passed values only contain histograms.
-		return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
+		vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) {
 			sum := s.Histograms[0].H.Copy()
 			for _, h := range s.Histograms[1:] {
-				sum.Add(h.H)
+				_, err := sum.Add(h.H)
+				if err != nil {
+					return sum, err
+				}
 			}
-			return sum
-		}), nil
+			return sum, nil
+		})
+		if err != nil {
+			metricName := firstSeries.Metric.Get(labels.MetricName)
+			if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+				return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
+			} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+				return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
+			}
+		}
+		return vec, nil
 	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
 		var sum, c float64

@@ -84,6 +84,7 @@ BUCKETS_DESC
 NEGATIVE_BUCKETS_DESC
 ZERO_BUCKET_DESC
 ZERO_BUCKET_WIDTH_DESC
+CUSTOM_VALUES_DESC
 %token histogramDescEnd

 // Operators.
@@ -797,6 +798,11 @@ histogram_desc_item
 	$$ = yylex.(*parser).newMap()
 	$$["z_bucket_w"] = $3
 	}
+	| CUSTOM_VALUES_DESC COLON bucket_set
+	{
+	$$ = yylex.(*parser).newMap()
+	$$["custom_values"] = $3
+	}
 	| BUCKETS_DESC COLON bucket_set
 	{
 	$$ = yylex.(*parser).newMap()
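
Everything below this point is mechanical fallout in the goyacc-generated parser (generated_parser.y.go): inserting the CUSTOM_VALUES_DESC token shifts every later token constant by one and rewrites the parser tables (yyExca, yyAct, yyPact, yyPgo, yyR1, yyR2, yyChk, ...) wholesale. These tables are not hand-edited; the file is regenerated from the grammar with something like `goyacc -o generated_parser.y.go generated_parser.y` (the exact invocation lives in the repository's Makefile and may include additional flags).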
@@ -67,62 +67,63 @@ const BUCKETS_DESC = 57375
 const NEGATIVE_BUCKETS_DESC = 57376
 const ZERO_BUCKET_DESC = 57377
 const ZERO_BUCKET_WIDTH_DESC = 57378
-const histogramDescEnd = 57379
-const operatorsStart = 57380
-const ADD = 57381
-const DIV = 57382
-const EQLC = 57383
-const EQL_REGEX = 57384
-const GTE = 57385
-const GTR = 57386
-const LAND = 57387
-const LOR = 57388
-const LSS = 57389
-const LTE = 57390
-const LUNLESS = 57391
-const MOD = 57392
-const MUL = 57393
-const NEQ = 57394
-const NEQ_REGEX = 57395
-const POW = 57396
-const SUB = 57397
-const AT = 57398
-const ATAN2 = 57399
-const operatorsEnd = 57400
-const aggregatorsStart = 57401
-const AVG = 57402
-const BOTTOMK = 57403
-const COUNT = 57404
-const COUNT_VALUES = 57405
-const GROUP = 57406
-const MAX = 57407
-const MIN = 57408
-const QUANTILE = 57409
-const STDDEV = 57410
-const STDVAR = 57411
-const SUM = 57412
-const TOPK = 57413
-const aggregatorsEnd = 57414
-const keywordsStart = 57415
-const BOOL = 57416
-const BY = 57417
-const GROUP_LEFT = 57418
-const GROUP_RIGHT = 57419
-const IGNORING = 57420
-const OFFSET = 57421
-const ON = 57422
-const WITHOUT = 57423
-const keywordsEnd = 57424
-const preprocessorStart = 57425
-const START = 57426
-const END = 57427
-const preprocessorEnd = 57428
-const startSymbolsStart = 57429
-const START_METRIC = 57430
-const START_SERIES_DESCRIPTION = 57431
-const START_EXPRESSION = 57432
-const START_METRIC_SELECTOR = 57433
-const startSymbolsEnd = 57434
+const CUSTOM_VALUES_DESC = 57379
+const histogramDescEnd = 57380
+const operatorsStart = 57381
+const ADD = 57382
+const DIV = 57383
+const EQLC = 57384
+const EQL_REGEX = 57385
+const GTE = 57386
+const GTR = 57387
+const LAND = 57388
+const LOR = 57389
+const LSS = 57390
+const LTE = 57391
+const LUNLESS = 57392
+const MOD = 57393
+const MUL = 57394
+const NEQ = 57395
+const NEQ_REGEX = 57396
+const POW = 57397
+const SUB = 57398
+const AT = 57399
+const ATAN2 = 57400
+const operatorsEnd = 57401
+const aggregatorsStart = 57402
+const AVG = 57403
+const BOTTOMK = 57404
+const COUNT = 57405
+const COUNT_VALUES = 57406
+const GROUP = 57407
+const MAX = 57408
+const MIN = 57409
+const QUANTILE = 57410
+const STDDEV = 57411
+const STDVAR = 57412
+const SUM = 57413
+const TOPK = 57414
+const aggregatorsEnd = 57415
+const keywordsStart = 57416
+const BOOL = 57417
+const BY = 57418
+const GROUP_LEFT = 57419
+const GROUP_RIGHT = 57420
+const IGNORING = 57421
+const OFFSET = 57422
+const ON = 57423
+const WITHOUT = 57424
+const keywordsEnd = 57425
+const preprocessorStart = 57426
+const START = 57427
+const END = 57428
+const preprocessorEnd = 57429
+const startSymbolsStart = 57430
+const START_METRIC = 57431
+const START_SERIES_DESCRIPTION = 57432
+const START_EXPRESSION = 57433
+const START_METRIC_SELECTOR = 57434
+const startSymbolsEnd = 57435

 var yyToknames = [...]string{
 	"$end",
@@ -161,6 +162,7 @@ var yyToknames = [...]string{
 	"NEGATIVE_BUCKETS_DESC",
 	"ZERO_BUCKET_DESC",
 	"ZERO_BUCKET_WIDTH_DESC",
+	"CUSTOM_VALUES_DESC",
 	"histogramDescEnd",
 	"operatorsStart",
 	"ADD",
@@ -235,270 +237,273 @@ var yyExca = [...]int16{
 	24, 134,
 	-2, 0,
 	-1, 58,
-	2, 171,
-	15, 171,
-	75, 171,
-	81, 171,
-	-2, 100,
-	-1, 59,
 	2, 172,
 	15, 172,
-	75, 172,
-	81, 172,
-	-2, 101,
-	-1, 60,
+	76, 172,
+	82, 172,
+	-2, 100,
+	-1, 59,
 	2, 173,
 	15, 173,
-	75, 173,
-	81, 173,
-	-2, 103,
-	-1, 61,
+	76, 173,
+	82, 173,
+	-2, 101,
+	-1, 60,
 	2, 174,
 	15, 174,
-	75, 174,
-	81, 174,
-	-2, 104,
-	-1, 62,
+	76, 174,
+	82, 174,
+	-2, 103,
+	-1, 61,
 	2, 175,
 	15, 175,
-	75, 175,
-	81, 175,
-	-2, 105,
-	-1, 63,
+	76, 175,
+	82, 175,
+	-2, 104,
+	-1, 62,
 	2, 176,
 	15, 176,
-	75, 176,
-	81, 176,
-	-2, 110,
-	-1, 64,
+	76, 176,
+	82, 176,
+	-2, 105,
+	-1, 63,
 	2, 177,
 	15, 177,
-	75, 177,
-	81, 177,
-	-2, 112,
-	-1, 65,
+	76, 177,
+	82, 177,
+	-2, 110,
+	-1, 64,
 	2, 178,
 	15, 178,
-	75, 178,
-	81, 178,
-	-2, 114,
-	-1, 66,
+	76, 178,
+	82, 178,
+	-2, 112,
+	-1, 65,
 	2, 179,
 	15, 179,
-	75, 179,
-	81, 179,
-	-2, 115,
-	-1, 67,
+	76, 179,
+	82, 179,
+	-2, 114,
+	-1, 66,
 	2, 180,
 	15, 180,
-	75, 180,
-	81, 180,
-	-2, 116,
-	-1, 68,
+	76, 180,
+	82, 180,
+	-2, 115,
+	-1, 67,
 	2, 181,
 	15, 181,
-	75, 181,
-	81, 181,
-	-2, 117,
-	-1, 69,
+	76, 181,
+	82, 181,
+	-2, 116,
+	-1, 68,
 	2, 182,
 	15, 182,
-	75, 182,
-	81, 182,
+	76, 182,
+	82, 182,
+	-2, 117,
+	-1, 69,
+	2, 183,
+	15, 183,
+	76, 183,
+	82, 183,
 	-2, 118,
 	-1, 195,
-	12, 230,
-	13, 230,
-	18, 230,
-	19, 230,
-	25, 230,
-	39, 230,
-	45, 230,
-	46, 230,
-	49, 230,
-	55, 230,
-	60, 230,
-	61, 230,
-	62, 230,
-	63, 230,
-	64, 230,
-	65, 230,
-	66, 230,
-	67, 230,
-	68, 230,
-	69, 230,
-	70, 230,
-	71, 230,
-	75, 230,
-	79, 230,
-	81, 230,
-	84, 230,
-	85, 230,
+	12, 231,
+	13, 231,
+	18, 231,
+	19, 231,
+	25, 231,
+	40, 231,
+	46, 231,
+	47, 231,
+	50, 231,
+	56, 231,
+	61, 231,
+	62, 231,
+	63, 231,
+	64, 231,
+	65, 231,
+	66, 231,
+	67, 231,
+	68, 231,
+	69, 231,
+	70, 231,
+	71, 231,
+	72, 231,
+	76, 231,
+	80, 231,
+	82, 231,
+	85, 231,
+	86, 231,
 	-2, 0,
 	-1, 196,
-	12, 230,
-	13, 230,
-	18, 230,
-	19, 230,
-	25, 230,
-	39, 230,
-	45, 230,
-	46, 230,
-	49, 230,
-	55, 230,
-	60, 230,
-	61, 230,
-	62, 230,
-	63, 230,
-	64, 230,
-	65, 230,
-	66, 230,
-	67, 230,
-	68, 230,
-	69, 230,
-	70, 230,
-	71, 230,
-	75, 230,
-	79, 230,
-	81, 230,
-	84, 230,
-	85, 230,
+	12, 231,
+	13, 231,
+	18, 231,
+	19, 231,
+	25, 231,
+	40, 231,
+	46, 231,
+	47, 231,
+	50, 231,
+	56, 231,
+	61, 231,
+	62, 231,
+	63, 231,
+	64, 231,
+	65, 231,
+	66, 231,
+	67, 231,
+	68, 231,
+	69, 231,
+	70, 231,
+	71, 231,
+	72, 231,
+	76, 231,
+	80, 231,
+	82, 231,
+	85, 231,
+	86, 231,
 	-2, 0,
 	-1, 217,
-	21, 228,
-	-2, 0,
-	-1, 285,
 	21, 229,
 	-2, 0,
+	-1, 286,
+	21, 230,
+	-2, 0,
 }

 const yyPrivate = 57344

-const yyLast = 742
+const yyLast = 778

 var yyAct = [...]int16{
-	151, 322, 320, 268, 327, 148, 221, 37, 187, 144,
-	281, 280, 152, 113, 77, 173, 104, 102, 101, 6,
-	128, 223, 105, 193, 155, 194, 195, 196, 339, 262,
-	260, 233, 317, 316, 57, 100, 294, 239, 103, 146,
-	300, 313, 263, 156, 156, 283, 147, 338, 259, 123,
-	337, 106, 252, 311, 155, 299, 340, 301, 264, 157,
-	157, 108, 298, 109, 235, 236, 292, 251, 237, 107,
-	155, 292, 174, 191, 175, 96, 250, 99, 258, 224,
+	151, 324, 322, 268, 329, 148, 221, 37, 187, 144,
+	282, 281, 152, 113, 77, 173, 104, 102, 101, 6,
+	223, 193, 105, 194, 195, 196, 128, 262, 260, 155,
+	233, 103, 342, 293, 100, 319, 239, 116, 146, 318,
+	315, 263, 156, 123, 106, 147, 284, 114, 295, 116,
+	156, 341, 175, 259, 340, 253, 57, 264, 157, 114,
+	117, 108, 313, 109, 235, 236, 157, 112, 237, 107,
+	323, 174, 117, 175, 155, 96, 250, 99, 293, 224,
 	226, 228, 229, 230, 238, 240, 243, 244, 245, 246,
-	247, 110, 145, 225, 227, 231, 232, 234, 241, 242,
+	247, 177, 145, 225, 227, 231, 232, 234, 241, 242,
-	98, 257, 321, 248, 249, 2, 3, 4, 5, 218,
-	158, 104, 177, 217, 168, 162, 165, 105, 175, 160,
-	164, 161, 176, 178, 189, 213, 106, 328, 216, 256,
-	183, 179, 192, 163, 181, 100, 190, 197, 198, 199,
+	98, 176, 178, 248, 249, 104, 2, 3, 4, 5,
+	158, 105, 177, 110, 168, 162, 165, 302, 150, 160,
+	191, 161, 176, 178, 189, 155, 213, 343, 106, 330,
+	72, 179, 192, 33, 181, 155, 190, 197, 198, 199,
 	200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
-	210, 211, 255, 182, 72, 212, 177, 214, 215, 33,
-	82, 84, 85, 7, 86, 87, 176, 178, 90, 91,
-	223, 93, 94, 95, 116, 96, 97, 99, 83, 147,
-	233, 286, 289, 116, 114, 254, 239, 288, 147, 172,
-	220, 124, 253, 114, 171, 310, 309, 117, 120, 261,
-	98, 112, 287, 119, 278, 279, 117, 170, 282, 10,
-	308, 159, 307, 235, 236, 312, 118, 237, 147, 74,
-	306, 305, 304, 303, 302, 250, 81, 285, 224, 226,
-	228, 229, 230, 238, 240, 243, 244, 245, 246, 247,
-	79, 79, 225, 227, 231, 232, 234, 241, 242, 48,
-	78, 78, 248, 249, 122, 73, 121, 150, 180, 76,
-	290, 291, 293, 56, 295, 8, 9, 9, 34, 35,
-	1, 284, 296, 297, 155, 129, 130, 131, 132, 133,
+	210, 211, 185, 301, 258, 212, 156, 214, 215, 188,
+	256, 183, 290, 191, 252, 164, 155, 289, 300, 218,
+	223, 79, 157, 217, 7, 299, 312, 257, 163, 251,
+	233, 78, 288, 255, 182, 254, 239, 156, 216, 180,
+	220, 124, 172, 120, 147, 311, 314, 171, 119, 261,
+	287, 153, 154, 157, 279, 280, 79, 147, 283, 310,
+	170, 118, 159, 10, 235, 236, 78, 309, 237, 147,
+	308, 307, 306, 74, 76, 305, 250, 286, 304, 224,
+	226, 228, 229, 230, 238, 240, 243, 244, 245, 246,
+	247, 303, 81, 225, 227, 231, 232, 234, 241, 242,
+	48, 34, 1, 248, 249, 122, 73, 121, 285, 47,
+	291, 292, 294, 56, 296, 8, 9, 9, 46, 35,
+	45, 44, 297, 298, 127, 129, 130, 131, 132, 133,
 	134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
-	47, 46, 45, 44, 156, 314, 315, 127, 43, 42,
-	41, 185, 319, 125, 166, 324, 325, 326, 188, 323,
-	157, 329, 191, 331, 330, 155, 40, 126, 332, 333,
-	100, 51, 72, 334, 53, 39, 38, 22, 52, 336,
-	49, 167, 186, 335, 54, 156, 265, 80, 341, 153,
-	154, 184, 219, 75, 115, 82, 84, 149, 70, 55,
-	222, 157, 50, 111, 18, 19, 93, 94, 20, 0,
-	96, 97, 99, 83, 71, 0, 0, 0, 0, 58,
+	43, 42, 41, 125, 166, 40, 316, 317, 126, 39,
+	38, 49, 186, 321, 338, 265, 326, 327, 328, 80,
+	325, 184, 219, 332, 331, 334, 333, 75, 115, 149,
+	335, 336, 100, 51, 72, 337, 53, 55, 222, 22,
+	52, 339, 50, 167, 111, 0, 54, 0, 0, 0,
+	0, 344, 0, 0, 0, 0, 0, 0, 82, 84,
+	0, 70, 0, 0, 0, 0, 0, 18, 19, 93,
+	94, 20, 0, 96, 97, 99, 83, 71, 0, 0,
+	0, 0, 58, 59, 60, 61, 62, 63, 64, 65,
+	66, 67, 68, 69, 0, 0, 0, 13, 98, 0,
+	0, 24, 0, 30, 0, 0, 31, 32, 36, 100,
+	51, 72, 0, 53, 267, 0, 22, 52, 0, 0,
+	0, 266, 0, 54, 0, 270, 271, 269, 276, 278,
+	275, 277, 272, 273, 274, 0, 84, 0, 70, 0,
+	0, 0, 0, 0, 18, 19, 93, 94, 20, 0,
+	96, 0, 99, 83, 71, 0, 0, 0, 0, 58,
 	59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
 	69, 0, 0, 0, 13, 98, 0, 0, 24, 0,
-	30, 0, 0, 31, 32, 36, 100, 51, 72, 0,
-	53, 267, 0, 22, 52, 0, 0, 0, 266, 0,
-	54, 0, 270, 271, 269, 275, 277, 274, 276, 272,
-	273, 0, 84, 0, 70, 0, 0, 0, 0, 0,
-	18, 19, 93, 94, 20, 0, 96, 0, 99, 83,
-	71, 0, 0, 0, 0, 58, 59, 60, 61, 62,
-	63, 64, 65, 66, 67, 68, 69, 0, 0, 0,
-	13, 98, 0, 0, 24, 0, 30, 0, 0, 31,
-	32, 51, 72, 0, 53, 318, 0, 22, 52, 0,
-	0, 0, 0, 0, 54, 0, 270, 271, 269, 275,
-	277, 274, 276, 272, 273, 0, 0, 0, 70, 0,
-	0, 0, 0, 0, 18, 19, 0, 0, 20, 0,
-	0, 0, 17, 72, 71, 0, 0, 0, 22, 58,
-	59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
-	69, 0, 0, 0, 13, 0, 0, 0, 24, 0,
-	30, 0, 0, 31, 32, 18, 19, 0, 0, 20,
-	0, 0, 0, 17, 33, 0, 0, 0, 0, 22,
-	11, 12, 14, 15, 16, 21, 23, 25, 26, 27,
-	28, 29, 0, 0, 0, 13, 0, 0, 0, 24,
-	0, 30, 0, 0, 31, 32, 18, 19, 0, 0,
-	20, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 11, 12, 14, 15, 16, 21, 23, 25, 26,
-	27, 28, 29, 100, 0, 0, 13, 0, 0, 0,
-	24, 169, 30, 0, 0, 31, 32, 0, 0, 0,
-	0, 0, 100, 0, 0, 0, 0, 0, 82, 84,
-	85, 0, 86, 87, 88, 89, 90, 91, 92, 93,
+	30, 0, 0, 31, 32, 51, 72, 0, 53, 320,
+	0, 22, 52, 0, 0, 0, 0, 0, 54, 0,
+	270, 271, 269, 276, 278, 275, 277, 272, 273, 274,
+	0, 0, 0, 70, 0, 0, 17, 72, 0, 18,
+	19, 0, 22, 20, 0, 0, 0, 0, 0, 71,
+	0, 0, 0, 0, 58, 59, 60, 61, 62, 63,
+	64, 65, 66, 67, 68, 69, 0, 0, 0, 13,
+	18, 19, 0, 24, 20, 30, 0, 0, 31, 32,
+	0, 0, 0, 0, 0, 11, 12, 14, 15, 16,
+	21, 23, 25, 26, 27, 28, 29, 17, 33, 0,
+	13, 0, 0, 22, 24, 0, 30, 0, 0, 31,
+	32, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 18, 19, 0, 0, 20, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 11, 12, 14, 15,
+	16, 21, 23, 25, 26, 27, 28, 29, 100, 0,
+	0, 13, 0, 0, 0, 24, 169, 30, 0, 0,
+	31, 32, 0, 0, 0, 0, 0, 100, 0, 0,
+	0, 0, 0, 0, 82, 84, 85, 0, 86, 87,
+	88, 89, 90, 91, 92, 93, 94, 95, 0, 96,
+	97, 99, 83, 82, 84, 85, 0, 86, 87, 88,
+	89, 90, 91, 92, 93, 94, 95, 0, 96, 97,
+	99, 83, 100, 0, 98, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 100, 0, 98, 0, 0, 0, 0, 82, 84,
+	85, 0, 86, 87, 88, 0, 90, 91, 92, 93,
 	94, 95, 0, 96, 97, 99, 83, 82, 84, 85,
-	0, 86, 87, 88, 89, 90, 91, 92, 93, 94,
-	95, 0, 96, 97, 99, 83, 100, 0, 98, 0,
+	0, 86, 87, 0, 0, 90, 91, 0, 93, 94,
+	95, 0, 96, 97, 99, 83, 0, 0, 98, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 98, 0, 0,
-	0, 82, 84, 85, 0, 86, 87, 88, 0, 90,
-	91, 92, 93, 94, 95, 0, 96, 97, 99, 83,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 98,
+	0, 0, 0, 0, 0, 0, 0, 98,
 }

 var yyPact = [...]int16{
-	17, 153, 541, 541, 385, 500, -1000, -1000, -1000, 146,
+	17, 164, 555, 555, 388, 494, -1000, -1000, -1000, 120,
 	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
 	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-	-1000, -1000, -1000, 239, -1000, 224, -1000, 618, -1000, -1000,
+	-1000, -1000, -1000, 204, -1000, 240, -1000, 633, -1000, -1000,
 	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-	36, 111, -1000, 459, -1000, 459, 141, -1000, -1000, -1000,
+	29, 113, -1000, 463, -1000, 463, 117, -1000, -1000, -1000,
 	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-	-1000, -1000, 181, -1000, -1000, 196, -1000, -1000, 252, -1000,
-	25, -1000, -54, -54, -54, -54, -54, -54, -54, -54,
-	-54, -54, -54, -54, -54, -54, -54, -54, 37, 255,
-	209, 111, -59, -1000, 118, 118, 309, -1000, 599, 21,
-	-1000, 187, -1000, -1000, 70, 114, -1000, -1000, -1000, 238,
-	-1000, 128, -1000, 296, 459, -1000, -55, -50, -1000, 459,
-	459, 459, 459, 459, 459, 459, 459, 459, 459, 459,
-	459, 459, 459, 459, -1000, 170, -1000, -1000, -1000, 110,
-	-1000, -1000, -1000, -1000, -1000, -1000, 51, 51, 107, -1000,
-	-1000, -1000, -1000, 168, -1000, -1000, 45, -1000, 618, -1000,
-	-1000, 172, -1000, 127, -1000, -1000, -1000, -1000, -1000, 76,
-	-1000, -1000, -1000, -1000, -1000, 22, 4, 3, -1000, -1000,
-	-1000, 384, 382, 118, 118, 118, 118, 21, 21, 306,
-	306, 306, 121, 662, 306, 306, 121, 21, 21, 306,
-	21, 382, -1000, 23, -1000, -1000, -1000, 179, -1000, 180,
+	-1000, -1000, 47, -1000, -1000, 191, -1000, -1000, 253, -1000,
+	19, -1000, -49, -49, -49, -49, -49, -49, -49, -49,
+	-49, -49, -49, -49, -49, -49, -49, -49, 36, 116,
+	210, 113, -60, -1000, 163, 163, 311, -1000, 614, 20,
+	-1000, 190, -1000, -1000, 69, 48, -1000, -1000, -1000, 169,
+	-1000, 159, -1000, 147, 463, -1000, -58, -53, -1000, 463,
+	463, 463, 463, 463, 463, 463, 463, 463, 463, 463,
+	463, 463, 463, 463, -1000, 185, -1000, -1000, -1000, 111,
+	-1000, -1000, -1000, -1000, -1000, -1000, 55, 55, 167, -1000,
+	-1000, -1000, -1000, 168, -1000, -1000, 157, -1000, 633, -1000,
+	-1000, 35, -1000, 158, -1000, -1000, -1000, -1000, -1000, 152,
+	-1000, -1000, -1000, -1000, -1000, 27, 2, 1, -1000, -1000,
+	-1000, 387, 385, 163, 163, 163, 163, 20, 20, 308,
+	308, 308, 697, 678, 308, 308, 697, 20, 20, 308,
+	20, 385, -1000, 24, -1000, -1000, -1000, 198, -1000, 160,
 	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
 	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
 	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-	-1000, -1000, 459, -1000, -1000, -1000, -1000, -1000, -1000, 52,
-	52, 10, 52, 57, 57, 38, 40, -1000, -1000, 218,
-	217, 216, 215, 214, 206, 204, 190, 189, -1000, -1000,
-	-1000, -1000, -1000, -1000, 32, 213, -1000, -1000, 19, -1000,
-	618, -1000, -1000, -1000, 52, -1000, 7, 6, 458, -1000,
-	-1000, -1000, 47, 5, 51, 51, 51, 113, 47, 113,
-	47, -1000, -1000, -1000, -1000, -1000, 52, 52, -1000, -1000,
-	-1000, 52, -1000, -1000, -1000, -1000, -1000, -1000, 51, -1000,
-	-1000, -1000, -1000, -1000, -1000, 26, -1000, 35, -1000, -1000,
-	-1000, -1000,
+	-1000, -1000, 463, -1000, -1000, -1000, -1000, -1000, -1000, 59,
+	59, 22, 59, 104, 104, 151, 100, -1000, -1000, 235,
+	222, 219, 216, 215, 214, 211, 203, 189, 170, -1000,
+	-1000, -1000, -1000, -1000, -1000, 41, 194, -1000, -1000, 18,
+	-1000, 633, -1000, -1000, -1000, 59, -1000, 13, 9, 462,
+	-1000, -1000, -1000, 14, 10, 55, 55, 55, 115, 115,
+	14, 115, 14, -1000, -1000, -1000, -1000, -1000, 59, 59,
+	-1000, -1000, -1000, 59, -1000, -1000, -1000, -1000, -1000, -1000,
+	55, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 30, -1000,
+	106, -1000, -1000, -1000, -1000,
 }

 var yyPgo = [...]int16{
-	0, 353, 13, 352, 6, 15, 350, 263, 349, 347,
-	344, 209, 265, 343, 14, 342, 10, 11, 341, 337,
-	8, 336, 3, 4, 333, 2, 1, 0, 332, 12,
-	5, 330, 326, 18, 191, 325, 317, 7, 316, 304,
-	17, 303, 34, 300, 299, 298, 297, 293, 292, 291,
-	290, 249, 9, 271, 270, 268,
+	0, 334, 13, 332, 6, 15, 328, 263, 327, 319,
+	318, 213, 265, 317, 14, 312, 10, 11, 311, 309,
+	8, 305, 3, 4, 304, 2, 1, 0, 302, 12,
+	5, 301, 300, 18, 191, 299, 298, 7, 295, 294,
+	17, 293, 56, 292, 291, 290, 274, 271, 270, 268,
+	259, 250, 9, 258, 252, 251,
 }

 var yyR1 = [...]int8{
@@ -518,14 +523,14 @@ var yyR1 = [...]int8{
 	14, 14, 14, 55, 19, 19, 19, 19, 18, 18,
 	18, 18, 18, 18, 18, 18, 18, 28, 28, 28,
 	20, 20, 20, 20, 21, 21, 21, 22, 22, 22,
-	22, 22, 22, 22, 22, 22, 23, 23, 24, 24,
-	24, 3, 3, 3, 3, 3, 3, 3, 3, 3,
-	3, 3, 3, 6, 6, 6, 6, 6, 6, 6,
+	22, 22, 22, 22, 22, 22, 22, 23, 23, 24,
+	24, 24, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 6, 6, 6, 6, 6, 6,
 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
-	8, 8, 5, 5, 5, 5, 44, 27, 29, 29,
-	30, 30, 26, 25, 25, 52, 48, 10, 53, 53,
-	17, 17,
+	6, 8, 8, 5, 5, 5, 5, 44, 27, 29,
+	29, 30, 30, 26, 25, 25, 52, 48, 10, 53,
+	53, 17, 17,
 }

 var yyR2 = [...]int8{
@@ -545,52 +550,52 @@ var yyR2 = [...]int8{
 	3, 2, 1, 2, 0, 3, 2, 1, 1, 3,
 	1, 3, 4, 1, 3, 5, 5, 1, 1, 1,
 	4, 3, 3, 2, 3, 1, 2, 3, 3, 3,
-	3, 3, 3, 3, 3, 3, 4, 3, 3, 1,
-	2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	3, 3, 3, 3, 3, 3, 3, 4, 3, 3,
+	1, 2, 1, 1, 1, 1, 1, 1, 1, 1,
 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-	1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
-	1, 1, 1, 2, 1, 1, 1, 1, 0, 1,
-	0, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+	2, 1, 1, 1, 2, 1, 1, 1, 1, 0,
+	1, 0, 1,
 }

 var yyChk = [...]int16{
-	-1000, -54, 88, 89, 90, 91, 2, 10, -12, -7,
-	-11, 60, 61, 75, 62, 63, 64, 12, 45, 46,
-	49, 65, 18, 66, 79, 67, 68, 69, 70, 71,
-	81, 84, 85, 13, -55, -12, 10, -37, -32, -35,
+	-1000, -54, 89, 90, 91, 92, 2, 10, -12, -7,
+	-11, 61, 62, 76, 63, 64, 65, 12, 46, 47,
+	50, 66, 18, 67, 80, 68, 69, 70, 71, 72,
+	82, 85, 86, 13, -55, -12, 10, -37, -32, -35,
 	-38, -43, -44, -45, -47, -48, -49, -50, -51, -31,
-	-3, 12, 19, 15, 25, -8, -7, -42, 60, 61,
-	62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
-	39, 55, 13, -51, -11, -13, 20, -14, 12, 2,
-	-19, 2, 39, 57, 40, 41, 43, 44, 45, 46,
-	47, 48, 49, 50, 51, 52, 54, 55, 79, 56,
-	14, -33, -40, 2, 75, 81, 15, -40, -37, -37,
+	-3, 12, 19, 15, 25, -8, -7, -42, 61, 62,
+	63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+	40, 56, 13, -51, -11, -13, 20, -14, 12, 2,
+	-19, 2, 40, 58, 41, 42, 44, 45, 46, 47,
+	48, 49, 50, 51, 52, 53, 55, 56, 80, 57,
+	14, -33, -40, 2, 76, 82, 15, -40, -37, -37,
 	-42, -1, 20, -2, 12, -10, 2, 25, 20, 7,
-	2, 4, 2, 24, -34, -41, -36, -46, 74, -34,
+	2, 4, 2, 24, -34, -41, -36, -46, 75, -34,
 	-34, -34, -34, -34, -34, -34, -34, -34, -34, -34,
-	-34, -34, -34, -34, -52, 55, 2, 9, -30, -9,
-	2, -27, -29, 84, 85, 19, 39, 55, -52, 2,
+	-34, -34, -34, -34, -52, 56, 2, 9, -30, -9,
+	2, -27, -29, 85, 86, 19, 40, 56, -52, 2,
 	-40, -33, -16, 15, 2, -16, -39, 22, -37, 22,
-	20, 7, 2, -5, 2, 4, 52, 42, 53, -5,
+	20, 7, 2, -5, 2, 4, 53, 43, 54, -5,
 	20, -14, 25, 2, -18, 5, -28, -20, 12, -27,
-	-29, 16, -37, 78, 80, 76, 77, -37, -37, -37,
+	-29, 16, -37, 79, 81, 77, 78, -37, -37, -37,
 	-37, -37, -37, -37, -37, -37, -37, -37, -37, -37,
 	-37, -37, -52, 15, -27, -27, 21, 6, 2, -15,
-	22, -4, -6, 2, 60, 74, 61, 75, 62, 63,
-	64, 76, 77, 12, 78, 45, 46, 49, 65, 18,
-	66, 79, 80, 67, 68, 69, 70, 71, 84, 85,
-	57, 22, 7, 20, -2, 25, 2, 25, 2, 26,
+	22, -4, -6, 2, 61, 75, 62, 76, 63, 64,
+	65, 77, 78, 12, 79, 46, 47, 50, 66, 18,
+	67, 80, 81, 68, 69, 70, 71, 72, 85, 86,
+	58, 22, 7, 20, -2, 25, 2, 25, 2, 26,
-	26, -29, 26, 39, 55, -21, 24, 17, -22, 30,
|
26, -29, 26, 40, 56, -21, 24, 17, -22, 30,
|
||||||
28, 29, 35, 36, 33, 31, 34, 32, -16, -16,
|
28, 29, 35, 36, 37, 33, 31, 34, 32, -16,
|
||||||
-17, -16, -17, 22, -53, -52, 2, 22, 7, 2,
|
-16, -17, -16, -17, 22, -53, -52, 2, 22, 7,
|
||||||
-37, -26, 19, -26, 26, -26, -20, -20, 24, 17,
|
2, -37, -26, 19, -26, 26, -26, -20, -20, 24,
|
||||||
2, 17, 6, 6, 6, 6, 6, 6, 6, 6,
|
17, 2, 17, 6, 6, 6, 6, 6, 6, 6,
|
||||||
6, 21, 2, 22, -4, -26, 26, 26, 17, -22,
|
6, 6, 6, 21, 2, 22, -4, -26, 26, 26,
|
||||||
-25, 55, -26, -30, -27, -27, -27, -23, 14, -25,
|
17, -22, -25, 56, -26, -30, -27, -27, -27, -23,
|
||||||
-23, -25, -26, -26, -26, -24, -27, 24, 21, 2,
|
14, -23, -25, -23, -25, -26, -26, -26, -24, -27,
|
||||||
21, -27,
|
24, 21, 2, 21, -27,
|
||||||
}
|
}
|
||||||
|
|
||||||
var yyDef = [...]int16{
|
var yyDef = [...]int16{
|
||||||
|
@ -599,36 +604,36 @@ var yyDef = [...]int16{
|
||||||
109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
|
109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
|
||||||
119, 120, 121, 0, 2, -2, 3, 4, 8, 9,
|
119, 120, 121, 0, 2, -2, 3, 4, 8, 9,
|
||||||
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
||||||
0, 106, 216, 0, 226, 0, 83, 84, -2, -2,
|
0, 106, 217, 0, 227, 0, 83, 84, -2, -2,
|
||||||
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
|
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
|
||||||
210, 211, 0, 5, 98, 0, 124, 127, 0, 132,
|
211, 212, 0, 5, 98, 0, 124, 127, 0, 132,
|
||||||
133, 137, 43, 43, 43, 43, 43, 43, 43, 43,
|
133, 137, 43, 43, 43, 43, 43, 43, 43, 43,
|
||||||
43, 43, 43, 43, 43, 43, 43, 43, 0, 0,
|
43, 43, 43, 43, 43, 43, 43, 43, 0, 0,
|
||||||
0, 0, 22, 23, 0, 0, 0, 60, 0, 81,
|
0, 0, 22, 23, 0, 0, 0, 60, 0, 81,
|
||||||
82, 0, 87, 89, 0, 93, 97, 227, 122, 0,
|
82, 0, 87, 89, 0, 93, 97, 228, 122, 0,
|
||||||
128, 0, 131, 136, 0, 42, 47, 48, 44, 0,
|
128, 0, 131, 136, 0, 42, 47, 48, 44, 0,
|
||||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||||
0, 0, 0, 0, 67, 0, 69, 225, 70, 0,
|
0, 0, 0, 0, 67, 0, 69, 226, 70, 0,
|
||||||
72, 220, 221, 73, 74, 217, 0, 0, 0, 80,
|
72, 221, 222, 73, 74, 218, 0, 0, 0, 80,
|
||||||
20, 21, 24, 0, 54, 25, 0, 62, 64, 66,
|
20, 21, 24, 0, 54, 25, 0, 62, 64, 66,
|
||||||
85, 0, 90, 0, 96, 212, 213, 214, 215, 0,
|
85, 0, 90, 0, 96, 213, 214, 215, 216, 0,
|
||||||
123, 126, 129, 130, 135, 138, 140, 143, 147, 148,
|
123, 126, 129, 130, 135, 138, 140, 143, 147, 148,
|
||||||
149, 0, 26, 0, 0, -2, -2, 27, 28, 29,
|
149, 0, 26, 0, 0, -2, -2, 27, 28, 29,
|
||||||
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
|
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
|
||||||
40, 41, 68, 0, 218, 219, 75, -2, 79, 0,
|
40, 41, 68, 0, 219, 220, 75, -2, 79, 0,
|
||||||
53, 56, 58, 59, 183, 184, 185, 186, 187, 188,
|
53, 56, 58, 59, 184, 185, 186, 187, 188, 189,
|
||||||
189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
|
190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
|
||||||
199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
|
200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
|
||||||
209, 61, 65, 86, 88, 91, 95, 92, 94, 0,
|
210, 61, 65, 86, 88, 91, 95, 92, 94, 0,
|
||||||
0, 0, 0, 0, 0, 0, 0, 153, 155, 0,
|
0, 0, 0, 0, 0, 0, 0, 153, 155, 0,
|
||||||
0, 0, 0, 0, 0, 0, 0, 0, 45, 46,
|
0, 0, 0, 0, 0, 0, 0, 0, 0, 45,
|
||||||
49, 231, 50, 71, 0, -2, 78, 51, 0, 57,
|
46, 49, 232, 50, 71, 0, -2, 78, 51, 0,
|
||||||
63, 139, 222, 141, 0, 144, 0, 0, 0, 151,
|
57, 63, 139, 223, 141, 0, 144, 0, 0, 0,
|
||||||
156, 152, 0, 0, 0, 0, 0, 0, 0, 0,
|
151, 156, 152, 0, 0, 0, 0, 0, 0, 0,
|
||||||
0, 76, 77, 52, 55, 142, 0, 0, 150, 154,
|
0, 0, 0, 76, 77, 52, 55, 142, 0, 0,
|
||||||
157, 0, 224, 158, 159, 160, 161, 162, 0, 163,
|
150, 154, 157, 0, 225, 158, 159, 160, 161, 162,
|
||||||
164, 165, 145, 146, 223, 0, 169, 0, 167, 170,
|
0, 163, 164, 165, 166, 145, 146, 224, 0, 170,
|
||||||
166, 168,
|
0, 168, 171, 167, 169,
|
||||||
}
|
}
|
||||||
|
|
||||||
var yyTok1 = [...]int8{
|
var yyTok1 = [...]int8{
|
||||||
|
@ -645,7 +650,7 @@ var yyTok2 = [...]int8{
|
||||||
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
|
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
|
||||||
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
|
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
|
||||||
82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
|
82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
|
||||||
92,
|
92, 93,
|
||||||
}
|
}
|
||||||
|
|
||||||
var yyTok3 = [...]int8{
|
var yyTok3 = [...]int8{
|
||||||
|
@ -1738,47 +1743,53 @@ yydefault:
|
||||||
yyDollar = yyS[yypt-3 : yypt+1]
|
yyDollar = yyS[yypt-3 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.descriptors = yylex.(*parser).newMap()
|
yyVAL.descriptors = yylex.(*parser).newMap()
|
||||||
yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set
|
yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set
|
||||||
}
|
}
|
||||||
case 163:
|
case 163:
|
||||||
yyDollar = yyS[yypt-3 : yypt+1]
|
yyDollar = yyS[yypt-3 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.descriptors = yylex.(*parser).newMap()
|
yyVAL.descriptors = yylex.(*parser).newMap()
|
||||||
yyVAL.descriptors["offset"] = yyDollar[3].int
|
yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set
|
||||||
}
|
}
|
||||||
case 164:
|
case 164:
|
||||||
yyDollar = yyS[yypt-3 : yypt+1]
|
yyDollar = yyS[yypt-3 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.descriptors = yylex.(*parser).newMap()
|
yyVAL.descriptors = yylex.(*parser).newMap()
|
||||||
yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set
|
yyVAL.descriptors["offset"] = yyDollar[3].int
|
||||||
}
|
}
|
||||||
case 165:
|
case 165:
|
||||||
yyDollar = yyS[yypt-3 : yypt+1]
|
yyDollar = yyS[yypt-3 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.descriptors = yylex.(*parser).newMap()
|
yyVAL.descriptors = yylex.(*parser).newMap()
|
||||||
yyVAL.descriptors["n_offset"] = yyDollar[3].int
|
yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set
|
||||||
}
|
}
|
||||||
case 166:
|
case 166:
|
||||||
yyDollar = yyS[yypt-4 : yypt+1]
|
yyDollar = yyS[yypt-3 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.bucket_set = yyDollar[2].bucket_set
|
yyVAL.descriptors = yylex.(*parser).newMap()
|
||||||
|
yyVAL.descriptors["n_offset"] = yyDollar[3].int
|
||||||
}
|
}
|
||||||
case 167:
|
case 167:
|
||||||
yyDollar = yyS[yypt-3 : yypt+1]
|
yyDollar = yyS[yypt-4 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.bucket_set = yyDollar[2].bucket_set
|
yyVAL.bucket_set = yyDollar[2].bucket_set
|
||||||
}
|
}
|
||||||
case 168:
|
case 168:
|
||||||
yyDollar = yyS[yypt-3 : yypt+1]
|
yyDollar = yyS[yypt-3 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
|
yyVAL.bucket_set = yyDollar[2].bucket_set
|
||||||
}
|
}
|
||||||
case 169:
|
case 169:
|
||||||
|
yyDollar = yyS[yypt-3 : yypt+1]
|
||||||
|
{
|
||||||
|
yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
|
||||||
|
}
|
||||||
|
case 170:
|
||||||
yyDollar = yyS[yypt-1 : yypt+1]
|
yyDollar = yyS[yypt-1 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.bucket_set = []float64{yyDollar[1].float}
|
yyVAL.bucket_set = []float64{yyDollar[1].float}
|
||||||
}
|
}
|
||||||
case 216:
|
case 217:
|
||||||
yyDollar = yyS[yypt-1 : yypt+1]
|
yyDollar = yyS[yypt-1 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.node = &NumberLiteral{
|
yyVAL.node = &NumberLiteral{
|
||||||
|
@ -1786,22 +1797,22 @@ yydefault:
|
||||||
PosRange: yyDollar[1].item.PositionRange(),
|
PosRange: yyDollar[1].item.PositionRange(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case 217:
|
case 218:
|
||||||
yyDollar = yyS[yypt-1 : yypt+1]
|
yyDollar = yyS[yypt-1 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val)
|
yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val)
|
||||||
}
|
}
|
||||||
case 218:
|
case 219:
|
||||||
yyDollar = yyS[yypt-2 : yypt+1]
|
yyDollar = yyS[yypt-2 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.float = yyDollar[2].float
|
yyVAL.float = yyDollar[2].float
|
||||||
}
|
}
|
||||||
case 219:
|
case 220:
|
||||||
yyDollar = yyS[yypt-2 : yypt+1]
|
yyDollar = yyS[yypt-2 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.float = -yyDollar[2].float
|
yyVAL.float = -yyDollar[2].float
|
||||||
}
|
}
|
||||||
case 222:
|
case 223:
|
||||||
yyDollar = yyS[yypt-1 : yypt+1]
|
yyDollar = yyS[yypt-1 : yypt+1]
|
||||||
{
|
{
|
||||||
var err error
|
var err error
|
||||||
|
@ -1810,17 +1821,17 @@ yydefault:
|
||||||
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err)
|
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case 223:
|
case 224:
|
||||||
yyDollar = yyS[yypt-2 : yypt+1]
|
yyDollar = yyS[yypt-2 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.int = -int64(yyDollar[2].uint)
|
yyVAL.int = -int64(yyDollar[2].uint)
|
||||||
}
|
}
|
||||||
case 224:
|
case 225:
|
||||||
yyDollar = yyS[yypt-1 : yypt+1]
|
yyDollar = yyS[yypt-1 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.int = int64(yyDollar[1].uint)
|
yyVAL.int = int64(yyDollar[1].uint)
|
||||||
}
|
}
|
||||||
case 225:
|
case 226:
|
||||||
yyDollar = yyS[yypt-1 : yypt+1]
|
yyDollar = yyS[yypt-1 : yypt+1]
|
||||||
{
|
{
|
||||||
var err error
|
var err error
|
||||||
|
@ -1829,7 +1840,7 @@ yydefault:
|
||||||
yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err)
|
yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case 226:
|
case 227:
|
||||||
yyDollar = yyS[yypt-1 : yypt+1]
|
yyDollar = yyS[yypt-1 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.node = &StringLiteral{
|
yyVAL.node = &StringLiteral{
|
||||||
|
@ -1837,7 +1848,7 @@ yydefault:
|
||||||
PosRange: yyDollar[1].item.PositionRange(),
|
PosRange: yyDollar[1].item.PositionRange(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case 227:
|
case 228:
|
||||||
yyDollar = yyS[yypt-1 : yypt+1]
|
yyDollar = yyS[yypt-1 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.item = Item{
|
yyVAL.item = Item{
|
||||||
|
@ -1846,12 +1857,12 @@ yydefault:
|
||||||
Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val),
|
Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case 228:
|
case 229:
|
||||||
yyDollar = yyS[yypt-0 : yypt+1]
|
yyDollar = yyS[yypt-0 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.duration = 0
|
yyVAL.duration = 0
|
||||||
}
|
}
|
||||||
case 230:
|
case 231:
|
||||||
yyDollar = yyS[yypt-0 : yypt+1]
|
yyDollar = yyS[yypt-0 : yypt+1]
|
||||||
{
|
{
|
||||||
yyVAL.strings = nil
|
yyVAL.strings = nil
|
||||||
|
|
|
@ -135,15 +135,16 @@ var key = map[string]ItemType{
|
||||||
}
|
}
|
||||||
|
|
||||||
var histogramDesc = map[string]ItemType{
|
var histogramDesc = map[string]ItemType{
|
||||||
"sum": SUM_DESC,
|
"sum": SUM_DESC,
|
||||||
"count": COUNT_DESC,
|
"count": COUNT_DESC,
|
||||||
"schema": SCHEMA_DESC,
|
"schema": SCHEMA_DESC,
|
||||||
"offset": OFFSET_DESC,
|
"offset": OFFSET_DESC,
|
||||||
"n_offset": NEGATIVE_OFFSET_DESC,
|
"n_offset": NEGATIVE_OFFSET_DESC,
|
||||||
"buckets": BUCKETS_DESC,
|
"buckets": BUCKETS_DESC,
|
||||||
"n_buckets": NEGATIVE_BUCKETS_DESC,
|
"n_buckets": NEGATIVE_BUCKETS_DESC,
|
||||||
"z_bucket": ZERO_BUCKET_DESC,
|
"z_bucket": ZERO_BUCKET_DESC,
|
||||||
"z_bucket_w": ZERO_BUCKET_WIDTH_DESC,
|
"z_bucket_w": ZERO_BUCKET_WIDTH_DESC,
|
||||||
|
"custom_values": CUSTOM_VALUES_DESC,
|
||||||
}
|
}
|
||||||
|
|
||||||
// ItemTypeStr is the default string representations for common Items. It does not
|
// ItemTypeStr is the default string representations for common Items. It does not
|
||||||
|
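The `custom_values` entry above is the lexer half of this commit's native-histograms-with-custom-buckets (NHCB) support. A sketch of a series description using it, with an illustrative metric name; the descriptor value mirrors the `{{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}` expectation in histograms.test further down:

load 5m
    some_nhcb_metric{instance="a"} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}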
@ -313,6 +314,11 @@ func (l *Lexer) accept(valid string) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// is peeks and returns true if the next rune is contained in the provided string.
|
||||||
|
func (l *Lexer) is(valid string) bool {
|
||||||
|
return strings.ContainsRune(valid, l.peek())
|
||||||
|
}
|
||||||
|
|
||||||
// acceptRun consumes a run of runes from the valid set.
|
// acceptRun consumes a run of runes from the valid set.
|
||||||
func (l *Lexer) acceptRun(valid string) {
|
func (l *Lexer) acceptRun(valid string) {
|
||||||
for strings.ContainsRune(valid, l.next()) {
|
for strings.ContainsRune(valid, l.next()) {
|
||||||
|
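The new `is` helper peeks without consuming, while the existing `accept` consumes on a match. A minimal, runnable sketch of the distinction (an ASCII-only stand-in, not the real Lexer; method shapes follow the diff):

package main

import (
	"fmt"
	"strings"
)

const eof rune = -1

type Lexer struct {
	input string
	pos   int
}

func (l *Lexer) peek() rune {
	if l.pos >= len(l.input) {
		return eof
	}
	return rune(l.input[l.pos]) // byte-wise; fine for this ASCII sketch
}

// accept consumes the next rune if it is contained in valid.
func (l *Lexer) accept(valid string) bool {
	if strings.ContainsRune(valid, l.peek()) {
		l.pos++
		return true
	}
	return false
}

// is peeks and returns true if the next rune is contained in valid.
func (l *Lexer) is(valid string) bool {
	return strings.ContainsRune(valid, l.peek())
}

func main() {
	l := &Lexer{input: "1_2"}
	fmt.Println(l.is("0123456789"))     // true; position unchanged
	fmt.Println(l.accept("0123456789")) // true; position now at '_'
	fmt.Println(l.is("0123456789"))     // false; '_' is not a digit
}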
@ -901,19 +907,78 @@ func acceptRemainingDuration(l *Lexer) bool {
|
||||||
// scanNumber scans numbers of different formats. The scanned Item is
|
// scanNumber scans numbers of different formats. The scanned Item is
|
||||||
// not necessarily a valid number. This case is caught by the parser.
|
// not necessarily a valid number. This case is caught by the parser.
|
||||||
func (l *Lexer) scanNumber() bool {
|
func (l *Lexer) scanNumber() bool {
|
||||||
digits := "0123456789"
|
// Modify the digit pattern if the number is hexadecimal.
|
||||||
|
digitPattern := "0123456789"
|
||||||
// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
|
// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
|
||||||
if !l.seriesDesc && l.accept("0") && l.accept("xX") {
|
if !l.seriesDesc &&
|
||||||
digits = "0123456789abcdefABCDEF"
|
l.accept("0") && l.accept("xX") {
|
||||||
|
l.accept("_") // eg., 0X_1FFFP-16 == 0.1249847412109375
|
||||||
|
digitPattern = "0123456789abcdefABCDEF"
|
||||||
}
|
}
|
||||||
l.acceptRun(digits)
|
const (
|
||||||
if l.accept(".") {
|
// Define dot, exponent, and underscore patterns.
|
||||||
l.acceptRun(digits)
|
dotPattern = "."
|
||||||
}
|
exponentPattern = "eE"
|
||||||
if l.accept("eE") {
|
underscorePattern = "_"
|
||||||
l.accept("+-")
|
// Anti-patterns are rune sets that cannot follow their respective rune.
|
||||||
l.acceptRun("0123456789")
|
dotAntiPattern = "_."
|
||||||
|
exponentAntiPattern = "._eE" // and EOL.
|
||||||
|
underscoreAntiPattern = "._eE" // and EOL.
|
||||||
|
)
|
||||||
|
// All numbers follow the prefix: [.][d][d._eE]*
|
||||||
|
l.accept(dotPattern)
|
||||||
|
l.accept(digitPattern)
|
||||||
|
// [d._eE]* hereon.
|
||||||
|
dotConsumed := false
|
||||||
|
exponentConsumed := false
|
||||||
|
for l.is(digitPattern + dotPattern + underscorePattern + exponentPattern) {
|
||||||
|
// "." cannot repeat.
|
||||||
|
if l.is(dotPattern) {
|
||||||
|
if dotConsumed {
|
||||||
|
l.accept(dotPattern)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// "eE" cannot repeat.
|
||||||
|
if l.is(exponentPattern) {
|
||||||
|
if exponentConsumed {
|
||||||
|
l.accept(exponentPattern)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Handle dots.
|
||||||
|
if l.accept(dotPattern) {
|
||||||
|
dotConsumed = true
|
||||||
|
if l.accept(dotAntiPattern) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Fractional hexadecimal literals are not allowed.
|
||||||
|
if len(digitPattern) > 10 /* 0x[\da-fA-F].[\d]+p[\d] */ {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Handle exponents.
|
||||||
|
if l.accept(exponentPattern) {
|
||||||
|
exponentConsumed = true
|
||||||
|
l.accept("+-")
|
||||||
|
if l.accept(exponentAntiPattern) || l.peek() == eof {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Handle underscores.
|
||||||
|
if l.accept(underscorePattern) {
|
||||||
|
if l.accept(underscoreAntiPattern) || l.peek() == eof {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Handle digits at the end, since we already consumed some before this loop.
|
||||||
|
l.acceptRun(digitPattern)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Next thing must not be alphanumeric unless it's the times token
|
// Next thing must not be alphanumeric unless it's the times token
|
||||||
// for series repetitions.
|
// for series repetitions.
|
||||||
if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {
|
if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {
|
||||||
|
|
|
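Taken together, the rewritten scanner enforces: at most one '.', at most one exponent, underscores only between digits, and at least one digit after the exponent's optional sign. As a rough cross-check, the same rules for plain decimal literals (hexadecimal and duration forms omitted) fit in one regular expression; the sample inputs come from the new lexer tests below:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Rough cross-check of the decimal rules: underscores only between digits,
// at most one '.', at most one exponent, and the exponent must end in at
// least one digit.
var digits = `[0-9](?:_?[0-9])*`

var numRe = regexp.MustCompile(
	`^(?:` + digits + `)?(?:\.(?:` + digits + `)?)?(?:[eE][+-]?` + digits + `)?$`)

func validNumber(s string) bool {
	// Require at least one digit so "" and "." are rejected.
	return strings.ContainsAny(s, "0123456789") && numRe.MatchString(s)
}

func main() {
	for _, s := range []string{"00_1_23_4.56_7_8", "1.e2", "1e1_2_34"} {
		fmt.Println(s, validNumber(s)) // all true, matching the tests below
	}
	for _, s := range []string{"1..2", "12_", "1_e2", "1e+", "1e.2"} {
		fmt.Println(s, validNumber(s)) // all false, matching the tests below
	}
}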
@ -132,6 +132,84 @@ var tests = []struct {
|
||||||
}, {
|
}, {
|
||||||
input: "0x123",
|
input: "0x123",
|
||||||
expected: []Item{{NUMBER, 0, "0x123"}},
|
expected: []Item{{NUMBER, 0, "0x123"}},
|
||||||
|
}, {
|
||||||
|
input: "1..2",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "1.2.",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "00_1_23_4.56_7_8",
|
||||||
|
expected: []Item{{NUMBER, 0, "00_1_23_4.56_7_8"}},
|
||||||
|
}, {
|
||||||
|
input: "00_1_23__4.56_7_8",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "00_1_23_4._56_7_8",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "00_1_23_4_.56_7_8",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "0x1_2_34",
|
||||||
|
expected: []Item{{NUMBER, 0, "0x1_2_34"}},
|
||||||
|
}, {
|
||||||
|
input: "0x1_2__34",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "0x1_2__34.5_6p1", // "0x1.1p1"-based formats are not supported yet.
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "0x1_2__34.5_6",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "0x1_2__34.56",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "1_e2",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "1.e2",
|
||||||
|
expected: []Item{{NUMBER, 0, "1.e2"}},
|
||||||
|
}, {
|
||||||
|
input: "1e.2",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "1e+.2",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "1ee2",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "1e+e2",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "1e",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "1e+",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "1e1_2_34",
|
||||||
|
expected: []Item{{NUMBER, 0, "1e1_2_34"}},
|
||||||
|
}, {
|
||||||
|
input: "1e_1_2_34",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "1e1_2__34",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "1e+_1_2_34",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "1e-_1_2_34",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "12_",
|
||||||
|
fail: true,
|
||||||
|
}, {
|
||||||
|
input: "_1_2",
|
||||||
|
expected: []Item{{IDENTIFIER, 0, "_1_2"}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
|
@ -481,19 +481,19 @@ func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
|
func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
|
||||||
return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram {
|
return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
|
||||||
return a.Add(b)
|
return a.Add(b)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
|
func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
|
||||||
return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram {
|
return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
|
||||||
return a.Sub(b)
|
return a.Sub(b)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64,
|
func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64,
|
||||||
combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) *histogram.FloatHistogram,
|
combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) (*histogram.FloatHistogram, error),
|
||||||
) ([]SequenceValue, error) {
|
) ([]SequenceValue, error) {
|
||||||
ret := make([]SequenceValue, times+1)
|
ret := make([]SequenceValue, times+1)
|
||||||
// Add an additional value (the base) for time 0, which we ignore in tests.
|
// Add an additional value (the base) for time 0, which we ignore in tests.
|
||||||
|
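The signature change above makes the combine callback fallible, since FloatHistogram Add/Sub can now fail across incompatible schemas. A simplified, self-contained sketch of the pattern (stand-in types, not the real histogram package):

package main

import (
	"errors"
	"fmt"
)

// Simplified stand-in showing the fallible-combine shape: the callback
// returns an error instead of silently producing a bad histogram.
type hist struct{ sum float64 }

func add(a, b *hist) (*hist, error) {
	if a == nil || b == nil {
		return nil, errors.New("nil histogram")
	}
	return &hist{sum: a.sum + b.sum}, nil
}

func series(base, inc *hist, times int, combine func(*hist, *hist) (*hist, error)) ([]*hist, error) {
	out := make([]*hist, 0, times)
	cur := base
	for i := 0; i < times; i++ {
		next, err := combine(cur, inc) // error now propagates to the caller
		if err != nil {
			return out, err
		}
		cur = next
		out = append(out, cur)
	}
	return out, nil
}

func main() {
	res, err := series(&hist{1}, &hist{2}, 3, add)
	fmt.Println(len(res), res[2].sum, err) // 3 7 <nil>
}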
@ -504,7 +504,11 @@ func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uin
|
||||||
return nil, fmt.Errorf("error combining histograms: cannot merge from schema %d to %d", inc.Schema, cur.Schema)
|
return nil, fmt.Errorf("error combining histograms: cannot merge from schema %d to %d", inc.Schema, cur.Schema)
|
||||||
}
|
}
|
||||||
|
|
||||||
cur = combine(cur.Copy(), inc)
|
var err error
|
||||||
|
cur, err = combine(cur.Copy(), inc)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
ret[i] = SequenceValue{Histogram: cur}
|
ret[i] = SequenceValue{Histogram: cur}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -562,6 +566,15 @@ func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.
|
||||||
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket_w number: %v", val)
|
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket_w number: %v", val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
val, ok = (*desc)["custom_values"]
|
||||||
|
if ok {
|
||||||
|
customValues, ok := val.([]float64)
|
||||||
|
if ok {
|
||||||
|
output.CustomValues = customValues
|
||||||
|
} else {
|
||||||
|
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing custom_values: %v", val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset")
|
buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset")
|
||||||
output.PositiveBuckets = buckets
|
output.PositiveBuckets = buckets
|
||||||
|
|
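Pieced together from the fields this commit touches, a parsed `custom_values` descriptor ends up as a custom-buckets FloatHistogram roughly like the following sketch (the Sum value is illustrative; everything else follows the `{{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}` example used in the tests):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Sketch for {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}.
	fh := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema, // -53
		CustomValues:    []float64{0.1, 0.2},           // upper bounds; the +Inf bucket is implicit
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		PositiveBuckets: []float64{100, 90, 60}, // sums to Count
		Count:           250,
		Sum:             125, // illustrative: not derivable from bucket counts
	}
	fmt.Println(fh.Validate()) // expected to be <nil> for a well-formed NHCB
}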
|
@ -513,12 +513,12 @@ var testExpr = []struct {
|
||||||
{
|
{
|
||||||
input: "2.5.",
|
input: "2.5.",
|
||||||
fail: true,
|
fail: true,
|
||||||
errMsg: "unexpected character: '.'",
|
errMsg: `1:1: parse error: bad number or duration syntax: "2.5."`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "100..4",
|
input: "100..4",
|
||||||
fail: true,
|
fail: true,
|
||||||
errMsg: `unexpected number ".4"`,
|
errMsg: `1:1: parse error: bad number or duration syntax: "100.."`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
input: "0deadbeef",
|
input: "0deadbeef",
|
||||||
|
|
|
@ -63,6 +63,10 @@ load 1m
|
||||||
Each `load` command is additive - it does not replace any data loaded in a previous `load` command.
|
Each `load` command is additive - it does not replace any data loaded in a previous `load` command.
|
||||||
Use `clear` to remove all loaded data.
|
Use `clear` to remove all loaded data.
|
||||||
|
|
||||||
|
### Native histograms with custom buckets (NHCB)
|
||||||
|
|
||||||
|
When loading a batch of classic histogram float series, you can optionally append the suffix `_with_nhcb` to convert them to native histograms with custom buckets and load both the original float series and the new histogram series.
|
||||||
|
|
||||||
## `clear` command
|
## `clear` command
|
||||||
|
|
||||||
`clear` removes all data previously loaded with `load` commands.
|
`clear` removes all data previously loaded with `load` commands.
|
||||||
|
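For reference, a `load_with_nhcb` block, this one lifted from the histograms.test changes later in this commit, loads the classic float series and additionally their NHCB equivalents:

load_with_nhcb 5m
    testhistogram2_bucket{le="0"} 0+0x10
    testhistogram2_bucket{le="2"} 0+1x10
    testhistogram2_bucket{le="4"} 0+2x10
    testhistogram2_bucket{le="6"} 0+3x10
    testhistogram2_bucket{le="+Inf"} 0+3x10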
|
|
@ -19,6 +19,8 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
@ -43,9 +45,9 @@ import (
|
||||||
|
|
||||||
var (
|
var (
|
||||||
patSpace = regexp.MustCompile("[\t ]+")
|
patSpace = regexp.MustCompile("[\t ]+")
|
||||||
patLoad = regexp.MustCompile(`^load\s+(.+?)$`)
|
patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`)
|
||||||
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
|
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
|
||||||
patEvalRange = regexp.MustCompile(`^eval(?:_(fail))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
|
patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -177,15 +179,18 @@ func raise(line int, format string, v ...interface{}) error {
|
||||||
|
|
||||||
func parseLoad(lines []string, i int) (int, *loadCmd, error) {
|
func parseLoad(lines []string, i int) (int, *loadCmd, error) {
|
||||||
if !patLoad.MatchString(lines[i]) {
|
if !patLoad.MatchString(lines[i]) {
|
||||||
return i, nil, raise(i, "invalid load command. (load <step:duration>)")
|
return i, nil, raise(i, "invalid load command. (load[_with_nhcb] <step:duration>)")
|
||||||
}
|
}
|
||||||
parts := patLoad.FindStringSubmatch(lines[i])
|
parts := patLoad.FindStringSubmatch(lines[i])
|
||||||
|
var (
|
||||||
gap, err := model.ParseDuration(parts[1])
|
withNHCB = parts[1] == "with_nhcb"
|
||||||
|
step = parts[2]
|
||||||
|
)
|
||||||
|
gap, err := model.ParseDuration(step)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
|
return i, nil, raise(i, "invalid step definition %q: %s", step, err)
|
||||||
}
|
}
|
||||||
cmd := newLoadCmd(time.Duration(gap))
|
cmd := newLoadCmd(time.Duration(gap), withNHCB)
|
||||||
for i+1 < len(lines) {
|
for i+1 < len(lines) {
|
||||||
i++
|
i++
|
||||||
defLine := lines[i]
|
defLine := lines[i]
|
||||||
|
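The reworked `patLoad` pattern drives the branch above: capture group 1 carries the optional `with_nhcb` suffix and group 2 the step. A runnable check:

package main

import (
	"fmt"
	"regexp"
)

// The new patLoad from the diff: group 1 is the optional "with_nhcb"
// suffix, group 2 the step duration.
var patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`)

func main() {
	for _, line := range []string{"load 5m", "load_with_nhcb 5m"} {
		parts := patLoad.FindStringSubmatch(line)
		fmt.Printf("withNHCB=%v step=%q\n", parts[1] == "with_nhcb", parts[2])
	}
	// Output:
	// withNHCB=false step="5m"
	// withNHCB=true step="5m"
}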
@ -218,7 +223,7 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
|
||||||
rangeParts := patEvalRange.FindStringSubmatch(lines[i])
|
rangeParts := patEvalRange.FindStringSubmatch(lines[i])
|
||||||
|
|
||||||
if instantParts == nil && rangeParts == nil {
|
if instantParts == nil && rangeParts == nil {
|
||||||
return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_fail|_ordered] instant [at <offset:duration>] <query>' or 'eval[_fail] range from <from> to <to> step <step> <query>'")
|
return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_fail|_warn|_ordered] instant [at <offset:duration>] <query>' or 'eval[_fail|_warn] range from <from> to <to> step <step> <query>'")
|
||||||
}
|
}
|
||||||
|
|
||||||
isInstant := instantParts != nil
|
isInstant := instantParts != nil
|
||||||
|
@ -297,6 +302,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
|
||||||
cmd.ordered = true
|
cmd.ordered = true
|
||||||
case "fail":
|
case "fail":
|
||||||
cmd.fail = true
|
cmd.fail = true
|
||||||
|
case "warn":
|
||||||
|
cmd.warn = true
|
||||||
}
|
}
|
||||||
|
|
||||||
for j := 1; i+1 < len(lines); j++ {
|
for j := 1; i+1 < len(lines); j++ {
|
||||||
|
@ -367,7 +374,7 @@ func (t *test) parse(input string) error {
|
||||||
switch c := strings.ToLower(patSpace.Split(l, 2)[0]); {
|
switch c := strings.ToLower(patSpace.Split(l, 2)[0]); {
|
||||||
case c == "clear":
|
case c == "clear":
|
||||||
cmd = &clearCmd{}
|
cmd = &clearCmd{}
|
||||||
case c == "load":
|
case strings.HasPrefix(c, "load"):
|
||||||
i, cmd, err = parseLoad(lines, i)
|
i, cmd, err = parseLoad(lines, i)
|
||||||
case strings.HasPrefix(c, "eval"):
|
case strings.HasPrefix(c, "eval"):
|
||||||
i, cmd, err = t.parseEval(lines, i)
|
i, cmd, err = t.parseEval(lines, i)
|
||||||
|
@ -399,14 +406,16 @@ type loadCmd struct {
|
||||||
metrics map[uint64]labels.Labels
|
metrics map[uint64]labels.Labels
|
||||||
defs map[uint64][]promql.Sample
|
defs map[uint64][]promql.Sample
|
||||||
exemplars map[uint64][]exemplar.Exemplar
|
exemplars map[uint64][]exemplar.Exemplar
|
||||||
|
withNHCB bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func newLoadCmd(gap time.Duration) *loadCmd {
|
func newLoadCmd(gap time.Duration, withNHCB bool) *loadCmd {
|
||||||
return &loadCmd{
|
return &loadCmd{
|
||||||
gap: gap,
|
gap: gap,
|
||||||
metrics: map[uint64]labels.Labels{},
|
metrics: map[uint64]labels.Labels{},
|
||||||
defs: map[uint64][]promql.Sample{},
|
defs: map[uint64][]promql.Sample{},
|
||||||
exemplars: map[uint64][]exemplar.Exemplar{},
|
exemplars: map[uint64][]exemplar.Exemplar{},
|
||||||
|
withNHCB: withNHCB,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -445,6 +454,167 @@ func (cmd *loadCmd) append(a storage.Appender) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if cmd.withNHCB {
|
||||||
|
return cmd.appendCustomHistogram(a)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getHistogramMetricBase(m labels.Labels, suffix string) (labels.Labels, uint64) {
|
||||||
|
mName := m.Get(labels.MetricName)
|
||||||
|
baseM := labels.NewBuilder(m).
|
||||||
|
Set(labels.MetricName, strings.TrimSuffix(mName, suffix)).
|
||||||
|
Del(labels.BucketLabel).
|
||||||
|
Labels()
|
||||||
|
hash := baseM.Hash()
|
||||||
|
return baseM, hash
|
||||||
|
}
|
||||||
|
|
||||||
|
type tempHistogramWrapper struct {
|
||||||
|
metric labels.Labels
|
||||||
|
upperBounds []float64
|
||||||
|
histogramByTs map[int64]tempHistogram
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTempHistogramWrapper() tempHistogramWrapper {
|
||||||
|
return tempHistogramWrapper{
|
||||||
|
upperBounds: []float64{},
|
||||||
|
histogramByTs: map[int64]tempHistogram{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type tempHistogram struct {
|
||||||
|
bucketCounts map[float64]float64
|
||||||
|
count float64
|
||||||
|
sum float64
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTempHistogram() tempHistogram {
|
||||||
|
return tempHistogram{
|
||||||
|
bucketCounts: map[float64]float64{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogramWrapper func(*tempHistogramWrapper), updateHistogram func(*tempHistogram, float64)) {
|
||||||
|
m2, m2hash := getHistogramMetricBase(m, suffix)
|
||||||
|
histogramWrapper, exists := histogramMap[m2hash]
|
||||||
|
if !exists {
|
||||||
|
histogramWrapper = newTempHistogramWrapper()
|
||||||
|
}
|
||||||
|
histogramWrapper.metric = m2
|
||||||
|
if updateHistogramWrapper != nil {
|
||||||
|
updateHistogramWrapper(&histogramWrapper)
|
||||||
|
}
|
||||||
|
for _, s := range smpls {
|
||||||
|
if s.H != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
histogram, exists := histogramWrapper.histogramByTs[s.T]
|
||||||
|
if !exists {
|
||||||
|
histogram = newTempHistogram()
|
||||||
|
}
|
||||||
|
updateHistogram(&histogram, s.F)
|
||||||
|
histogramWrapper.histogramByTs[s.T] = histogram
|
||||||
|
}
|
||||||
|
histogramMap[m2hash] = histogramWrapper
|
||||||
|
}
|
||||||
|
|
||||||
|
func processUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64) ([]float64, *histogram.FloatHistogram) {
|
||||||
|
sort.Float64s(upperBounds0)
|
||||||
|
upperBounds := make([]float64, 0, len(upperBounds0))
|
||||||
|
prevLE := math.Inf(-1)
|
||||||
|
for _, le := range upperBounds0 {
|
||||||
|
if le != prevLE { // deduplicate
|
||||||
|
upperBounds = append(upperBounds, le)
|
||||||
|
prevLE = le
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var customBounds []float64
|
||||||
|
if upperBounds[len(upperBounds)-1] == math.Inf(1) {
|
||||||
|
customBounds = upperBounds[:len(upperBounds)-1]
|
||||||
|
} else {
|
||||||
|
customBounds = upperBounds
|
||||||
|
}
|
||||||
|
return upperBounds, &histogram.FloatHistogram{
|
||||||
|
Count: 0,
|
||||||
|
Sum: 0,
|
||||||
|
Schema: histogram.CustomBucketsSchema,
|
||||||
|
PositiveSpans: []histogram.Span{
|
||||||
|
{Offset: 0, Length: uint32(len(upperBounds))},
|
||||||
|
},
|
||||||
|
PositiveBuckets: make([]float64, len(upperBounds)),
|
||||||
|
CustomValues: customBounds,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If classic histograms are defined, convert them into native histograms with custom
|
||||||
|
// bounds and append the defined time series to the storage.
|
||||||
|
func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error {
|
||||||
|
histogramMap := map[uint64]tempHistogramWrapper{}
|
||||||
|
|
||||||
|
// Go through all the time series to collate classic histogram data
|
||||||
|
// and organise them by timestamp.
|
||||||
|
for hash, smpls := range cmd.defs {
|
||||||
|
m := cmd.metrics[hash]
|
||||||
|
mName := m.Get(labels.MetricName)
|
||||||
|
switch {
|
||||||
|
case strings.HasSuffix(mName, "_bucket") && m.Has(labels.BucketLabel):
|
||||||
|
le, err := strconv.ParseFloat(m.Get(labels.BucketLabel), 64)
|
||||||
|
if err != nil || math.IsNaN(le) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
processClassicHistogramSeries(m, "_bucket", histogramMap, smpls, func(histogramWrapper *tempHistogramWrapper) {
|
||||||
|
histogramWrapper.upperBounds = append(histogramWrapper.upperBounds, le)
|
||||||
|
}, func(histogram *tempHistogram, f float64) {
|
||||||
|
histogram.bucketCounts[le] = f
|
||||||
|
})
|
||||||
|
case strings.HasSuffix(mName, "_count"):
|
||||||
|
processClassicHistogramSeries(m, "_count", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) {
|
||||||
|
histogram.count = f
|
||||||
|
})
|
||||||
|
case strings.HasSuffix(mName, "_sum"):
|
||||||
|
processClassicHistogramSeries(m, "_sum", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) {
|
||||||
|
histogram.sum = f
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert the collated classic histogram data into native histograms
|
||||||
|
// with custom bounds and append them to the storage.
|
||||||
|
for _, histogramWrapper := range histogramMap {
|
||||||
|
upperBounds, fhBase := processUpperBoundsAndCreateBaseHistogram(histogramWrapper.upperBounds)
|
||||||
|
samples := make([]promql.Sample, 0, len(histogramWrapper.histogramByTs))
|
||||||
|
for t, histogram := range histogramWrapper.histogramByTs {
|
||||||
|
fh := fhBase.Copy()
|
||||||
|
var prevCount, total float64
|
||||||
|
for i, le := range upperBounds {
|
||||||
|
currCount, exists := histogram.bucketCounts[le]
|
||||||
|
if !exists {
|
||||||
|
currCount = 0
|
||||||
|
}
|
||||||
|
count := currCount - prevCount
|
||||||
|
fh.PositiveBuckets[i] = count
|
||||||
|
total += count
|
||||||
|
prevCount = currCount
|
||||||
|
}
|
||||||
|
fh.Sum = histogram.sum
|
||||||
|
if histogram.count != 0 {
|
||||||
|
total = histogram.count
|
||||||
|
}
|
||||||
|
fh.Count = total
|
||||||
|
s := promql.Sample{T: t, H: fh.Compact(0)}
|
||||||
|
if err := s.H.Validate(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
samples = append(samples, s)
|
||||||
|
}
|
||||||
|
sort.Slice(samples, func(i, j int) bool { return samples[i].T < samples[j].T })
|
||||||
|
for _, s := range samples {
|
||||||
|
if err := appendSample(a, s, histogramWrapper.metric); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
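A worked example of the cumulative-to-delta conversion in appendCustomHistogram above: classic `_bucket` series carry cumulative counts per upper bound, while NHCB PositiveBuckets store per-bucket deltas (values illustrative):

package main

import "fmt"

func main() {
	upperBounds := []float64{0.1, 0.2} // the real code also carries +Inf as a final bound
	cumulative := map[float64]float64{0.1: 100, 0.2: 190}
	positiveBuckets := make([]float64, len(upperBounds))
	var prev, total float64
	for i, le := range upperBounds {
		count := cumulative[le] - prev // delta for this bucket
		positiveBuckets[i] = count
		total += count
		prev = cumulative[le]
	}
	fmt.Println(positiveBuckets, total) // [100 90] 190
}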
|
@ -471,7 +641,7 @@ type evalCmd struct {
|
||||||
line int
|
line int
|
||||||
|
|
||||||
isRange bool // if false, instant query
|
isRange bool // if false, instant query
|
||||||
fail, ordered bool
|
fail, warn, ordered bool
|
||||||
expectedFailMessage string
|
expectedFailMessage string
|
||||||
expectedFailRegexp *regexp.Regexp
|
expectedFailRegexp *regexp.Regexp
|
||||||
|
|
||||||
|
@ -828,6 +998,13 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
|
||||||
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
|
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
|
||||||
}
|
}
|
||||||
res := q.Exec(t.context)
|
res := q.Exec(t.context)
|
||||||
|
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
|
||||||
|
if !cmd.warn && countWarnings > 0 {
|
||||||
|
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
|
||||||
|
}
|
||||||
|
if cmd.warn && countWarnings == 0 {
|
||||||
|
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
|
||||||
|
}
|
||||||
if res.Err != nil {
|
if res.Err != nil {
|
||||||
if cmd.fail {
|
if cmd.fail {
|
||||||
return cmd.checkExpectedFailure(res.Err)
|
return cmd.checkExpectedFailure(res.Err)
|
||||||
|
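These checks define the new `eval_warn` semantics: a plain `eval` now fails if the query emits any warning, and `eval_warn` fails if it emits none. A test-script example, as updated in functions.test below:

eval_warn instant at 1m quantile_over_time(2, data[2m])
    {test="two samples"} +Inf
    {test="three samples"} +Inf
    {test="uneven samples"} +Inf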
@ -854,76 +1031,89 @@ func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error {
|
||||||
}
|
}
|
||||||
queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
|
queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
|
||||||
for _, iq := range queries {
|
for _, iq := range queries {
|
||||||
q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
|
if err := t.runInstantQuery(iq, cmd, engine); err != nil {
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error creating instant query for %q (line %d): %w", cmd.expr, cmd.line, err)
|
|
||||||
}
|
|
||||||
defer q.Close()
|
|
||||||
res := q.Exec(t.context)
|
|
||||||
if res.Err != nil {
|
|
||||||
if cmd.fail {
|
|
||||||
if err := cmd.checkExpectedFailure(res.Err); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
|
|
||||||
}
|
|
||||||
if res.Err == nil && cmd.fail {
|
|
||||||
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
|
|
||||||
}
|
|
||||||
err = cmd.compareResult(res.Value)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check query returns same result in range mode,
|
|
||||||
// by checking against the middle step.
|
|
||||||
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
|
|
||||||
}
|
|
||||||
rangeRes := q.Exec(t.context)
|
|
||||||
if rangeRes.Err != nil {
|
|
||||||
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
|
|
||||||
}
|
|
||||||
defer q.Close()
|
|
||||||
if cmd.ordered {
|
|
||||||
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
mat := rangeRes.Value.(promql.Matrix)
|
|
||||||
if err := assertMatrixSorted(mat); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
vec := make(promql.Vector, 0, len(mat))
|
func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promql.QueryEngine) error {
|
||||||
for _, series := range mat {
|
q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
|
||||||
// We expect either Floats or Histograms.
|
if err != nil {
|
||||||
for _, point := range series.Floats {
|
return fmt.Errorf("error creating instant query for %q (line %d): %w", cmd.expr, cmd.line, err)
|
||||||
if point.T == timeMilliseconds(iq.evalTime) {
|
}
|
||||||
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, F: point.F})
|
defer q.Close()
|
||||||
break
|
res := q.Exec(t.context)
|
||||||
}
|
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
|
||||||
}
|
if !cmd.warn && countWarnings > 0 {
|
||||||
for _, point := range series.Histograms {
|
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
|
||||||
if point.T == timeMilliseconds(iq.evalTime) {
|
}
|
||||||
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, H: point.H})
|
if cmd.warn && countWarnings == 0 {
|
||||||
break
|
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line)
|
||||||
}
|
}
|
||||||
|
if res.Err != nil {
|
||||||
|
if cmd.fail {
|
||||||
|
if err := cmd.checkExpectedFailure(res.Err); err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
if _, ok := res.Value.(promql.Scalar); ok {
|
return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
|
||||||
err = cmd.compareResult(promql.Scalar{V: vec[0].F})
|
}
|
||||||
} else {
|
if res.Err == nil && cmd.fail {
|
||||||
err = cmd.compareResult(vec)
|
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
|
||||||
}
|
}
|
||||||
if err != nil {
|
err = cmd.compareResult(res.Value)
|
||||||
return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err)
|
if err != nil {
|
||||||
}
|
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check query returns same result in range mode,
|
||||||
|
// by checking against the middle step.
|
||||||
|
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
|
||||||
|
}
|
||||||
|
rangeRes := q.Exec(t.context)
|
||||||
|
if rangeRes.Err != nil {
|
||||||
|
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
|
||||||
|
}
|
||||||
|
defer q.Close()
|
||||||
|
if cmd.ordered {
|
||||||
|
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
mat := rangeRes.Value.(promql.Matrix)
|
||||||
|
if err := assertMatrixSorted(mat); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
vec := make(promql.Vector, 0, len(mat))
|
||||||
|
for _, series := range mat {
|
||||||
|
// We expect either Floats or Histograms.
|
||||||
|
for _, point := range series.Floats {
|
||||||
|
if point.T == timeMilliseconds(iq.evalTime) {
|
||||||
|
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, F: point.F})
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, point := range series.Histograms {
|
||||||
|
if point.T == timeMilliseconds(iq.evalTime) {
|
||||||
|
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, H: point.H})
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ok := res.Value.(promql.Scalar); ok {
|
||||||
|
err = cmd.compareResult(promql.Scalar{V: vec[0].F})
|
||||||
|
} else {
|
||||||
|
err = cmd.compareResult(vec)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
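One concrete effect of extracting the loop body into runInstantQuery: the deferred q.Close() calls now run when each helper invocation returns, instead of piling up until execInstantEval exits. The underlying Go behavior, as a tiny sketch:

package main

import "fmt"

// Deferred calls run only when the enclosing function returns, so a
// defer inside a loop lets resources pile up until the loop finishes.
func main() {
	func() {
		for i := 0; i < 3; i++ {
			defer fmt.Println("closed", i) // all three fire at function exit, LIFO
		}
		fmt.Println("loop done")
	}() // prints: loop done, closed 2, closed 1, closed 0
}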
@ -1014,7 +1204,7 @@ func (ll *LazyLoader) parse(input string) error {
|
||||||
if len(l) == 0 {
|
if len(l) == 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if strings.ToLower(patSpace.Split(l, 2)[0]) == "load" {
|
if strings.HasPrefix(strings.ToLower(patSpace.Split(l, 2)[0]), "load") {
|
||||||
_, cmd, err := parseLoad(lines, i)
|
_, cmd, err := parseLoad(lines, i)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|
promql/promqltest/testdata/aggregators.test (vendored): 14 changes
|
@ -399,7 +399,7 @@ eval instant at 1m quantile without(point)((scalar(foo)), data)
|
||||||
{test="three samples"} 1.6
|
{test="three samples"} 1.6
|
||||||
{test="uneven samples"} 2.8
|
{test="uneven samples"} 2.8
|
||||||
|
|
||||||
eval instant at 1m quantile without(point)(NaN, data)
|
eval_warn instant at 1m quantile without(point)(NaN, data)
|
||||||
{test="two samples"} NaN
|
{test="two samples"} NaN
|
||||||
{test="three samples"} NaN
|
{test="three samples"} NaN
|
||||||
{test="uneven samples"} NaN
|
{test="uneven samples"} NaN
|
||||||
|
@ -503,6 +503,18 @@ eval instant at 1m avg(data{test="-big"})
|
||||||
eval instant at 1m avg(data{test="bigzero"})
|
eval instant at 1m avg(data{test="bigzero"})
|
||||||
{} 0
|
{} 0
|
||||||
|
|
||||||
|
# Test summing extreme values.
|
||||||
|
clear
|
||||||
|
|
||||||
|
load 10s
|
||||||
|
data{test="ten",point="a"} 2
|
||||||
|
data{test="ten",point="b"} 8
|
||||||
|
data{test="ten",point="c"} 1e+100
|
||||||
|
data{test="ten",point="d"} -1e100
|
||||||
|
|
||||||
|
eval instant at 1m sum(data{test="ten"})
|
||||||
|
{} 10
|
||||||
|
|
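Why 2 + 8 + 1e100 + (-1e100) makes a good case: naive left-to-right float64 accumulation returns 0 rather than the expected 10, because the small terms are absorbed by 1e100 before it cancels. The `{} 10` expectation therefore presumably exercises more careful summation in the engine:

package main

import "fmt"

// Naive left-to-right float64 accumulation of the series above:
// 10 + 1e100 rounds to exactly 1e100, so the final result is 0, not 10.
func main() {
	vals := []float64{2, 8, 1e100, -1e100}
	var naive float64
	for _, v := range vals {
		naive += v
	}
	fmt.Println(naive) // 0
}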
||||||
clear
|
clear
|
||||||
|
|
||||||
# Test that aggregations are deterministic.
|
# Test that aggregations are deterministic.
|
||||||
|
|
promql/promqltest/testdata/functions.test (vendored): 6 changes
|
@ -855,17 +855,17 @@ eval instant at 1m quantile_over_time(1, data[2m])
|
||||||
{test="three samples"} 2
|
{test="three samples"} 2
|
||||||
{test="uneven samples"} 4
|
{test="uneven samples"} 4
|
||||||
|
|
||||||
eval instant at 1m quantile_over_time(-1, data[2m])
|
eval_warn instant at 1m quantile_over_time(-1, data[2m])
|
||||||
{test="two samples"} -Inf
|
{test="two samples"} -Inf
|
||||||
{test="three samples"} -Inf
|
{test="three samples"} -Inf
|
||||||
{test="uneven samples"} -Inf
|
{test="uneven samples"} -Inf
|
||||||
|
|
||||||
eval instant at 1m quantile_over_time(2, data[2m])
|
eval_warn instant at 1m quantile_over_time(2, data[2m])
|
||||||
{test="two samples"} +Inf
|
{test="two samples"} +Inf
|
||||||
{test="three samples"} +Inf
|
{test="three samples"} +Inf
|
||||||
{test="uneven samples"} +Inf
|
{test="uneven samples"} +Inf
|
||||||
|
|
||||||
eval instant at 1m (quantile_over_time(2, (data[2m])))
|
eval_warn instant at 1m (quantile_over_time(2, (data[2m])))
|
||||||
{test="two samples"} +Inf
|
{test="two samples"} +Inf
|
||||||
{test="three samples"} +Inf
|
{test="three samples"} +Inf
|
||||||
{test="uneven samples"} +Inf
|
{test="uneven samples"} +Inf
|
||||||
|
|
promql/promqltest/testdata/histograms.test (vendored): 110 changes
|
@ -5,7 +5,7 @@
|
||||||
# server has to cope with it.
|
# server has to cope with it.
|
||||||
|
|
||||||
# Test histogram.
|
# Test histogram.
|
||||||
load 5m
|
load_with_nhcb 5m
|
||||||
testhistogram_bucket{le="0.1", start="positive"} 0+5x10
|
testhistogram_bucket{le="0.1", start="positive"} 0+5x10
|
||||||
testhistogram_bucket{le=".2", start="positive"} 0+7x10
|
testhistogram_bucket{le=".2", start="positive"} 0+7x10
|
||||||
testhistogram_bucket{le="1e0", start="positive"} 0+11x10
|
testhistogram_bucket{le="1e0", start="positive"} 0+11x10
|
||||||
|
@ -18,15 +18,33 @@ load 5m
|
||||||
# Another test histogram, where q(1/6), q(1/2), and q(5/6) are each in
|
# Another test histogram, where q(1/6), q(1/2), and q(5/6) are each in
|
||||||
# the middle of a bucket and should therefore be 1, 3, and 5,
|
# the middle of a bucket and should therefore be 1, 3, and 5,
|
||||||
# respectively.
|
# respectively.
|
||||||
load 5m
|
load_with_nhcb 5m
|
||||||
testhistogram2_bucket{le="0"} 0+0x10
|
testhistogram2_bucket{le="0"} 0+0x10
|
||||||
testhistogram2_bucket{le="2"} 0+1x10
|
testhistogram2_bucket{le="2"} 0+1x10
|
||||||
testhistogram2_bucket{le="4"} 0+2x10
|
testhistogram2_bucket{le="4"} 0+2x10
|
||||||
testhistogram2_bucket{le="6"} 0+3x10
|
testhistogram2_bucket{le="6"} 0+3x10
|
||||||
testhistogram2_bucket{le="+Inf"} 0+3x10
|
testhistogram2_bucket{le="+Inf"} 0+3x10
|
||||||
|
|
||||||
|
# Another test histogram, this time without any observations in the +Inf bucket.
|
||||||
|
# This enables a meaningful calculation of standard deviation and variance.
|
||||||
|
load_with_nhcb 5m
|
||||||
|
testhistogram3_bucket{le="0", start="positive"} 0+0x10
|
||||||
|
testhistogram3_bucket{le="0.1", start="positive"} 0+5x10
|
||||||
|
testhistogram3_bucket{le=".2", start="positive"} 0+7x10
|
||||||
|
testhistogram3_bucket{le="1e0", start="positive"} 0+11x10
|
||||||
|
testhistogram3_bucket{le="+Inf", start="positive"} 0+11x10
|
||||||
|
testhistogram3_sum{start="positive"} 0+33x10
|
||||||
|
testhistogram3_count{start="positive"} 0+11x10
|
||||||
|
testhistogram3_bucket{le="-.25", start="negative"} 0+0x10
|
||||||
|
testhistogram3_bucket{le="-.2", start="negative"} 0+1x10
|
||||||
|
testhistogram3_bucket{le="-0.1", start="negative"} 0+2x10
|
||||||
|
testhistogram3_bucket{le="0.3", start="negative"} 0+2x10
|
||||||
|
testhistogram3_bucket{le="+Inf", start="negative"} 0+2x10
|
||||||
|
testhistogram3_sum{start="negative"} 0+8x10
|
||||||
|
testhistogram3_count{start="negative"} 0+2x10
|
||||||
|
|
||||||
# Now a more realistic histogram per job and instance to test aggregation.
|
# Now a more realistic histogram per job and instance to test aggregation.
|
||||||
load 5m
|
load_with_nhcb 5m
|
||||||
request_duration_seconds_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
|
request_duration_seconds_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
|
||||||
request_duration_seconds_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
|
request_duration_seconds_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
|
||||||
request_duration_seconds_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
|
request_duration_seconds_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
|
||||||
|
@ -41,7 +59,7 @@ load 5m
|
||||||
request_duration_seconds_bucket{job="job2", instance="ins2", le="+Inf"} 0+9x10
|
request_duration_seconds_bucket{job="job2", instance="ins2", le="+Inf"} 0+9x10
|
||||||
|
|
||||||
# Different le representations in one histogram.
|
# Different le representations in one histogram.
|
||||||
load 5m
|
load_with_nhcb 5m
|
||||||
mixed_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
|
mixed_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
|
||||||
mixed_bucket{job="job1", instance="ins1", le="0.2"} 0+1x10
|
mixed_bucket{job="job1", instance="ins1", le="0.2"} 0+1x10
|
||||||
mixed_bucket{job="job1", instance="ins1", le="2e-1"} 0+1x10
|
mixed_bucket{job="job1", instance="ins1", le="2e-1"} 0+1x10
|
||||||
|
@ -50,27 +68,81 @@ load 5m
|
||||||
mixed_bucket{job="job1", instance="ins2", le="+inf"} 0+0x10
|
mixed_bucket{job="job1", instance="ins2", le="+inf"} 0+0x10
|
||||||
mixed_bucket{job="job1", instance="ins2", le="+Inf"} 0+0x10
|
mixed_bucket{job="job1", instance="ins2", le="+Inf"} 0+0x10
|
||||||
|
|
||||||
|
# Test histogram_count.
|
||||||
|
eval instant at 50m histogram_count(testhistogram3)
|
||||||
|
{start="positive"} 110
|
||||||
|
{start="negative"} 20
|
||||||
|
|
||||||
|
# Test histogram_sum.
|
||||||
|
eval instant at 50m histogram_sum(testhistogram3)
|
||||||
|
{start="positive"} 330
|
||||||
|
{start="negative"} 80
|
||||||
|
|
||||||
|
# Test histogram_avg.
|
||||||
|
eval instant at 50m histogram_avg(testhistogram3)
|
||||||
|
{start="positive"} 3
|
||||||
|
{start="negative"} 4
|
||||||
|
|
||||||
|
# Test histogram_stddev.
|
||||||
|
eval instant at 50m histogram_stddev(testhistogram3)
|
||||||
|
{start="positive"} 2.8189265757336734
|
||||||
|
{start="negative"} 4.182715937754936
|
||||||
|
|
||||||
|
# Test histogram_stdvar.
|
||||||
|
eval instant at 50m histogram_stdvar(testhistogram3)
|
||||||
|
{start="positive"} 7.946347039377573
|
||||||
|
{start="negative"} 17.495112615949154
|
||||||
|
|
||||||
|
# Test histogram_fraction.
|
||||||
|
|
||||||
|
eval instant at 50m histogram_fraction(0, 0.2, testhistogram3)
|
||||||
|
{start="positive"} 0.6363636363636364
|
||||||
|
{start="negative"} 0
|
||||||
|
|
||||||
|
eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m]))
|
||||||
|
{start="positive"} 0.6363636363636364
|
||||||
|
{start="negative"} 0
|
||||||
|
|
||||||
|
# Test histogram_quantile.
|
||||||
|
|
||||||
|
eval instant at 50m histogram_quantile(0, testhistogram3_bucket)
|
||||||
|
{start="positive"} 0
|
||||||
|
{start="negative"} -0.25
|
||||||
|
|
||||||
|
eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket)
|
||||||
|
{start="positive"} 0.055
|
||||||
|
{start="negative"} -0.225
|
||||||
|
|
||||||
|
eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket)
|
||||||
|
{start="positive"} 0.125
|
||||||
|
{start="negative"} -0.2
|
||||||
|
|
||||||
|
eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket)
|
||||||
|
{start="positive"} 0.45
|
||||||
|
{start="negative"} -0.15
|
||||||
|
|
||||||
|
eval instant at 50m histogram_quantile(1, testhistogram3_bucket)
|
||||||
|
{start="positive"} 1
|
||||||
|
{start="negative"} -0.1
|
||||||
|
|
||||||
# Quantile too low.
|
# Quantile too low.
|
||||||
eval instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
|
eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
|
||||||
{start="positive"} -Inf
|
{start="positive"} -Inf
|
||||||
{start="negative"} -Inf
|
{start="negative"} -Inf
|
||||||
|
|
||||||
# Quantile too high.
|
# Quantile too high.
|
||||||
eval instant at 50m histogram_quantile(1.01, testhistogram_bucket)
|
eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket)
|
||||||
{start="positive"} +Inf
|
{start="positive"} +Inf
|
||||||
{start="negative"} +Inf
|
{start="negative"} +Inf
|
||||||
|
|
||||||
# Quantile invalid.
|
# Quantile invalid.
|
||||||
eval instant at 50m histogram_quantile(NaN, testhistogram_bucket)
|
eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket)
|
||||||
{start="positive"} NaN
|
{start="positive"} NaN
|
||||||
{start="negative"} NaN
|
{start="negative"} NaN
|
||||||
|
|
||||||
# Quantile value in lowest bucket, which is positive.
|
# Quantile value in lowest bucket.
|
||||||
eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="positive"})
|
eval instant at 50m histogram_quantile(0, testhistogram_bucket)
|
||||||
{start="positive"} 0
|
{start="positive"} 0
|
||||||
|
|
||||||
# Quantile value in lowest bucket, which is negative.
|
|
||||||
eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="negative"})
|
|
||||||
{start="negative"} -0.2
|
{start="negative"} -0.2
|
||||||
|
|
||||||
# Quantile value in highest bucket.
|
# Quantile value in highest bucket.
|
||||||
|
@ -83,7 +155,6 @@ eval instant at 50m histogram_quantile(0.2, testhistogram_bucket)
|
||||||
{start="positive"} 0.048
|
{start="positive"} 0.048
|
||||||
{start="negative"} -0.2
|
{start="negative"} -0.2
|
||||||
|
|
||||||
|
|
||||||
eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
|
eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
|
||||||
{start="positive"} 0.15
|
{start="positive"} 0.15
|
||||||
{start="negative"} -0.15
|
{start="negative"} -0.15
|
||||||
|
@ -182,6 +253,9 @@ eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket
|
||||||
{instance="ins1", job="job2"} 0.1
|
{instance="ins1", job="job2"} 0.1
|
||||||
{instance="ins2", job="job2"} 0.11666666666666667
|
{instance="ins2", job="job2"} 0.11666666666666667
|
||||||
|
|
||||||
|
eval instant at 50m sum(request_duration_seconds)
|
||||||
|
{} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}
|
||||||
|
|
||||||
# A histogram with nonmonotonic bucket counts. This may happen when recording
|
# A histogram with nonmonotonic bucket counts. This may happen when recording
|
||||||
# rule evaluation or federation races scrape ingestion, causing some buckets
|
# rule evaluation or federation races scrape ingestion, causing some buckets
|
||||||
# counts to be derived from fewer samples.
|
# counts to be derived from fewer samples.
|
||||||
|
@ -209,6 +283,10 @@ eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[10m]))
|
||||||
{instance="ins1", job="job1"} 0.15
|
{instance="ins1", job="job1"} 0.15
|
||||||
{instance="ins2", job="job1"} NaN
|
{instance="ins2", job="job1"} NaN
|
||||||
|
|
||||||
|
eval instant at 50m histogram_quantile(0.5, rate(mixed[10m]))
|
||||||
|
{instance="ins1", job="job1"} 0.2
|
||||||
|
{instance="ins2", job="job1"} NaN
|
||||||
|
|
||||||
eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[10m]))
|
eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[10m]))
|
||||||
{instance="ins1", job="job1"} 0.2
|
{instance="ins1", job="job1"} 0.2
|
||||||
{instance="ins2", job="job1"} NaN
|
{instance="ins2", job="job1"} NaN
|
||||||
|
@ -217,7 +295,7 @@ eval instant at 50m histogram_quantile(1, rate(mixed_bucket[10m]))
|
||||||
{instance="ins1", job="job1"} 0.2
|
{instance="ins1", job="job1"} 0.2
|
||||||
{instance="ins2", job="job1"} NaN
|
{instance="ins2", job="job1"} NaN
|
||||||
|
|
||||||
load 5m
|
load_with_nhcb 5m
|
||||||
empty_bucket{le="0.1", job="job1", instance="ins1"} 0x10
|
empty_bucket{le="0.1", job="job1", instance="ins1"} 0x10
|
||||||
empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10
|
empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10
|
||||||
empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10
|
empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10
|
||||||
|
@ -227,9 +305,9 @@ eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m]))
|
||||||
|
|
||||||
# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set
|
# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set
|
||||||
# https://github.com/prometheus/prometheus/issues/9910
|
# https://github.com/prometheus/prometheus/issues/9910
|
||||||
load 5m
|
load_with_nhcb 5m
|
||||||
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
|
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
|
||||||
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
|
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
|
||||||
request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
|
request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
|
||||||
|
|
||||||
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration.*"})
|
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket$"})
|
||||||
|
|
|
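Note for reviewers: the eval → eval_warn flips above encode the new behaviour that histogram_quantile with φ outside [0, 1] (or NaN) still returns -Inf/+Inf/NaN but now attaches a warning annotation instead of staying silent. A minimal, self-contained Go sketch of such a guard — the function name and message wording are illustrative, not the actual promql code:

package main

import (
    "fmt"
    "math"
)

// checkQuantile mirrors the behaviour exercised by the eval_warn cases:
// q < 0 yields -Inf, q > 1 yields +Inf, NaN yields NaN, and in each case
// a warning is reported instead of an evaluation error.
func checkQuantile(q float64) (float64, []string) {
    switch {
    case math.IsNaN(q):
        return math.NaN(), []string{"quantile value should be between 0 and 1, got NaN"}
    case q < 0:
        return math.Inf(-1), []string{fmt.Sprintf("quantile value should be between 0 and 1, got %g", q)}
    case q > 1:
        return math.Inf(+1), []string{fmt.Sprintf("quantile value should be between 0 and 1, got %g", q)}
    }
    return q, nil
}

func main() {
    for _, q := range []float64{-0.1, 1.01, math.NaN(), 0.5} {
        v, warns := checkQuantile(q)
        fmt.Println(q, "->", v, warns)
    }
}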
@@ -386,7 +386,7 @@ clear
 load 10m
   histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1

-eval instant at 10m histogram_quantile(1.001, histogram_quantile_1)
+eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_1)
   {} Inf

 eval instant at 10m histogram_quantile(1, histogram_quantile_1)
@@ -410,7 +410,7 @@ eval instant at 10m histogram_quantile(0.1, histogram_quantile_1)
 eval instant at 10m histogram_quantile(0, histogram_quantile_1)
   {} 0

-eval instant at 10m histogram_quantile(-1, histogram_quantile_1)
+eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1)
   {} -Inf

 clear
@@ -419,7 +419,7 @@ clear
 load 10m
   histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1

-eval instant at 10m histogram_quantile(1.001, histogram_quantile_2)
+eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_2)
   {} Inf

 eval instant at 10m histogram_quantile(1, histogram_quantile_2)
@@ -440,7 +440,7 @@ eval instant at 10m histogram_quantile(0.1, histogram_quantile_2)
 eval instant at 10m histogram_quantile(0, histogram_quantile_2)
   {} -16

-eval instant at 10m histogram_quantile(-1, histogram_quantile_2)
+eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2)
   {} -Inf

 clear
@@ -449,7 +449,7 @@ clear
 load 10m
   histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1

-eval instant at 10m histogram_quantile(1.001, histogram_quantile_3)
+eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_3)
   {} Inf

 eval instant at 10m histogram_quantile(1, histogram_quantile_3)
@@ -485,7 +485,7 @@ eval instant at 10m histogram_quantile(0.01, histogram_quantile_3)
 eval instant at 10m histogram_quantile(0, histogram_quantile_3)
   {} -16

-eval instant at 10m histogram_quantile(-1, histogram_quantile_3)
+eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3)
   {} -Inf

 clear
@@ -763,3 +763,17 @@ eval instant at 10m histogram_count(increase(reset_in_bucket[15m]))
 eval instant at 10m histogram_sum(increase(reset_in_bucket[15m]))
   {} 10.5

+clear
+
+# Test native histograms with custom buckets.
+load 5m
+  custom_buckets_histogram {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}x10
+
+eval instant at 5m histogram_fraction(5, 10, custom_buckets_histogram)
+  {} 0.5
+
+eval instant at 5m histogram_quantile(0.5, custom_buckets_histogram)
+  {} 7.5
+
+eval instant at 5m sum(custom_buckets_histogram)
+  {} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}
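The expected values in the new custom-buckets block check out by hand: with custom_values [5 10] and bucket counts [1 2 1] (total count 4), the (5, 10] bucket holds 2 of 4 observations, and the median interpolates halfway into that bucket. A quick Go verification of that arithmetic, assuming the usual linear interpolation inside the chosen bucket:

package main

import "fmt"

func main() {
    bounds := []float64{5, 10}   // custom_values: upper bounds; last bucket is (10, +Inf)
    counts := []float64{1, 2, 1} // buckets: [1 2 1], total count 4

    // histogram_fraction(5, 10, h): the (5, 10] bucket holds 2 of 4 observations.
    fmt.Println("fraction(5,10) =", counts[1]/(counts[0]+counts[1]+counts[2])) // 0.5

    // histogram_quantile(0.5, h): rank 0.5*4 = 2 falls into (5, 10];
    // 1 observation lies below that bucket, so we interpolate (2-1)/2 of the way in.
    rank := 0.5 * 4
    lower, upper := bounds[0], bounds[1]
    below := counts[0]
    fmt.Println("quantile(0.5) =", lower+(upper-lower)*(rank-below)/counts[1]) // 7.5
}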
@@ -206,12 +206,15 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {

     for it.Next() {
         bucket = it.At()
+        if bucket.Count == 0 {
+            continue
+        }
         count += bucket.Count
         if count >= rank {
             break
         }
     }
-    if bucket.Lower < 0 && bucket.Upper > 0 {
+    if !h.UsesCustomBuckets() && bucket.Lower < 0 && bucket.Upper > 0 {
         switch {
         case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0:
             // The result is in the zero bucket and the histogram has only
@@ -222,6 +225,17 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
             // negative buckets. So we consider 0 to be the upper bound.
             bucket.Upper = 0
         }
+    } else if h.UsesCustomBuckets() {
+        if bucket.Lower == math.Inf(-1) {
+            // first bucket, with lower bound -Inf
+            if bucket.Upper <= 0 {
+                return bucket.Upper
+            }
+            bucket.Lower = 0
+        } else if bucket.Upper == math.Inf(1) {
+            // last bucket, with upper bound +Inf
+            return bucket.Lower
+        }
     }
     // Due to numerical inaccuracies, we could end up with a higher count
     // than h.Count. Thus, make sure count is never higher than h.Count.
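The new else-if branch exists because a custom-buckets histogram's first bucket has lower bound -Inf and its last has upper bound +Inf, so linear interpolation has no finite edge to work with. A standalone sketch of that boundary handling — the bucket type and helper below are stand-ins, not the promql internals:

package main

import (
    "fmt"
    "math"
)

type bucket struct{ Lower, Upper, Count float64 }

// quantileInBucket interpolates the quantile position inside one bucket,
// special-casing the infinite edges the way the diff above does.
func quantileInBucket(rank, below float64, b bucket) float64 {
    if b.Lower == math.Inf(-1) {
        if b.Upper <= 0 {
            return b.Upper // all mass at or below a non-positive edge
        }
        b.Lower = 0 // custom buckets hold only positive observations here
    }
    if b.Upper == math.Inf(1) {
        return b.Lower // no finite upper edge to interpolate towards
    }
    return b.Lower + (b.Upper-b.Lower)*((rank-below)/b.Count)
}

func main() {
    // First custom bucket (-Inf, 5]: interpolate inside [0, 5] instead.
    fmt.Println(quantileInBucket(1, 0, bucket{math.Inf(-1), 5, 2})) // 2.5
    // Last custom bucket (10, +Inf): fall back to the lower edge.
    fmt.Println(quantileInBucket(3.5, 3, bucket{10, math.Inf(1), 1})) // 10
}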
@@ -190,10 +190,18 @@ func (m *Manager) Stop() {

 // Update the rule manager's state as the config requires. If
 // loading the new rules failed the old rule set is restored.
+// This method will no-op in case the manager is already stopped.
 func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc) error {
     m.mtx.Lock()
     defer m.mtx.Unlock()

+    // We cannot update a stopped manager
+    select {
+    case <-m.done:
+        return nil
+    default:
+    }
+
     groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, files...)

     if errs != nil {
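The non-blocking select on m.done is a common Go idiom for making calls after shutdown a no-op: a receive from a closed channel always succeeds immediately, while the default case keeps the select from blocking when the channel is still open. A minimal sketch with assumed names (worker, done) rather than the manager's real fields:

package main

import "fmt"

type worker struct{ done chan struct{} }

func (w *worker) Stop() { close(w.done) }

func (w *worker) Update() error {
    select {
    case <-w.done: // closed channel always receives immediately
        return nil // already stopped: do nothing
    default: // still running: fall through and apply the update
    }
    fmt.Println("applying update")
    return nil
}

func main() {
    w := &worker{done: make(chan struct{})}
    _ = w.Update() // prints "applying update"
    w.Stop()
    _ = w.Update() // silently no-ops
}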
@@ -1455,7 +1455,8 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {

     expHist := hists[0].ToFloat(nil)
     for _, h := range hists[1:] {
-        expHist = expHist.Add(h.ToFloat(nil))
+        expHist, err = expHist.Add(h.ToFloat(nil))
+        require.NoError(t, err)
     }

     it := s.Iterator(nil)
@@ -2098,6 +2099,23 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
     require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
 }

+func TestUpdateWhenStopped(t *testing.T) {
+    files := []string{"fixtures/rules.yaml"}
+    ruleManager := NewManager(&ManagerOptions{
+        Context: context.Background(),
+        Logger:  log.NewNopLogger(),
+    })
+    ruleManager.start()
+    err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil)
+    require.NoError(t, err)
+    require.NotEmpty(t, ruleManager.groups)
+
+    ruleManager.Stop()
+    // Updates following a stop are no-op.
+    err = ruleManager.Update(10*time.Second, []string{}, labels.EmptyLabels(), "", nil)
+    require.NoError(t, err)
+}
+
 const artificialDelay = 250 * time.Millisecond

 func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.Int32, maxConcurrent int64) *ManagerOptions {
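The test change above follows FloatHistogram.Add now returning an error alongside the result. A toy sketch of why such an Add can fail — the type and the compatibility check below are simplified stand-ins, not the real model/histogram logic:

package main

import (
    "errors"
    "fmt"
)

type FloatHistogram struct {
    CustomValues []float64 // nil for exponential-bucket histograms
    Sum          float64
}

// Add fails when the two histograms do not share a bucket layout;
// summing incompatible layouts would produce meaningless buckets.
func (h *FloatHistogram) Add(o *FloatHistogram) (*FloatHistogram, error) {
    if len(h.CustomValues) != len(o.CustomValues) {
        return nil, errors.New("cannot add histograms with incompatible bucket layouts")
    }
    h.Sum += o.Sum
    return h, nil
}

func main() {
    a := &FloatHistogram{CustomValues: []float64{5, 10}, Sum: 1}
    b := &FloatHistogram{Sum: 2} // exponential layout: no custom values
    if _, err := a.Add(b); err != nil {
        fmt.Println("add failed:", err)
    }
}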
@@ -663,7 +663,7 @@ func appender(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int3
         }
     }

-    if maxSchema < nativeHistogramMaxSchema {
+    if maxSchema < histogram.ExponentialSchemaMax {
         app = &maxSchemaAppender{
             Appender:  app,
             maxSchema: maxSchema,
@@ -1978,10 +1978,10 @@ func pickSchema(bucketFactor float64) int32 {
     }
     floor := math.Floor(-math.Log2(math.Log2(bucketFactor)))
     switch {
-    case floor >= float64(nativeHistogramMaxSchema):
-        return nativeHistogramMaxSchema
-    case floor <= float64(nativeHistogramMinSchema):
-        return nativeHistogramMinSchema
+    case floor >= float64(histogram.ExponentialSchemaMax):
+        return histogram.ExponentialSchemaMax
+    case floor <= float64(histogram.ExponentialSchemaMin):
+        return histogram.ExponentialSchemaMin
     default:
         return int32(floor)
     }
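pickSchema's arithmetic is easy to sanity-check: the schema is floor(-log2(log2(bucketFactor))), clamped to the exponential schema range — assumed here to be [-4, 8], matching the constants this diff replaces with histogram.ExponentialSchemaMin/Max:

package main

import (
    "fmt"
    "math"
)

const (
    schemaMin int32 = -4
    schemaMax int32 = 8
)

// pickSchema chooses the coarsest schema whose bucket growth factor
// 2^(2^-schema) does not exceed the requested bucketFactor.
func pickSchema(bucketFactor float64) int32 {
    floor := math.Floor(-math.Log2(math.Log2(bucketFactor)))
    switch {
    case floor >= float64(schemaMax):
        return schemaMax
    case floor <= float64(schemaMin):
        return schemaMin
    default:
        return int32(floor)
    }
}

func main() {
    fmt.Println(pickSchema(1.1))    // 2: buckets may grow by up to 2^(2^-2) ≈ 1.19
    fmt.Println(pickSchema(1.0009)) // clamped to 8, the finest exponential schema
    fmt.Println(pickSchema(1e6))    // clamped to -4, the coarsest schema
}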
@@ -511,7 +511,7 @@ func TestScrapePoolAppender(t *testing.T) {
     appl, ok := loop.(*scrapeLoop)
     require.True(t, ok, "Expected scrapeLoop but got %T", loop)

-    wrapped := appender(appl.appender(context.Background()), 0, 0, nativeHistogramMaxSchema)
+    wrapped := appender(appl.appender(context.Background()), 0, 0, histogram.ExponentialSchemaMax)

     tl, ok := wrapped.(*timeLimitAppender)
     require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped)
@@ -527,7 +527,7 @@ func TestScrapePoolAppender(t *testing.T) {
     appl, ok = loop.(*scrapeLoop)
     require.True(t, ok, "Expected scrapeLoop but got %T", loop)

-    wrapped = appender(appl.appender(context.Background()), sampleLimit, 0, nativeHistogramMaxSchema)
+    wrapped = appender(appl.appender(context.Background()), sampleLimit, 0, histogram.ExponentialSchemaMax)

     sl, ok := wrapped.(*limitAppender)
     require.True(t, ok, "Expected limitAppender but got %T", wrapped)
@@ -538,7 +538,7 @@ func TestScrapePoolAppender(t *testing.T) {
     _, ok = tl.Appender.(nopAppender)
     require.True(t, ok, "Expected base appender but got %T", tl.Appender)

-    wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, nativeHistogramMaxSchema)
+    wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, histogram.ExponentialSchemaMax)

     bl, ok := wrapped.(*bucketLimitAppender)
     require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped)
@@ -670,7 +670,7 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app
     true,
     false,
     true,
-    0, 0, nativeHistogramMaxSchema,
+    0, 0, histogram.ExponentialSchemaMax,
     nil,
     interval,
     time.Hour,
@@ -812,7 +812,7 @@ func TestScrapeLoopRun(t *testing.T) {
     true,
     false,
     true,
-    0, 0, nativeHistogramMaxSchema,
+    0, 0, histogram.ExponentialSchemaMax,
     nil,
     time.Second,
     time.Hour,
@@ -956,7 +956,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
     true,
     false,
     true,
-    0, 0, nativeHistogramMaxSchema,
+    0, 0, histogram.ExponentialSchemaMax,
     nil,
     0,
     0,
@@ -365,16 +365,26 @@ type bucketLimitAppender struct {

 func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
     if h != nil {
+        // Return with an early error if the histogram has too many buckets and the
+        // schema is not exponential, in which case we can't reduce the resolution.
+        if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(h.Schema) {
+            return 0, errBucketLimit
+        }
         for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit {
-            if h.Schema == -4 {
+            if h.Schema <= histogram.ExponentialSchemaMin {
                 return 0, errBucketLimit
             }
             h = h.ReduceResolution(h.Schema - 1)
         }
     }
     if fh != nil {
+        // Return with an early error if the histogram has too many buckets and the
+        // schema is not exponential, in which case we can't reduce the resolution.
+        if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(fh.Schema) {
+            return 0, errBucketLimit
+        }
         for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit {
-            if fh.Schema == -4 {
+            if fh.Schema <= histogram.ExponentialSchemaMin {
                 return 0, errBucketLimit
             }
             fh = fh.ReduceResolution(fh.Schema - 1)
@@ -387,11 +397,6 @@ func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labe
     return ref, nil
 }

-const (
-    nativeHistogramMaxSchema int32 = 8
-    nativeHistogramMinSchema int32 = -4
-)
-
 type maxSchemaAppender struct {
     storage.Appender

@@ -400,12 +405,12 @@ type maxSchemaAppender struct {

 func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
     if h != nil {
-        if h.Schema > app.maxSchema {
+        if histogram.IsExponentialSchema(h.Schema) && h.Schema > app.maxSchema {
             h = h.ReduceResolution(app.maxSchema)
         }
     }
     if fh != nil {
-        if fh.Schema > app.maxSchema {
+        if histogram.IsExponentialSchema(fh.Schema) && fh.Schema > app.maxSchema {
             fh = fh.ReduceResolution(app.maxSchema)
         }
     }
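The loop above relies on ReduceResolution(schema-1) roughly halving the bucket count, so repeated reduction eventually satisfies the limit unless the schema is already minimal or the buckets are custom (which is exactly what the new early-error check catches). An illustrative-only sketch of that merging effect — real reduction also realigns spans and offsets:

package main

import "fmt"

// mergePairs merges adjacent bucket counts, as dropping the schema by
// one would for aligned exponential buckets.
func mergePairs(counts []int) []int {
    out := make([]int, 0, (len(counts)+1)/2)
    for i := 0; i < len(counts); i += 2 {
        sum := counts[i]
        if i+1 < len(counts) {
            sum += counts[i+1]
        }
        out = append(out, sum)
    }
    return out
}

func main() {
    counts := []int{1, 2, 3, 4, 5, 6, 7, 8}
    limit := 3
    for len(counts) > limit {
        counts = mergePairs(counts)
        fmt.Println(counts)
    }
    // Output:
    // [3 7 11 15]
    // [10 26]
}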
@@ -474,6 +474,17 @@ func TestBucketLimitAppender(t *testing.T) {
         PositiveBuckets: []int64{1, 0}, // 1, 1
     }

+    customBuckets := histogram.Histogram{
+        Schema: histogram.CustomBucketsSchema,
+        Count:  9,
+        Sum:    33,
+        PositiveSpans: []histogram.Span{
+            {Offset: 0, Length: 3},
+        },
+        PositiveBuckets: []int64{3, 0, 0},
+        CustomValues:    []float64{1, 2, 3},
+    }
+
     cases := []struct {
         h     histogram.Histogram
         limit int
@@ -507,6 +518,18 @@ func TestBucketLimitAppender(t *testing.T) {
             expectBucketCount: 1,
             expectSchema:      -2,
         },
+        {
+            h:           customBuckets,
+            limit:       2,
+            expectError: true,
+        },
+        {
+            h:                 customBuckets,
+            limit:             3,
+            expectError:       false,
+            expectBucketCount: 3,
+            expectSchema:      histogram.CustomBucketsSchema,
+        },
     }

     resApp := &collectResultAppender{}
@@ -562,6 +585,17 @@ func TestMaxSchemaAppender(t *testing.T) {
         NegativeBuckets: []int64{3, 0, 0},
     }

+    customBuckets := histogram.Histogram{
+        Schema: histogram.CustomBucketsSchema,
+        Count:  9,
+        Sum:    33,
+        PositiveSpans: []histogram.Span{
+            {Offset: 0, Length: 3},
+        },
+        PositiveBuckets: []int64{3, 0, 0},
+        CustomValues:    []float64{1, 2, 3},
+    }
+
     cases := []struct {
         h         histogram.Histogram
         maxSchema int32
@@ -577,6 +611,11 @@ func TestMaxSchemaAppender(t *testing.T) {
             maxSchema:    0,
             expectSchema: 0,
         },
+        {
+            h:            customBuckets,
+            maxSchema:    -1,
+            expectSchema: histogram.CustomBucketsSchema,
+        },
     }

     resApp := &collectResultAppender{}
@@ -36,4 +36,4 @@ jobs:
     uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
     with:
       args: --verbose
-      version: v1.59.0
+      version: v1.59.1
@@ -45,9 +45,15 @@ type mergeGenericQuerier struct {
 //
 // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
 func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier {
-    if len(primaries)+len(secondaries) == 0 {
-        return NoopQuerier()
+    switch {
+    case len(primaries)+len(secondaries) == 0:
+        return noopQuerier{}
+    case len(primaries) == 1 && len(secondaries) == 0:
+        return primaries[0]
+    case len(primaries) == 0 && len(secondaries) == 1:
+        return secondaries[0]
     }

     queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
     for _, q := range primaries {
         if _, ok := q.(noopQuerier); !ok && q != nil {
@@ -77,6 +83,15 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer
 // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
 // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
 func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier {
+    switch {
+    case len(primaries) == 0 && len(secondaries) == 0:
+        return noopChunkQuerier{}
+    case len(primaries) == 1 && len(secondaries) == 0:
+        return primaries[0]
+    case len(primaries) == 0 && len(secondaries) == 1:
+        return secondaries[0]
+    }
+
     queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
     for _, q := range primaries {
         if _, ok := q.(noopChunkQuerier); !ok && q != nil {
@@ -102,13 +117,6 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica

 // Select returns a set of series that matches the given label matchers.
 func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
-    if len(q.queriers) == 0 {
-        return noopGenericSeriesSet{}
-    }
-    if len(q.queriers) == 1 {
-        return q.queriers[0].Select(ctx, sortSeries, hints, matchers...)
-    }
-
     seriesSets := make([]genericSeriesSet, 0, len(q.queriers))
     if !q.concurrentSelect {
         for _, querier := range q.queriers {
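The new switch short-circuits the single-querier case: with nothing to merge, the constructor returns the lone querier instead of wrapping it, which is also why mergeGenericQuerier.Select could drop its own special cases. A stand-in sketch of the pattern with simplified types:

package main

import "fmt"

type Querier interface{ Name() string }

type noop struct{}

func (noop) Name() string { return "noop" }

type merged struct{ qs []Querier }

func (m merged) Name() string { return fmt.Sprintf("merge(%d)", len(m.qs)) }

// newMergeQuerier only builds the merging wrapper when there is
// actually more than one underlying querier.
func newMergeQuerier(primaries, secondaries []Querier) Querier {
    switch {
    case len(primaries)+len(secondaries) == 0:
        return noop{}
    case len(primaries) == 1 && len(secondaries) == 0:
        return primaries[0] // nothing to merge: avoid the wrapper entirely
    case len(primaries) == 0 && len(secondaries) == 1:
        return secondaries[0]
    }
    return merged{qs: append(append([]Querier{}, primaries...), secondaries...)}
}

type q struct{ n string }

func (x q) Name() string { return x.n }

func main() {
    fmt.Println(newMergeQuerier([]Querier{q{"a"}}, nil).Name())         // a
    fmt.Println(newMergeQuerier([]Querier{q{"a"}, q{"b"}}, nil).Name()) // merge(2)
}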
@@ -180,9 +180,9 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
         },
     } {
         t.Run(tc.name, func(t *testing.T) {
-            var p Querier
+            var p []Querier
             if tc.primaryQuerierSeries != nil {
-                p = &mockQuerier{toReturn: tc.primaryQuerierSeries}
+                p = append(p, &mockQuerier{toReturn: tc.primaryQuerierSeries})
             }
             var qs []Querier
             for _, in := range tc.querierSeries {
@@ -190,7 +190,7 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
             }
             qs = append(qs, tc.extraQueriers...)

-            mergedQuerier := NewMergeQuerier([]Querier{p}, qs, ChainedSeriesMerge).Select(context.Background(), false, nil)
+            mergedQuerier := NewMergeQuerier(p, qs, ChainedSeriesMerge).Select(context.Background(), false, nil)

             // Get all merged series upfront to make sure there are no incorrectly retained shared
             // buffers causing bugs.
@@ -355,9 +355,9 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
         },
     } {
         t.Run(tc.name, func(t *testing.T) {
-            var p ChunkQuerier
+            var p []ChunkQuerier
             if tc.primaryChkQuerierSeries != nil {
-                p = &mockChunkQuerier{toReturn: tc.primaryChkQuerierSeries}
+                p = append(p, &mockChunkQuerier{toReturn: tc.primaryChkQuerierSeries})
             }

             var qs []ChunkQuerier
@@ -366,7 +366,7 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
             }
             qs = append(qs, tc.extraQueriers...)

-            merged := NewMergeChunkQuerier([]ChunkQuerier{p}, qs, NewCompactingChunkSeriesMerger(nil)).Select(context.Background(), false, nil)
+            merged := NewMergeChunkQuerier(p, qs, NewCompactingChunkSeriesMerger(nil)).Select(context.Background(), false, nil)
             for merged.Next() {
                 require.True(t, tc.expected.Next(), "Expected Next() to be true")
                 actualSeries := merged.At()
@@ -1443,6 +1443,8 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
         expectedErrs [4]error
     }{
         {
+            // NewMergeQuerier will not create a mergeGenericQuerier
+            // with just one querier inside, but we can test it anyway.
             name:     "one successful primary querier",
             queriers: []genericQuerier{&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}},
             expectedSelectsSeries: []labels.Labels{
@@ -1551,12 +1553,8 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {

         for _, qr := range q.queriers {
             m := unwrapMockGenericQuerier(t, qr)
-            exp := []bool{true}
-            if len(q.queriers) == 1 {
-                exp[0] = false
-            }
-            require.Equal(t, exp, m.sortedSeriesRequested)
+            // mergeGenericQuerier forces all Selects to be sorted.
+            require.Equal(t, []bool{true}, m.sortedSeriesRequested)
         }
     })
     t.Run("LabelNames", func(t *testing.T) {
@@ -231,6 +231,7 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
         httpResp.Body.Close()
     }()

+    //nolint:usestdlibvars
     if httpResp.StatusCode/100 != 2 {
         scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen))
         line := ""
@@ -239,6 +240,7 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
         }
         err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line)
     }
+    //nolint:usestdlibvars
     if httpResp.StatusCode/100 == 5 ||
         (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) {
         return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
@@ -323,6 +325,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
         return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err)
     }

+    //nolint:usestdlibvars
    if httpResp.StatusCode/100 != 2 {
        return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed)))
    }
@@ -16,6 +16,7 @@ package remote
 import (
     "context"
     "errors"
+    "fmt"
     "math"
     "strconv"
     "sync"
@@ -1224,12 +1225,16 @@ func (s *shards) stop() {
     // Force an unclean shutdown.
     s.hardShutdown()
     <-s.done
-    if dropped := s.samplesDroppedOnHardShutdown.Load(); dropped > 0 {
-        level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown", "count", dropped)
-    }
-    if dropped := s.exemplarsDroppedOnHardShutdown.Load(); dropped > 0 {
-        level.Error(s.qm.logger).Log("msg", "Failed to flush all exemplars on shutdown", "count", dropped)
-    }
+
+    // Log error for any dropped samples, exemplars, or histograms.
+    logDroppedError := func(t string, counter atomic.Uint32) {
+        if dropped := counter.Load(); dropped > 0 {
+            level.Error(s.qm.logger).Log("msg", fmt.Sprintf("Failed to flush all %s on shutdown", t), "count", dropped)
+        }
+    }
+    logDroppedError("samples", s.samplesDroppedOnHardShutdown)
+    logDroppedError("exemplars", s.exemplarsDroppedOnHardShutdown)
+    logDroppedError("histograms", s.histogramsDroppedOnHardShutdown)
 }

 // enqueue data (sample or exemplar). If the shard is full, shutting down, or
@@ -1537,7 +1542,7 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
     begin := time.Now()
     err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, pBuf, buf)
     if err != nil {
-        level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err)
+        level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "histogramCount", histogramCount, "err", err)
         s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount))
         s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount))
         s.qm.metrics.failedHistogramsTotal.Add(float64(histogramCount))
@@ -1778,9 +1783,11 @@ func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeri
         if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp < lowest {
             lowest = ts.Histograms[0].Timestamp
         }
-        // Move the current element to the write position and increment the write pointer
-        timeSeries[keepIdx] = timeSeries[i]
+        if i != keepIdx {
+            // We have to swap the kept timeseries with the one which should be dropped.
+            // Copying any elements within timeSeries could cause data corruptions when reusing the slice in a next batch (shards.populateTimeSeries).
+            timeSeries[keepIdx], timeSeries[i] = timeSeries[i], timeSeries[keepIdx]
+        }
         keepIdx++
     }

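The swap in buildTimeSeries is subtle enough to deserve a standalone illustration: copying timeSeries[i] into timeSeries[keepIdx] would leave two slots aliasing the same backing arrays once the slice is reused for the next batch, while swapping keeps every element unique. A self-contained sketch of the same in-place filter:

package main

import "fmt"

// compact keeps the elements that satisfy keep, in place, by swapping
// rather than copying, so no two slots ever alias the same value.
func compact(xs []*int, keep func(int) bool) []*int {
    keepIdx := 0
    for i := range xs {
        if !keep(*xs[i]) {
            continue
        }
        if i != keepIdx {
            xs[keepIdx], xs[i] = xs[i], xs[keepIdx] // swap, don't copy
        }
        keepIdx++
    }
    return xs[:keepIdx]
}

func main() {
    a, b, c := 1, 2, 3
    xs := []*int{&a, &b, &c}
    kept := compact(xs, func(v int) bool { return v != 2 })
    fmt.Println(*kept[0], *kept[1]) // 1 3
    // The dropped element still lives past the kept region; no aliasing.
    fmt.Println(*xs[2]) // 2
}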
@@ -17,6 +17,7 @@ import (
     "context"
     "fmt"
     "math"
+    "math/rand"
     "os"
     "runtime/pprof"
     "sort"
@@ -29,6 +30,7 @@ import (
     "github.com/go-kit/log"
     "github.com/gogo/protobuf/proto"
     "github.com/golang/snappy"
+    "github.com/google/go-cmp/cmp"
     "github.com/prometheus/client_golang/prometheus"
     client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
     "github.com/prometheus/common/model"
@@ -611,6 +613,30 @@ func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([
     return samples, series
 }

+func createProtoTimeseriesWithOld(numSamples, baseTs int64, extraLabels ...labels.Label) []prompb.TimeSeries {
+    samples := make([]prompb.TimeSeries, numSamples)
+    // use a fixed rand source so tests are consistent
+    r := rand.New(rand.NewSource(99))
+    for j := int64(0); j < numSamples; j++ {
+        name := fmt.Sprintf("test_metric_%d", j)
+
+        samples[j] = prompb.TimeSeries{
+            Labels: []prompb.Label{{Name: "__name__", Value: name}},
+            Samples: []prompb.Sample{
+                {
+                    Timestamp: baseTs + j,
+                    Value:     float64(j),
+                },
+            },
+        }
+        // 10% of the time use a ts that is too old
+        if r.Intn(10) == 0 {
+            samples[j].Samples[0].Timestamp = baseTs - 5
+        }
+    }
+    return samples
+}
+
 func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []record.RefSeries) {
     exemplars := make([]record.RefExemplar, 0, numExemplars)
     series := make([]record.RefSeries, 0, numSeries)
@@ -679,8 +705,8 @@ func createHistograms(numSamples, numSeries int, floatHistogram bool) ([]record.
     return histograms, nil, series
 }

-func getSeriesNameFromRef(r record.RefSeries) string {
-    return r.Labels.Get("__name__")
+func getSeriesIDFromRef(r record.RefSeries) string {
+    return r.Labels.String()
 }

 type TestWriteClient struct {
@@ -698,6 +724,9 @@ type TestWriteClient struct {
     wg  sync.WaitGroup
     mtx sync.Mutex
     buf []byte
+
+    storeWait   time.Duration
+    returnError error
 }

 func NewTestWriteClient() *TestWriteClient {
@@ -706,6 +735,8 @@ func NewTestWriteClient() *TestWriteClient {
         receivedSamples:  map[string][]prompb.Sample{},
         expectedSamples:  map[string][]prompb.Sample{},
         receivedMetadata: map[string][]prompb.MetricMetadata{},
+        storeWait:        0,
+        returnError:      nil,
     }
 }

@@ -720,12 +751,15 @@ func (c *TestWriteClient) expectSamples(ss []record.RefSample, series []record.R
     c.receivedSamples = map[string][]prompb.Sample{}

     for _, s := range ss {
-        seriesName := getSeriesNameFromRef(series[s.Ref])
-        c.expectedSamples[seriesName] = append(c.expectedSamples[seriesName], prompb.Sample{
+        tsID := getSeriesIDFromRef(series[s.Ref])
+        c.expectedSamples[tsID] = append(c.expectedSamples[tsID], prompb.Sample{
             Timestamp: s.T,
             Value:     s.V,
         })
     }
+    if !c.withWaitGroup {
+        return
+    }
     c.wg.Add(len(ss))
 }

@@ -740,13 +774,13 @@ func (c *TestWriteClient) expectExemplars(ss []record.RefExemplar, series []reco
     c.receivedExemplars = map[string][]prompb.Exemplar{}

     for _, s := range ss {
-        seriesName := getSeriesNameFromRef(series[s.Ref])
+        tsID := getSeriesIDFromRef(series[s.Ref])
         e := prompb.Exemplar{
             Labels:    LabelsToLabelsProto(s.Labels, nil),
             Timestamp: s.T,
             Value:     s.V,
         }
-        c.expectedExemplars[seriesName] = append(c.expectedExemplars[seriesName], e)
+        c.expectedExemplars[tsID] = append(c.expectedExemplars[tsID], e)
     }
     c.wg.Add(len(ss))
 }
@@ -762,8 +796,8 @@ func (c *TestWriteClient) expectHistograms(hh []record.RefHistogramSample, serie
     c.receivedHistograms = map[string][]prompb.Histogram{}

     for _, h := range hh {
-        seriesName := getSeriesNameFromRef(series[h.Ref])
-        c.expectedHistograms[seriesName] = append(c.expectedHistograms[seriesName], HistogramToHistogramProto(h.T, h.H))
+        tsID := getSeriesIDFromRef(series[h.Ref])
+        c.expectedHistograms[tsID] = append(c.expectedHistograms[tsID], HistogramToHistogramProto(h.T, h.H))
     }
     c.wg.Add(len(hh))
 }
@@ -779,8 +813,8 @@ func (c *TestWriteClient) expectFloatHistograms(fhs []record.RefFloatHistogramSa
     c.receivedFloatHistograms = map[string][]prompb.Histogram{}

     for _, fh := range fhs {
-        seriesName := getSeriesNameFromRef(series[fh.Ref])
-        c.expectedFloatHistograms[seriesName] = append(c.expectedFloatHistograms[seriesName], FloatHistogramToHistogramProto(fh.T, fh.FH))
+        tsID := getSeriesIDFromRef(series[fh.Ref])
+        c.expectedFloatHistograms[tsID] = append(c.expectedFloatHistograms[tsID], FloatHistogramToHistogramProto(fh.T, fh.FH))
     }
     c.wg.Add(len(fhs))
 }
@@ -806,9 +840,27 @@ func (c *TestWriteClient) waitForExpectedData(tb testing.TB) {
     }
 }

+func (c *TestWriteClient) SetStoreWait(w time.Duration) {
+    c.mtx.Lock()
+    defer c.mtx.Unlock()
+    c.storeWait = w
+}
+
+func (c *TestWriteClient) SetReturnError(err error) {
+    c.mtx.Lock()
+    defer c.mtx.Unlock()
+    c.returnError = err
+}
+
 func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
     c.mtx.Lock()
     defer c.mtx.Unlock()
+    if c.storeWait > 0 {
+        time.Sleep(c.storeWait)
+    }
+    if c.returnError != nil {
+        return c.returnError
+    }
     // nil buffers are ok for snappy, ignore cast error.
     if c.buf != nil {
         c.buf = c.buf[:cap(c.buf)]
@@ -827,23 +879,23 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
     count := 0
     for _, ts := range reqProto.Timeseries {
         labels := LabelProtosToLabels(&builder, ts.Labels)
-        seriesName := labels.Get("__name__")
+        tsID := labels.String()
         for _, sample := range ts.Samples {
             count++
-            c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample)
+            c.receivedSamples[tsID] = append(c.receivedSamples[tsID], sample)
         }

         for _, ex := range ts.Exemplars {
             count++
-            c.receivedExemplars[seriesName] = append(c.receivedExemplars[seriesName], ex)
+            c.receivedExemplars[tsID] = append(c.receivedExemplars[tsID], ex)
         }

         for _, histogram := range ts.Histograms {
             count++
             if histogram.IsFloatHistogram() {
-                c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], histogram)
+                c.receivedFloatHistograms[tsID] = append(c.receivedFloatHistograms[tsID], histogram)
             } else {
-                c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], histogram)
+                c.receivedHistograms[tsID] = append(c.receivedHistograms[tsID], histogram)
             }
         }
     }
@@ -1441,6 +1493,99 @@ func TestIsSampleOld(t *testing.T) {
     require.False(t, isSampleOld(currentTime, 60*time.Second, timestamp.FromTime(currentTime.Add(-59*time.Second))))
 }

+// Simulates scenario in which remote write endpoint is down and a subset of samples is dropped due to age limit while backoffing.
+func TestSendSamplesWithBackoffWithSampleAgeLimit(t *testing.T) {
+    maxSamplesPerSend := 10
+    sampleAgeLimit := time.Second
+
+    cfg := config.DefaultQueueConfig
+    cfg.MaxShards = 1
+    cfg.SampleAgeLimit = model.Duration(sampleAgeLimit)
+    // Set the batch send deadline to 5 minutes to effectively disable it.
+    cfg.BatchSendDeadline = model.Duration(time.Minute * 5)
+    cfg.Capacity = 10 * maxSamplesPerSend // more than the amount of data we append in the test
+    cfg.MaxBackoff = model.Duration(time.Millisecond * 100)
+    cfg.MinBackoff = model.Duration(time.Millisecond * 100)
+    cfg.MaxSamplesPerSend = maxSamplesPerSend
+    metadataCfg := config.DefaultMetadataConfig
+    metadataCfg.Send = true
+    metadataCfg.SendInterval = model.Duration(time.Second * 60)
+    metadataCfg.MaxSamplesPerSend = maxSamplesPerSend
+    c := NewTestWriteClient()
+    c.withWaitGroup = false
+    m := newTestQueueManager(t, cfg, metadataCfg, time.Second, c)
+
+    m.Start()
+
+    batchID := 0
+    expectedSamples := map[string][]prompb.Sample{}
+
+    appendData := func(numberOfSeries int, timeAdd time.Duration, shouldBeDropped bool) {
+        t.Log(">>>> Appending series ", numberOfSeries, " as batch ID ", batchID, " with timeAdd ", timeAdd, " and should be dropped ", shouldBeDropped)
+        samples, series := createTimeseriesWithRandomLabelCount(strconv.Itoa(batchID), numberOfSeries, timeAdd, 9)
+        m.StoreSeries(series, batchID)
+        sent := m.Append(samples)
+        require.True(t, sent, "samples not sent")
+        if !shouldBeDropped {
+            for _, s := range samples {
+                tsID := getSeriesIDFromRef(series[s.Ref])
+                expectedSamples[tsID] = append(c.expectedSamples[tsID], prompb.Sample{
+                    Timestamp: s.T,
+                    Value:     s.V,
+                })
+            }
+        }
+        batchID++
+    }
+    timeShift := -time.Millisecond * 5
+
+    c.SetReturnError(RecoverableError{context.DeadlineExceeded, defaultBackoff})
+
+    appendData(maxSamplesPerSend/2, timeShift, true)
+    time.Sleep(sampleAgeLimit)
+    appendData(maxSamplesPerSend/2, timeShift, true)
+    time.Sleep(sampleAgeLimit / 10)
+    appendData(maxSamplesPerSend/2, timeShift, true)
+    time.Sleep(2 * sampleAgeLimit)
+    appendData(2*maxSamplesPerSend, timeShift, false)
+    time.Sleep(sampleAgeLimit / 2)
+    c.SetReturnError(nil)
+    appendData(5, timeShift, false)
+    m.Stop()
+
+    if diff := cmp.Diff(expectedSamples, c.receivedSamples); diff != "" {
+        t.Errorf("mismatch (-want +got):\n%s", diff)
+    }
+}
+
+func createTimeseriesWithRandomLabelCount(id string, seriesCount int, timeAdd time.Duration, maxLabels int) ([]record.RefSample, []record.RefSeries) {
+    samples := []record.RefSample{}
+    series := []record.RefSeries{}
+    // use a fixed rand source so tests are consistent
+    r := rand.New(rand.NewSource(99))
+    for i := 0; i < seriesCount; i++ {
+        s := record.RefSample{
+            Ref: chunks.HeadSeriesRef(i),
+            T:   time.Now().Add(timeAdd).UnixMilli(),
+            V:   r.Float64(),
+        }
+        samples = append(samples, s)
+        labelsCount := r.Intn(maxLabels)
+        lb := labels.NewScratchBuilder(1 + labelsCount)
+        lb.Add("__name__", "batch_"+id+"_id_"+strconv.Itoa(i))
+        for j := 1; j < labelsCount+1; j++ {
+            // same for both name and value
+            label := "batch_" + id + "_label_" + strconv.Itoa(j)
+            lb.Add(label, label)
+        }
+        series = append(series, record.RefSeries{
+            Ref:    chunks.HeadSeriesRef(i),
+            Labels: lb.Labels(),
+        })
+    }
+    return samples, series
+}
+
 func createTimeseriesWithOldSamples(numSamples, numSeries int, extraLabels ...labels.Label) ([]record.RefSample, []record.RefSample, []record.RefSeries) {
     newSamples := make([]record.RefSample, 0, numSamples)
     samples := make([]record.RefSample, 0, numSamples)
@@ -1668,3 +1813,14 @@ func TestBuildTimeSeries(t *testing.T) {
         })
     }
 }
+
+func BenchmarkBuildTimeSeries(b *testing.B) {
+    // Send one sample per series, which is the typical remote_write case
+    const numSamples = 10000
+    filter := func(ts prompb.TimeSeries) bool { return filterTsLimit(99, ts) }
+    for i := 0; i < b.N; i++ {
+        samples := createProtoTimeseriesWithOld(numSamples, 100, extraLabels...)
+        _, _, result, _, _, _ := buildTimeSeries(samples, filter)
+        require.NotNil(b, result)
+    }
+}
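TestWriteClient's new SetStoreWait/SetReturnError knobs implement the injectable-fault test double that TestSendSamplesWithBackoffWithSampleAgeLimit drives: flip the client to "endpoint down", let samples age out during backoff, then recover. A condensed, runnable sketch of the pattern with stand-in types:

package main

import (
    "errors"
    "fmt"
    "sync"
    "time"
)

// fakeClient is a mutex-guarded test double whose fault behaviour can
// be changed while the system under test keeps calling Store.
type fakeClient struct {
    mtx         sync.Mutex
    storeWait   time.Duration
    returnError error
}

func (c *fakeClient) SetReturnError(err error) {
    c.mtx.Lock()
    defer c.mtx.Unlock()
    c.returnError = err
}

func (c *fakeClient) Store(req []byte) error {
    c.mtx.Lock()
    defer c.mtx.Unlock()
    if c.storeWait > 0 {
        time.Sleep(c.storeWait) // simulate a slow endpoint
    }
    return c.returnError // nil once the "endpoint" recovers
}

func main() {
    c := &fakeClient{}
    c.SetReturnError(errors.New("deadline exceeded"))
    fmt.Println(c.Store(nil)) // deadline exceeded
    c.SetReturnError(nil)
    fmt.Println(c.Store(nil)) // <nil>
}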
@@ -18,6 +18,7 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
+	"time"
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
@@ -25,7 +26,9 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/storage"
 	otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
@@ -38,6 +41,8 @@ type writeHandler struct {
 	samplesWithInvalidLabelsTotal prometheus.Counter
 }
 
+const maxAheadTime = 10 * time.Minute
+
 // NewWriteHandler creates a http.Handler that accepts remote write requests and
 // writes them to the provided appendable.
 func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable) http.Handler {
@@ -104,17 +109,22 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 	outOfOrderExemplarErrs := 0
 	samplesWithInvalidLabels := 0
 
-	app := h.appendable.Appender(ctx)
+	timeLimitApp := &timeLimitAppender{
+		Appender: h.appendable.Appender(ctx),
+		maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
+	}
+
 	defer func() {
 		if err != nil {
-			_ = app.Rollback()
+			_ = timeLimitApp.Rollback()
 			return
 		}
-		err = app.Commit()
+		err = timeLimitApp.Commit()
 	}()
 
 	b := labels.NewScratchBuilder(0)
 	var exemplarErr error
 
 	for _, ts := range req.Timeseries {
 		labels := LabelProtosToLabels(&b, ts.Labels)
 		if !labels.IsValid() {
@@ -124,7 +134,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 		}
 		var ref storage.SeriesRef
 		for _, s := range ts.Samples {
-			ref, err = app.Append(ref, labels, s.Timestamp, s.Value)
+			ref, err = timeLimitApp.Append(ref, labels, s.Timestamp, s.Value)
 			if err != nil {
 				unwrappedErr := errors.Unwrap(err)
 				if unwrappedErr == nil {
@@ -140,7 +150,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 		for _, ep := range ts.Exemplars {
 			e := exemplarProtoToExemplar(&b, ep)
 
-			_, exemplarErr = app.AppendExemplar(0, labels, e)
+			_, exemplarErr = timeLimitApp.AppendExemplar(0, labels, e)
 			exemplarErr = h.checkAppendExemplarError(exemplarErr, e, &outOfOrderExemplarErrs)
 			if exemplarErr != nil {
 				// Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
@@ -151,11 +161,12 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 		for _, hp := range ts.Histograms {
 			if hp.IsFloatHistogram() {
 				fhs := FloatHistogramProtoToFloatHistogram(hp)
-				_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, fhs)
+				_, err = timeLimitApp.AppendHistogram(0, labels, hp.Timestamp, nil, fhs)
 			} else {
 				hs := HistogramProtoToHistogram(hp)
-				_, err = app.AppendHistogram(0, labels, hp.Timestamp, hs, nil)
+				_, err = timeLimitApp.AppendHistogram(0, labels, hp.Timestamp, hs, nil)
 			}
+
 			if err != nil {
 				unwrappedErr := errors.Unwrap(err)
 				if unwrappedErr == nil {
@@ -233,3 +244,45 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 	w.WriteHeader(http.StatusOK)
 }
+
+type timeLimitAppender struct {
+	storage.Appender
+
+	maxTime int64
+}
+
+func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
+	if t > app.maxTime {
+		return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
+	}
+
+	ref, err := app.Appender.Append(ref, lset, t, v)
+	if err != nil {
+		return 0, err
+	}
+	return ref, nil
+}
+
+func (app *timeLimitAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
+	if t > app.maxTime {
+		return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
+	}
+
+	ref, err := app.Appender.AppendHistogram(ref, l, t, h, fh)
+	if err != nil {
+		return 0, err
+	}
+	return ref, nil
+}
+
+func (app *timeLimitAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
+	if e.Ts > app.maxTime {
+		return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
+	}
+
+	ref, err := app.Appender.AppendExemplar(ref, l, e)
+	if err != nil {
+		return 0, err
+	}
+	return ref, nil
+}

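Note on the change above: `timeLimitAppender` works through Go struct embedding. It embeds a `storage.Appender`, overrides only `Append`, `AppendHistogram`, and `AppendExemplar`, and inherits `Commit` and `Rollback` unchanged. A minimal, self-contained sketch of the same wrapper pattern follows; the `Sink`, `memSink`, and `capped` names are illustrative stand-ins, not part of the patch:

package main

import (
	"errors"
	"fmt"
)

// Sink is a toy stand-in for storage.Appender.
type Sink interface {
	Append(t int64, v float64) error
	Commit() error
}

// memSink accepts everything; it plays the role of the wrapped appender.
type memSink struct{ n int }

func (s *memSink) Append(t int64, v float64) error { s.n++; return nil }
func (s *memSink) Commit() error                   { return nil }

// errOutOfBounds stands in for storage.ErrOutOfBounds.
var errOutOfBounds = errors.New("out of bounds")

// capped embeds a Sink and overrides only Append, mirroring how
// timeLimitAppender embeds storage.Appender; Commit is inherited.
type capped struct {
	Sink
	maxTime int64
}

func (c *capped) Append(t int64, v float64) error {
	if t > c.maxTime {
		return fmt.Errorf("%w: timestamp is too far in the future", errOutOfBounds)
	}
	return c.Sink.Append(t, v) // delegate to the wrapped sink
}

func main() {
	s := &capped{Sink: &memSink{}, maxTime: 1000}
	fmt.Println(s.Append(500, 1))  // <nil>
	fmt.Println(s.Append(2000, 1)) // out of bounds: timestamp is too far in the future
}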
@@ -18,6 +18,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"math"
 	"net/http"
 	"net/http/httptest"
 	"strconv"
@@ -87,73 +88,127 @@ func TestRemoteWriteHandler(t *testing.T) {
 }
 
 func TestOutOfOrderSample(t *testing.T) {
-	buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
-		Labels:  []prompb.Label{{Name: "__name__", Value: "test_metric"}},
-		Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
-	}}, nil, nil, nil, nil)
-	require.NoError(t, err)
-
-	req, err := http.NewRequest("", "", bytes.NewReader(buf))
-	require.NoError(t, err)
-
-	appendable := &mockAppendable{
-		latestSample: 100,
+	tests := []struct {
+		Name      string
+		Timestamp int64
+	}{
+		{
+			Name:      "historic",
+			Timestamp: 0,
+		},
+		{
+			Name:      "future",
+			Timestamp: math.MaxInt64,
+		},
 	}
-	handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
 
-	recorder := httptest.NewRecorder()
-	handler.ServeHTTP(recorder, req)
+	for _, tc := range tests {
+		t.Run(tc.Name, func(t *testing.T) {
+			buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
+				Labels:  []prompb.Label{{Name: "__name__", Value: "test_metric"}},
+				Samples: []prompb.Sample{{Value: 1, Timestamp: tc.Timestamp}},
+			}}, nil, nil, nil, nil)
+			require.NoError(t, err)
 
-	resp := recorder.Result()
-	require.Equal(t, http.StatusBadRequest, resp.StatusCode)
+			req, err := http.NewRequest("", "", bytes.NewReader(buf))
+			require.NoError(t, err)
+
+			appendable := &mockAppendable{
+				latestSample: 100,
+			}
+			handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+
+			recorder := httptest.NewRecorder()
+			handler.ServeHTTP(recorder, req)
+
+			resp := recorder.Result()
+			require.Equal(t, http.StatusBadRequest, resp.StatusCode)
+		})
+	}
 }
 
 // This test case currently aims to verify that the WriteHandler endpoint
 // don't fail on ingestion errors since the exemplar storage is
 // still experimental.
 func TestOutOfOrderExemplar(t *testing.T) {
-	buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
-		Labels:    []prompb.Label{{Name: "__name__", Value: "test_metric"}},
-		Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "foo", Value: "bar"}}, Value: 1, Timestamp: 0}},
-	}}, nil, nil, nil, nil)
-	require.NoError(t, err)
-
-	req, err := http.NewRequest("", "", bytes.NewReader(buf))
-	require.NoError(t, err)
-
-	appendable := &mockAppendable{
-		latestExemplar: 100,
+	tests := []struct {
+		Name      string
+		Timestamp int64
+	}{
+		{
+			Name:      "historic",
+			Timestamp: 0,
+		},
+		{
+			Name:      "future",
+			Timestamp: math.MaxInt64,
+		},
 	}
-	handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
 
-	recorder := httptest.NewRecorder()
-	handler.ServeHTTP(recorder, req)
+	for _, tc := range tests {
+		t.Run(tc.Name, func(t *testing.T) {
+			buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
+				Labels:    []prompb.Label{{Name: "__name__", Value: "test_metric"}},
+				Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "foo", Value: "bar"}}, Value: 1, Timestamp: tc.Timestamp}},
+			}}, nil, nil, nil, nil)
+			require.NoError(t, err)
 
-	resp := recorder.Result()
-	// TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental.
-	require.Equal(t, http.StatusNoContent, resp.StatusCode)
+			req, err := http.NewRequest("", "", bytes.NewReader(buf))
+			require.NoError(t, err)
+
+			appendable := &mockAppendable{
+				latestExemplar: 100,
+			}
+			handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+
+			recorder := httptest.NewRecorder()
+			handler.ServeHTTP(recorder, req)
+
+			resp := recorder.Result()
+			// TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental.
+			require.Equal(t, http.StatusNoContent, resp.StatusCode)
+		})
+	}
 }
 
 func TestOutOfOrderHistogram(t *testing.T) {
-	buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
-		Labels:     []prompb.Label{{Name: "__name__", Value: "test_metric"}},
-		Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))},
-	}}, nil, nil, nil, nil)
-	require.NoError(t, err)
-
-	req, err := http.NewRequest("", "", bytes.NewReader(buf))
-	require.NoError(t, err)
-
-	appendable := &mockAppendable{
-		latestHistogram: 100,
+	tests := []struct {
+		Name      string
+		Timestamp int64
+	}{
+		{
+			Name:      "historic",
+			Timestamp: 0,
+		},
+		{
+			Name:      "future",
+			Timestamp: math.MaxInt64,
+		},
 	}
-	handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
 
-	recorder := httptest.NewRecorder()
-	handler.ServeHTTP(recorder, req)
+	for _, tc := range tests {
+		t.Run(tc.Name, func(t *testing.T) {
+			buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
+				Labels:     []prompb.Label{{Name: "__name__", Value: "test_metric"}},
+				Histograms: []prompb.Histogram{HistogramToHistogramProto(tc.Timestamp, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))},
+			}}, nil, nil, nil, nil)
+			require.NoError(t, err)
 
-	resp := recorder.Result()
-	require.Equal(t, http.StatusBadRequest, resp.StatusCode)
+			req, err := http.NewRequest("", "", bytes.NewReader(buf))
+			require.NoError(t, err)
+
+			appendable := &mockAppendable{
+				latestHistogram: 100,
+			}
+			handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+
+			recorder := httptest.NewRecorder()
+			handler.ServeHTTP(recorder, req)
+
+			resp := recorder.Result()
+			require.Equal(t, http.StatusBadRequest, resp.StatusCode)
+		})
+	}
 }
 
 func BenchmarkRemoteWritehandler(b *testing.B) {

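The table-driven tests above now exercise two paths per sample type: a historic timestamp rejected by the mock appendable and a `math.MaxInt64` timestamp rejected by `timeLimitAppender`, both surfacing as HTTP 400 (exemplars excepted). Because the appender wraps the sentinel error with `%w`, callers can still detect it after wrapping. A small sketch of that detection, using a local stand-in for `storage.ErrOutOfBounds`:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// errOutOfBounds stands in for storage.ErrOutOfBounds.
var errOutOfBounds = errors.New("out of bounds")

// statusFor sketches the handler's mapping: out-of-bounds timestamps
// become 400, which is exactly what the tests above assert.
func statusFor(err error) int {
	if errors.Is(err, errOutOfBounds) {
		return http.StatusBadRequest
	}
	return http.StatusNoContent
}

func main() {
	err := fmt.Errorf("%w: timestamp is too far in the future", errOutOfBounds)
	fmt.Println(errors.Unwrap(err) == errOutOfBounds) // true: %w keeps the sentinel reachable
	fmt.Println(statusFor(err))                       // 400
}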
@@ -76,6 +76,7 @@ func (c *FloatHistogramChunk) NumSamples() int {
 func (c *FloatHistogramChunk) Layout() (
 	schema int32, zeroThreshold float64,
 	negativeSpans, positiveSpans []histogram.Span,
+	customValues []float64,
 	err error,
 ) {
 	if c.NumSamples() == 0 {
@@ -133,17 +134,18 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) {
 	a := &FloatHistogramAppender{
 		b: &c.b,
 
 		schema:     it.schema,
 		zThreshold: it.zThreshold,
 		pSpans:     it.pSpans,
 		nSpans:     it.nSpans,
-		t:          it.t,
-		tDelta:     it.tDelta,
-		cnt:        it.cnt,
-		zCnt:       it.zCnt,
-		pBuckets:   pBuckets,
-		nBuckets:   nBuckets,
-		sum:        it.sum,
+		customValues: it.customValues,
+		t:            it.t,
+		tDelta:       it.tDelta,
+		cnt:          it.cnt,
+		zCnt:         it.zCnt,
+		pBuckets:     pBuckets,
+		nBuckets:     nBuckets,
+		sum:          it.sum,
 	}
 	if it.numTotal == 0 {
 		a.sum.leading = 0xff
@@ -191,6 +193,7 @@ type FloatHistogramAppender struct {
 	schema         int32
 	zThreshold     float64
 	pSpans, nSpans []histogram.Span
+	customValues   []float64
 
 	t, tDelta      int64
 	sum, cnt, zCnt xorValue
@@ -222,6 +225,7 @@ func (a *FloatHistogramAppender) Append(int64, float64) {
 //
 // The chunk is not appendable in the following cases:
 // - The schema has changed.
+// - The custom bounds have changed if the current schema is custom buckets.
 // - The threshold for the zero bucket has changed.
 // - Any buckets have disappeared.
 // - There was a counter reset in the count of observations or in any bucket, including the zero bucket.
@@ -263,6 +267,11 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
 		return
 	}
 
+	if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
+		counterReset = true
+		return
+	}
+
 	if h.ZeroCount < a.zCnt.value {
 		// There has been a counter reset since ZeroThreshold didn't change.
 		counterReset = true
@@ -303,6 +312,7 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
 //
 // The chunk is not appendable in the following cases:
 // - The schema has changed.
+// - The custom bounds have changed if the current schema is custom buckets.
 // - The threshold for the zero bucket has changed.
 // - The last sample in the chunk was stale while the current sample is not stale.
 func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) (
@@ -329,6 +339,10 @@ func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) (
 		return
 	}
 
+	if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
+		return
+	}
+
 	positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
 	negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
 	okToAppend = true
@@ -422,7 +436,7 @@ func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.Floa
 	if num == 0 {
 		// The first append gets the privilege to dictate the layout
 		// but it's also responsible for encoding it into the chunk!
-		writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans)
+		writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans, h.CustomValues)
 		a.schema = h.Schema
 		a.zThreshold = h.ZeroThreshold
 
@@ -438,6 +452,12 @@ func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.Floa
 		} else {
 			a.nSpans = nil
 		}
+		if len(h.CustomValues) > 0 {
+			a.customValues = make([]float64, len(h.CustomValues))
+			copy(a.customValues, h.CustomValues)
+		} else {
+			a.customValues = nil
+		}
 
 		numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
 		if numPBuckets > 0 {
@@ -693,6 +713,7 @@ type floatHistogramIterator struct {
 	schema         int32
 	zThreshold     float64
 	pSpans, nSpans []histogram.Span
+	customValues   []float64
 
 	// For the fields that are tracked as deltas and ultimately dod's.
 	t int64
@@ -753,6 +774,7 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
 			NegativeSpans:   it.nSpans,
 			PositiveBuckets: it.pBuckets,
 			NegativeBuckets: it.nBuckets,
+			CustomValues:    it.customValues,
 		}
 	}
 
@@ -775,6 +797,9 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
 	fh.NegativeBuckets = resize(fh.NegativeBuckets, len(it.nBuckets))
 	copy(fh.NegativeBuckets, it.nBuckets)
 
+	fh.CustomValues = resize(fh.CustomValues, len(it.customValues))
+	copy(fh.CustomValues, it.customValues)
+
 	return it.t, fh
 }
 
@@ -819,7 +844,7 @@ func (it *floatHistogramIterator) Next() ValueType {
 		// The first read is responsible for reading the chunk layout
 		// and for initializing fields that depend on it. We give
 		// counter reset info at chunk level, hence we discard it here.
-		schema, zeroThreshold, posSpans, negSpans, err := readHistogramChunkLayout(&it.br)
+		schema, zeroThreshold, posSpans, negSpans, customValues, err := readHistogramChunkLayout(&it.br)
 		if err != nil {
 			it.err = err
 			return ValNone
@@ -827,6 +852,7 @@ func (it *floatHistogramIterator) Next() ValueType {
 		it.schema = schema
 		it.zThreshold = zeroThreshold
 		it.pSpans, it.nSpans = posSpans, negSpans
+		it.customValues = customValues
 		numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans)
 		// Allocate bucket slices as needed, recycling existing slices
 		// in case this iterator was reset and already has slices of a

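For orientation: the custom-buckets schema carries its bucket boundaries in `CustomValues`, and the appender logic above cuts a new chunk whenever those bounds change. A sketch of constructing such a histogram follows; the field values are illustrative, only `CustomBucketsSchema`, `IsCustomBucketsSchema`, and the struct fields are taken from the diff:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// A float histogram using custom bucket boundaries instead of the
	// exponential schema. CustomValues holds the upper bounds; the zero
	// bucket and negative buckets are unused for this schema.
	h := &histogram.FloatHistogram{
		Schema: histogram.CustomBucketsSchema,
		Count:  24,
		Sum:    18.4,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 2, Length: 1},
		},
		PositiveBuckets: []float64{6, 3, 15}, // sums to Count
		CustomValues:    []float64{0.1, 0.5, 1, 2.5, 5},
	}
	fmt.Println(histogram.IsCustomBucketsSchema(h.Schema)) // true
}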
@@ -280,7 +280,38 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) {
 }
 
 func TestFloatHistogramChunkAppendable(t *testing.T) {
-	setup := func() (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) {
+	eh := &histogram.FloatHistogram{
+		Count:         5,
+		ZeroCount:     2,
+		Sum:           18.4,
+		ZeroThreshold: 1e-125,
+		Schema:        1,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 2, Length: 1},
+			{Offset: 3, Length: 2},
+			{Offset: 3, Length: 1},
+			{Offset: 1, Length: 1},
+		},
+		PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
+	}
+
+	cbh := &histogram.FloatHistogram{
+		Count:  24,
+		Sum:    18.4,
+		Schema: histogram.CustomBucketsSchema,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 2, Length: 1},
+			{Offset: 3, Length: 2},
+			{Offset: 3, Length: 1},
+			{Offset: 1, Length: 1},
+		},
+		PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
+		CustomValues:    []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
+	}
+
+	setup := func(h *histogram.FloatHistogram) (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) {
 		c := Chunk(NewFloatHistogramChunk())
 
 		// Create fresh appender and add the first histogram.
@@ -289,32 +320,17 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		require.Equal(t, 0, c.NumSamples())
 
 		ts := int64(1234567890)
-		h1 := &histogram.FloatHistogram{
-			Count:         5,
-			ZeroCount:     2,
-			Sum:           18.4,
-			ZeroThreshold: 1e-125,
-			Schema:        1,
-			PositiveSpans: []histogram.Span{
-				{Offset: 0, Length: 2},
-				{Offset: 2, Length: 1},
-				{Offset: 3, Length: 2},
-				{Offset: 3, Length: 1},
-				{Offset: 1, Length: 1},
-			},
-			PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
-		}
 
-		chk, _, app, err := app.AppendFloatHistogram(nil, ts, h1.Copy(), false)
+		chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.Copy(), false)
 		require.NoError(t, err)
 		require.Nil(t, chk)
 		require.Equal(t, 1, c.NumSamples())
 		require.Equal(t, UnknownCounterReset, c.(*FloatHistogramChunk).GetCounterResetHeader())
-		return c, app.(*FloatHistogramAppender), ts, h1
+		return c, app.(*FloatHistogramAppender), ts, h
 	}
 
 	{ // Schema change.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.Schema++
 		_, _, ok, _ := hApp.appendable(h2)
@@ -324,7 +340,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // Zero threshold change.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.ZeroThreshold += 0.1
 		_, _, ok, _ := hApp.appendable(h2)
@@ -334,7 +350,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // New histogram that has more buckets.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 3},
@@ -357,7 +373,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // New histogram that has a bucket missing.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
@@ -379,7 +395,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // New histogram that has a counter reset while buckets are same.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.Sum = 23
 		h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1}
@@ -394,7 +410,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // New histogram that has a counter reset while new buckets were added.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 3},
@@ -415,7 +431,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		// New histogram that has a counter reset while new buckets were
 		// added before the first bucket and reset on first bucket. (to
 		// catch the edge case where the new bucket should be forwarded
@@ -442,7 +458,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // New histogram that has an explicit counter reset.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.CounterResetHint = histogram.CounterReset
 
@@ -450,7 +466,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk.
-		_, hApp, ts, h1 := setup()
+		_, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy() // Identity is appendable.
 
 		nextChunk := NewFloatHistogramChunk()
@@ -466,7 +482,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk.
-		_, hApp, ts, h1 := setup()
+		_, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.Count-- // Make this not appendable due to counter reset.
 
@@ -483,7 +499,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk.
-		_, hApp, ts, h1 := setup()
+		_, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 3},
@@ -507,6 +523,72 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		assertSampleCount(t, nextChunk, 1, ValFloatHistogram)
 		require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
 	}
+
+	{ // Custom buckets, no change.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		_, _, ok, _ := hApp.appendable(h2)
+		require.True(t, ok)
+
+		assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
+	}
+
+	{ // Custom buckets, increase in bucket counts but no change in layout.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		h2.Count++
+		h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 2}
+		_, _, ok, _ := hApp.appendable(h2)
+		require.True(t, ok)
+
+		assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
+	}
+
+	{ // Custom buckets, decrease in bucket counts but no change in layout.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		h2.Count--
+		h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 0}
+		_, _, ok, _ := hApp.appendable(h2)
+		require.False(t, ok)
+
+		assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
+	}
+
+	{ // Custom buckets, change only in custom bounds.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
+		_, _, ok, _ := hApp.appendable(h2)
+		require.False(t, ok)
+
+		assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
+	}
+
+	{ // Custom buckets, with more buckets.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		h2.PositiveSpans = []histogram.Span{
+			{Offset: 0, Length: 3},
+			{Offset: 1, Length: 1},
+			{Offset: 1, Length: 4},
+			{Offset: 3, Length: 3},
+		}
+		h2.Count += 6
+		h2.Sum = 30
+		// Existing histogram should get values converted from the above to:
+		// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
+		// so the new histogram should have new counts >= these per-bucket counts, e.g.:
+		h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} // (total 30)
+
+		posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
+		require.NotEmpty(t, posInterjections)
+		require.Empty(t, negInterjections)
+		require.True(t, ok) // Only new buckets came in.
+		require.False(t, cr)
+
+		assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
+	}
 }
 
 func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) {
@@ -526,7 +608,7 @@ func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *Fl
 func assertNoNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) {
 	oldChunkBytes := oldChunk.Bytes()
 	newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false)
-	require.NotEqual(t, oldChunkBytes, oldChunk.Bytes()) // Sanity check that previous chunk is untouched.
+	require.Greater(t, len(oldChunk.Bytes()), len(oldChunkBytes)) // Check that current chunk is bigger than previously.
 	require.NoError(t, err)
 	require.Nil(t, newChunk)
 	require.False(t, recoded)
@@ -715,6 +797,32 @@ func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
 				NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2},
 			},
 		},
+		"empty span in old and new custom buckets histogram": {
+			h1: &histogram.FloatHistogram{
+				Schema: histogram.CustomBucketsSchema,
+				Count:  7,
+				Sum:    1234.5,
+				PositiveSpans: []histogram.Span{
+					{Offset: 0, Length: 4},
+					{Offset: 0, Length: 0},
+					{Offset: 0, Length: 3},
+				},
+				PositiveBuckets: []float64{1, 2, 1, 1, 1, 1, 1},
+				CustomValues:    []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+			},
+			h2: &histogram.FloatHistogram{
+				Schema: histogram.CustomBucketsSchema,
+				Count:  10,
+				Sum:    2345.6,
+				PositiveSpans: []histogram.Span{
+					{Offset: 0, Length: 4},
+					{Offset: 0, Length: 0},
+					{Offset: 0, Length: 3},
+				},
+				PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
+				CustomValues:    []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+			},
+		},
 	}
 
 	for name, tc := range tests {
@@ -741,7 +849,40 @@ func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
 }
 
 func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
-	setup := func() (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) {
+	eh := &histogram.FloatHistogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            5,
+		ZeroCount:        2,
+		Sum:              18.4,
+		ZeroThreshold:    1e-125,
+		Schema:           1,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 2, Length: 1},
+			{Offset: 3, Length: 2},
+			{Offset: 3, Length: 1},
+			{Offset: 1, Length: 1},
+		},
+		PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
+	}
+
+	cbh := &histogram.FloatHistogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            24,
+		Sum:              18.4,
+		Schema:           histogram.CustomBucketsSchema,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 2, Length: 1},
+			{Offset: 3, Length: 2},
+			{Offset: 3, Length: 1},
+			{Offset: 1, Length: 1},
+		},
+		PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
+		CustomValues:    []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
+	}
+
+	setup := func(h *histogram.FloatHistogram) (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) {
 		c := Chunk(NewFloatHistogramChunk())
 
 		// Create fresh appender and add the first histogram.
@@ -750,33 +891,17 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
 		require.Equal(t, 0, c.NumSamples())
 
 		ts := int64(1234567890)
-		h1 := &histogram.FloatHistogram{
-			CounterResetHint: histogram.GaugeType,
-			Count:            5,
-			ZeroCount:        2,
-			Sum:              18.4,
-			ZeroThreshold:    1e-125,
-			Schema:           1,
-			PositiveSpans: []histogram.Span{
-				{Offset: 0, Length: 2},
-				{Offset: 2, Length: 1},
-				{Offset: 3, Length: 2},
-				{Offset: 3, Length: 1},
-				{Offset: 1, Length: 1},
-			},
-			PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
-		}
 
-		chk, _, app, err := app.AppendFloatHistogram(nil, ts, h1.Copy(), false)
+		chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.Copy(), false)
 		require.NoError(t, err)
 		require.Nil(t, chk)
 		require.Equal(t, 1, c.NumSamples())
 		require.Equal(t, GaugeType, c.(*FloatHistogramChunk).GetCounterResetHeader())
-		return c, app.(*FloatHistogramAppender), ts, h1
+		return c, app.(*FloatHistogramAppender), ts, h
 	}
 
 	{ // Schema change.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.Schema++
 		_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
@@ -786,7 +911,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
 	}
 
 	{ // Zero threshold change.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.ZeroThreshold += 0.1
 		_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
@@ -796,7 +921,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
 	}
 
 	{ // New histogram that has more buckets.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 3},
@@ -820,7 +945,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
 	}
 
 	{ // New histogram that has buckets missing.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 2},
@@ -844,7 +969,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
 	}
 
 	{ // New histogram that has a bucket missing and new buckets.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 2},
@@ -866,7 +991,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
 	}
 
 	{ // New histogram that has a counter reset while buckets are same.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.Sum = 23
 		h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1}
@@ -882,7 +1007,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
 	}
 
 	{ // New histogram that has a counter reset while new buckets were added.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 3},
@@ -906,7 +1031,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
 	{
 		// New histogram that has a counter reset while new buckets were
 		// added before the first bucket and reset on first bucket.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: -3, Length: 2},
@@ -928,6 +1053,73 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
 
 		assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
 	}
+
+	{ // Custom buckets, no change.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
+		require.True(t, ok)
+
+		assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
+	}
+
+	{ // Custom buckets, increase in bucket counts but no change in layout.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		h2.Count++
+		h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 2}
+		_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
+		require.True(t, ok)
+
+		assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
+	}
+
+	{ // Custom buckets, decrease in bucket counts but no change in layout.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		h2.Count--
+		h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 0}
+		_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
+		require.True(t, ok)
+
+		assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
+	}
+
+	{ // Custom buckets, change only in custom bounds.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
+		_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
+		require.False(t, ok)
+
+		assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
+	}
+
+	{ // Custom buckets, with more buckets.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		h2.PositiveSpans = []histogram.Span{
+			{Offset: 0, Length: 3},
+			{Offset: 1, Length: 1},
+			{Offset: 1, Length: 4},
+			{Offset: 3, Length: 3},
+		}
+		h2.Count += 6
+		h2.Sum = 30
+		// Existing histogram should get values converted from the above to:
+		// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
+		// so the new histogram should have new counts >= these per-bucket counts, e.g.:
+		h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} // (total 30)
+
+		posInterjections, negInterjections, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
+		require.NotEmpty(t, posInterjections)
+		require.Empty(t, negInterjections)
+		require.Empty(t, pBackwardI)
+		require.Empty(t, nBackwardI)
+		require.True(t, ok) // Only new buckets came in.
+
+		assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
+	}
 }
 
 func TestFloatHistogramAppendOnlyErrors(t *testing.T) {
@@ -975,4 +1167,26 @@ func TestFloatHistogramAppendOnlyErrors(t *testing.T) {
 		require.False(t, isRecoded)
 		require.EqualError(t, err, "float histogram counter reset")
 	})
+	t.Run("counter reset error with custom buckets", func(t *testing.T) {
+		c := Chunk(NewFloatHistogramChunk())
+
+		// Create fresh appender and add the first histogram.
+		app, err := c.Appender()
+		require.NoError(t, err)
+
+		h := tsdbutil.GenerateTestCustomBucketsFloatHistogram(0)
+		var isRecoded bool
+		c, isRecoded, app, err = app.AppendFloatHistogram(nil, 1, h, true)
+		require.Nil(t, c)
+		require.False(t, isRecoded)
+		require.NoError(t, err)
+
+		// Add erroring histogram.
+		h2 := h.Copy()
+		h2.CustomValues = []float64{0, 1, 2, 3, 4, 5, 6, 7}
+		c, isRecoded, _, err = app.AppendFloatHistogram(nil, 2, h2, true)
+		require.Nil(t, c)
+		require.False(t, isRecoded)
+		require.EqualError(t, err, "float histogram counter reset")
+	})
 }

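The tests above pin down the appendability contract: identical or grown buckets extend the current chunk, while shrunken buckets or changed custom bounds count as a counter reset and force a new chunk. A simplified sketch of that decision follows, with illustrative types rather than the real `tsdb/chunkenc` API:

package main

import "fmt"

// hist is an illustrative reduction of a custom-buckets histogram:
// a schema, its bucket bounds, and per-bucket counts.
type hist struct {
	schema  int32
	bounds  []float64
	buckets []float64
}

func boundsMatch(a, b []float64) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// appendable mirrors the rules the tests exercise: a schema or bound
// change, or any shrinking bucket, means counter reset and a new chunk.
func appendable(prev, next hist) (ok, counterReset bool) {
	if next.schema != prev.schema || !boundsMatch(next.bounds, prev.bounds) {
		return false, true
	}
	for i, v := range next.buckets {
		if i < len(prev.buckets) && v < prev.buckets[i] {
			return false, true // a bucket count went down
		}
	}
	return true, false
}

func main() {
	prev := hist{schema: 1, bounds: []float64{1, 2, 3}, buckets: []float64{6, 3, 3}}

	grown := prev
	grown.buckets = []float64{6, 3, 4} // counts only went up
	fmt.Println(appendable(prev, grown)) // true false

	rebound := prev
	rebound.bounds = []float64{1, 2, 3, 4} // bounds changed
	fmt.Println(appendable(prev, rebound)) // false true
}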
@@ -69,6 +69,7 @@ func (c *HistogramChunk) NumSamples() int {
 func (c *HistogramChunk) Layout() (
 	schema int32, zeroThreshold float64,
 	negativeSpans, positiveSpans []histogram.Span,
+	customValues []float64,
 	err error,
 ) {
 	if c.NumSamples() == 0 {
@@ -131,6 +132,7 @@ func (c *HistogramChunk) Appender() (Appender, error) {
 		zThreshold:   it.zThreshold,
 		pSpans:       it.pSpans,
 		nSpans:       it.nSpans,
+		customValues: it.customValues,
 		t:            it.t,
 		cnt:          it.cnt,
 		zCnt:         it.zCnt,
@@ -198,6 +200,7 @@ type HistogramAppender struct {
 	schema         int32
 	zThreshold     float64
 	pSpans, nSpans []histogram.Span
+	customValues   []float64
 
 	// Although we intend to start new chunks on counter resets, we still
 	// have to handle negative deltas for gauge histograms. Therefore, even
@@ -241,6 +244,7 @@ func (a *HistogramAppender) Append(int64, float64) {
 // The chunk is not appendable in the following cases:
 //
 // - The schema has changed.
+// - The custom bounds have changed if the current schema is custom buckets.
 // - The threshold for the zero bucket has changed.
 // - Any buckets have disappeared.
 // - There was a counter reset in the count of observations or in any bucket,
@@ -283,6 +287,11 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) (
 		return
 	}
 
+	if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
+		counterReset = true
+		return
+	}
+
 	if h.ZeroCount < a.zCnt {
 		// There has been a counter reset since ZeroThreshold didn't change.
 		counterReset = true
@@ -323,6 +332,7 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) (
 //
 // The chunk is not appendable in the following cases:
 // - The schema has changed.
+// - The custom bounds have changed if the current schema is custom buckets.
 // - The threshold for the zero bucket has changed.
 // - The last sample in the chunk was stale while the current sample is not stale.
 func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) (
@@ -349,6 +359,10 @@ func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) (
 		return
 	}
 
+	if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
+		return
+	}
+
 	positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
 	negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
 	okToAppend = true
@@ -442,7 +456,7 @@ func (a *HistogramAppender) appendHistogram(t int64, h *histogram.Histogram) {
 	if num == 0 {
 		// The first append gets the privilege to dictate the layout
 		// but it's also responsible for encoding it into the chunk!
-		writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans)
+		writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans, h.CustomValues)
 		a.schema = h.Schema
 		a.zThreshold = h.ZeroThreshold
 
@@ -458,6 +472,12 @@ func (a *HistogramAppender) appendHistogram(t int64, h *histogram.Histogram) {
 		} else {
 			a.nSpans = nil
 		}
+		if len(h.CustomValues) > 0 {
+			a.customValues = make([]float64, len(h.CustomValues))
+			copy(a.customValues, h.CustomValues)
+		} else {
+			a.customValues = nil
+		}
 
 		numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
 		if numPBuckets > 0 {
@@ -741,6 +761,7 @@ type histogramIterator struct {
 	schema         int32
 	zThreshold     float64
 	pSpans, nSpans []histogram.Span
+	customValues   []float64
 
 	// For the fields that are tracked as deltas and ultimately dod's.
 	t int64
@@ -797,6 +818,7 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
 			NegativeSpans:   it.nSpans,
 			PositiveBuckets: it.pBuckets,
 			NegativeBuckets: it.nBuckets,
+			CustomValues:    it.customValues,
 		}
 	}
 
@@ -819,6 +841,9 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
 	h.NegativeBuckets = resize(h.NegativeBuckets, len(it.nBuckets))
 	copy(h.NegativeBuckets, it.nBuckets)
 
+	h.CustomValues = resize(h.CustomValues, len(it.customValues))
+	copy(h.CustomValues, it.customValues)
+
 	return it.t, h
 }
 
@@ -839,6 +864,7 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
 			NegativeSpans:   it.nSpans,
 			PositiveBuckets: it.pFloatBuckets,
 			NegativeBuckets: it.nFloatBuckets,
+			CustomValues:    it.customValues,
 		}
 	}
 
@@ -869,6 +895,9 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
 		fh.NegativeBuckets[i] = currentNegative
 	}
 
+	fh.CustomValues = resize(fh.CustomValues, len(it.customValues))
+	copy(fh.CustomValues, it.customValues)
+
 	return it.t, fh
 }
 
@@ -927,7 +956,7 @@ func (it *histogramIterator) Next() ValueType {
 		// The first read is responsible for reading the chunk layout
 		// and for initializing fields that depend on it. We give
 		// counter reset info at chunk level, hence we discard it here.
-		schema, zeroThreshold, posSpans, negSpans, err := readHistogramChunkLayout(&it.br)
+		schema, zeroThreshold, posSpans, negSpans, customValues, err := readHistogramChunkLayout(&it.br)
 		if err != nil {
 			it.err = err
 			return ValNone
@@ -935,6 +964,7 @@ func (it *histogramIterator) Next() ValueType {
 		it.schema = schema
 		it.zThreshold = zeroThreshold
 		it.pSpans, it.nSpans = posSpans, negSpans
+		it.customValues = customValues
 		numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans)
 		// The code below recycles existing slices in case this iterator
 		// was reset and already has slices of a sufficient capacity.

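The iterator changes above repeatedly use the same `resize`-then-`copy` idiom to hand out `CustomValues` without aliasing the iterator's internal slice. A small sketch of the idiom; the `resize` helper here is a local stand-in for the package's unexported one:

package main

import "fmt"

// resize mirrors the helper used in the iterators above: reuse the
// backing array when capacity allows, otherwise allocate a new one.
func resize(s []float64, n int) []float64 {
	if cap(s) >= n {
		return s[:n]
	}
	return make([]float64, n)
}

func main() {
	dst := make([]float64, 0, 8)
	src := []float64{1, 2, 3}

	// AtHistogram/AtFloatHistogram use this two-step pattern so callers
	// get their own copy of the custom bucket bounds.
	dst = resize(dst, len(src))
	copy(dst, src)

	fmt.Println(dst, cap(dst)) // [1 2 3] 8
}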
@@ -21,17 +21,21 @@ import (
 
 func writeHistogramChunkLayout(
 	b *bstream, schema int32, zeroThreshold float64,
-	positiveSpans, negativeSpans []histogram.Span,
+	positiveSpans, negativeSpans []histogram.Span, customValues []float64,
 ) {
 	putZeroThreshold(b, zeroThreshold)
 	putVarbitInt(b, int64(schema))
 	putHistogramChunkLayoutSpans(b, positiveSpans)
 	putHistogramChunkLayoutSpans(b, negativeSpans)
+	if histogram.IsCustomBucketsSchema(schema) {
+		putHistogramChunkLayoutCustomBounds(b, customValues)
+	}
 }
 
 func readHistogramChunkLayout(b *bstreamReader) (
 	schema int32, zeroThreshold float64,
 	positiveSpans, negativeSpans []histogram.Span,
+	customValues []float64,
 	err error,
 ) {
 	zeroThreshold, err = readZeroThreshold(b)
@@ -55,6 +59,13 @@ func readHistogramChunkLayout(b *bstreamReader) (
 		return
 	}
 
+	if histogram.IsCustomBucketsSchema(schema) {
+		customValues, err = readHistogramChunkLayoutCustomBounds(b)
+		if err != nil {
+			return
+		}
+	}
+
 	return
 }
 
@@ -91,6 +102,30 @@ func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) {
 	return spans, nil
 }
 
+func putHistogramChunkLayoutCustomBounds(b *bstream, customValues []float64) {
+	putVarbitUint(b, uint64(len(customValues)))
+	for _, bound := range customValues {
+		putCustomBound(b, bound)
+	}
+}
+
+func readHistogramChunkLayoutCustomBounds(b *bstreamReader) ([]float64, error) {
+	var customValues []float64
+	num, err := readVarbitUint(b)
+	if err != nil {
+		return nil, err
+	}
+	for i := 0; i < int(num); i++ {
+		bound, err := readCustomBound(b)
+		if err != nil {
+			return nil, err
+		}
+
+		customValues = append(customValues, bound)
+	}
+	return customValues, nil
+}
+
 // putZeroThreshold writes the zero threshold to the bstream. It stores typical
 // values in just one byte, but needs 9 bytes for other values. In detail:
 // - If the threshold is 0, store a single zero byte.
@@ -139,6 +174,59 @@ func readZeroThreshold(br *bstreamReader) (float64, error) {
 	}
 }
 
+// isWholeWhenMultiplied checks to see if the number when multiplied by 1000 can
+// be converted into an integer without losing precision.
+func isWholeWhenMultiplied(in float64) bool {
+	i := uint(math.Round(in * 1000))
+	out := float64(i) / 1000
+	return in == out
+}
+
+// putCustomBound writes a custom bound to the bstream. It stores values from
+// 0 to 33554.430 (inclusive) that are multiples of 0.001 in unsigned varbit
+// encoding of up to 4 bytes, but needs 1 bit + 8 bytes for other values like
+// negative numbers, numbers greater than 33554.430, or numbers that are not
+// a multiple of 0.001, on the assumption that they are less common. In detail:
+//   - Multiply the bound by 1000, without rounding.
+//   - If the multiplied bound is >= 0, <= 33554430 and a whole number,
+//     add 1 and store it in unsigned varbit encoding. All these numbers are
+//     greater than 0, so the leading bit of the varbit is always 1!
+//   - Otherwise, store a 0 bit, followed by the 8 bytes of the original
+//     bound as a float64.
+//
+// When reading the values, we can first decode a value as unsigned varbit,
+// if it's 0, then we read the next 8 bytes as a float64, otherwise
+// we can convert the value to a float64 by subtracting 1 and dividing by 1000.
+func putCustomBound(b *bstream, f float64) {
+	tf := f * 1000
+	// 33554431-1 comes from the maximum that can be stored in a varbit in 4
+	// bytes, other values are stored in 8 bytes anyway.
+	if tf < 0 || tf > 33554430 || !isWholeWhenMultiplied(f) {
+		b.writeBit(zero)
+		b.writeBits(math.Float64bits(f), 64)
+		return
+	}
+	putVarbitUint(b, uint64(math.Round(tf))+1)
+}
+
+// readCustomBound reads the custom bound written with putCustomBound.
+func readCustomBound(br *bstreamReader) (float64, error) {
+	b, err := readVarbitUint(br)
+	if err != nil {
+		return 0, err
+	}
+	switch b {
+	case 0:
+		v, err := br.readBits(64)
+		if err != nil {
+			return 0, err
+		}
+		return math.Float64frombits(v), nil
+	default:
+		return float64(b-1) / 1000, nil
+	}
+}
+
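The bound encoding is worth a standalone illustration. The sketch below is ours, not part of the changeset: it reimplements just the value mapping of putCustomBound/readCustomBound on plain integers, leaving out the bstream varbit framing, so the arithmetic can be checked in isolation.

```go
package main

import (
	"fmt"
	"math"
)

// encodeCustomBound mirrors the value mapping of putCustomBound: it returns
// the varbit payload (always >= 1) for "cheap" bounds, or 0 to signal that
// the caller must fall back to storing the raw float64 bits instead.
func encodeCustomBound(f float64) uint64 {
	tf := f * 1000
	i := uint64(math.Round(tf))
	if tf < 0 || tf > 33554430 || float64(i)/1000 != f {
		return 0 // fallback marker: store math.Float64bits(f) instead
	}
	return i + 1 // shift by one so the varbit payload is never 0
}

// decodeCustomBound inverts the mapping for payloads >= 1.
func decodeCustomBound(b uint64) float64 {
	return float64(b-1) / 1000
}

func main() {
	for _, bound := range []float64{0, 0.005, 2.5, 100, -5, 0.0001} {
		if enc := encodeCustomBound(bound); enc != 0 {
			fmt.Printf("%v -> varbit %d -> %v\n", bound, enc, decodeCustomBound(enc))
		} else {
			fmt.Printf("%v -> raw float64 fallback\n", bound)
		}
	}
}
```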
 type bucketIterator struct {
 	spans []histogram.Span
 	span  int // Span position of last yielded bucket.
@@ -373,6 +373,7 @@ func TestWriteReadHistogramChunkLayout(t *testing.T) {
 		schema                       int32
 		zeroThreshold                float64
 		positiveSpans, negativeSpans []histogram.Span
+		customValues                 []float64
 	}{
 		{
 			schema: 3,
@@ -422,23 +423,48 @@ func TestWriteReadHistogramChunkLayout(t *testing.T) {
 			positiveSpans: nil,
 			negativeSpans: nil,
 		},
+		{
+			schema:        histogram.CustomBucketsSchema,
+			positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
+			negativeSpans: nil,
+			customValues:  []float64{-5, -2.5, 0, 0.1, 0.25, 0.5, 1, 2, 5, 10, 25, 50, 100, 255, 500, 1000, 50000, 1e7},
+		},
+		{
+			schema:        histogram.CustomBucketsSchema,
+			positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
+			negativeSpans: nil,
+			customValues:  []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 25.0, 50.0, 100.0},
+		},
+		{
+			schema:        histogram.CustomBucketsSchema,
+			positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
+			negativeSpans: nil,
+			customValues:  []float64{0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192},
+		},
+		{
+			schema:        histogram.CustomBucketsSchema,
+			positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
+			negativeSpans: nil,
+			customValues:  []float64{1.001, 1.023, 2.01, 4.007, 4.095, 8.001, 8.19, 16.24},
+		},
 	}
 
 	bs := bstream{}
 
 	for _, l := range layouts {
-		writeHistogramChunkLayout(&bs, l.schema, l.zeroThreshold, l.positiveSpans, l.negativeSpans)
+		writeHistogramChunkLayout(&bs, l.schema, l.zeroThreshold, l.positiveSpans, l.negativeSpans, l.customValues)
 	}
 
 	bsr := newBReader(bs.bytes())
 
 	for _, want := range layouts {
-		gotSchema, gotZeroThreshold, gotPositiveSpans, gotNegativeSpans, err := readHistogramChunkLayout(&bsr)
+		gotSchema, gotZeroThreshold, gotPositiveSpans, gotNegativeSpans, gotCustomBounds, err := readHistogramChunkLayout(&bsr)
 		require.NoError(t, err)
 		require.Equal(t, want.schema, gotSchema)
 		require.Equal(t, want.zeroThreshold, gotZeroThreshold)
 		require.Equal(t, want.positiveSpans, gotPositiveSpans)
 		require.Equal(t, want.negativeSpans, gotNegativeSpans)
+		require.Equal(t, want.customValues, gotCustomBounds)
 	}
 }
 
@@ -294,7 +294,38 @@ func TestHistogramChunkBucketChanges(t *testing.T) {
 }
 
 func TestHistogramChunkAppendable(t *testing.T) {
-	setup := func() (Chunk, *HistogramAppender, int64, *histogram.Histogram) {
+	eh := &histogram.Histogram{
+		Count:         5,
+		ZeroCount:     2,
+		Sum:           18.4,
+		ZeroThreshold: 1e-125,
+		Schema:        1,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 2, Length: 1},
+			{Offset: 3, Length: 2},
+			{Offset: 3, Length: 1},
+			{Offset: 1, Length: 1},
+		},
+		PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
+	}
+
+	cbh := &histogram.Histogram{
+		Count:  24,
+		Sum:    18.4,
+		Schema: histogram.CustomBucketsSchema,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 2, Length: 1},
+			{Offset: 3, Length: 2},
+			{Offset: 3, Length: 1},
+			{Offset: 1, Length: 1},
+		},
+		PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
+		CustomValues:    []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
+	}
+
+	setup := func(h *histogram.Histogram) (Chunk, *HistogramAppender, int64, *histogram.Histogram) {
 		c := Chunk(NewHistogramChunk())
 
 		// Create fresh appender and add the first histogram.
@@ -303,32 +334,17 @@ func TestHistogramChunkAppendable(t *testing.T) {
 		require.Equal(t, 0, c.NumSamples())
 
 		ts := int64(1234567890)
-		h1 := &histogram.Histogram{
-			Count:         5,
-			ZeroCount:     2,
-			Sum:           18.4,
-			ZeroThreshold: 1e-125,
-			Schema:        1,
-			PositiveSpans: []histogram.Span{
-				{Offset: 0, Length: 2},
-				{Offset: 2, Length: 1},
-				{Offset: 3, Length: 2},
-				{Offset: 3, Length: 1},
-				{Offset: 1, Length: 1},
-			},
-			PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
-		}
 
-		chk, _, app, err := app.AppendHistogram(nil, ts, h1.Copy(), false)
+		chk, _, app, err := app.AppendHistogram(nil, ts, h.Copy(), false)
 		require.NoError(t, err)
 		require.Nil(t, chk)
 		require.Equal(t, 1, c.NumSamples())
 		require.Equal(t, UnknownCounterReset, c.(*HistogramChunk).GetCounterResetHeader())
-		return c, app.(*HistogramAppender), ts, h1
+		return c, app.(*HistogramAppender), ts, h
 	}
 
 	{ // Schema change.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.Schema++
 		_, _, ok, _ := hApp.appendable(h2)
@@ -338,7 +354,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // Zero threshold change.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.ZeroThreshold += 0.1
 		_, _, ok, _ := hApp.appendable(h2)
@@ -348,7 +364,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // New histogram that has more buckets.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 3},
@@ -374,7 +390,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // New histogram that has a bucket missing.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 2},
@@ -395,7 +411,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // New histogram that has a counter reset while buckets are same.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.Sum = 23
 		h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23)
@@ -410,7 +426,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // New histogram that has a counter reset while new buckets were added.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 3},
@@ -438,7 +454,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
 		// added before the first bucket and reset on first bucket. (to
 		// catch the edge case where the new bucket should be forwarded
 		// ahead until first old bucket at start)
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: -3, Length: 2},
@@ -464,7 +480,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // New histogram that has an explicit counter reset.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.CounterResetHint = histogram.CounterReset
 
@@ -472,7 +488,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk.
-		_, hApp, ts, h1 := setup()
+		_, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy() // Identity is appendable.
 
 		nextChunk := NewHistogramChunk()
@@ -488,7 +504,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk.
-		_, hApp, ts, h1 := setup()
+		_, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.Count-- // Make this not appendable due to counter reset.
 
@@ -505,7 +521,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
 	}
 
 	{ // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk.
-		_, hApp, ts, h1 := setup()
+		_, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 3},
@@ -532,6 +548,72 @@ func TestHistogramChunkAppendable(t *testing.T) {
 		assertSampleCount(t, nextChunk, 1, ValHistogram)
 		require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
 	}
+
+	{ // Custom buckets, no change.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		_, _, ok, _ := hApp.appendable(h2)
+		require.True(t, ok)
+
+		assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
+	}
+
+	{ // Custom buckets, increase in bucket counts but no change in layout.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		h2.Count++
+		h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -3}
+		_, _, ok, _ := hApp.appendable(h2)
+		require.True(t, ok)
+
+		assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
+	}
+
+	{ // Custom buckets, decrease in bucket counts but no change in layout.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		h2.Count--
+		h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -5}
+		_, _, ok, _ := hApp.appendable(h2)
+		require.False(t, ok)
+
+		assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
+	}
+
+	{ // Custom buckets, change only in custom bounds.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
+		_, _, ok, _ := hApp.appendable(h2)
+		require.False(t, ok)
+
+		assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
+	}
+
+	{ // Custom buckets, with more buckets.
+		c, hApp, ts, h1 := setup(cbh)
+		h2 := h1.Copy()
+		h2.PositiveSpans = []histogram.Span{
+			{Offset: 0, Length: 3},
+			{Offset: 1, Length: 1},
+			{Offset: 1, Length: 4},
+			{Offset: 3, Length: 3},
+		}
+		h2.Count += 6
+		h2.Sum = 30
+		// Existing histogram should get values converted from the above to:
+		//   6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
+		// so the new histogram should have new counts >= these per-bucket counts, e.g.:
+		h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
+
+		posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
+		require.NotEmpty(t, posInterjections)
+		require.Empty(t, negInterjections)
+		require.True(t, ok) // Only new buckets came in.
+		require.False(t, cr)
+
+		assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
+	}
 }
 
 func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {
@@ -548,6 +630,19 @@ func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *Histogr
 	assertSampleCount(t, newChunk, 1, ValHistogram)
 }
 
+func assertNoNewHistogramChunkOnAppend(t *testing.T, currChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {
+	prevChunkBytes := currChunk.Bytes()
+	newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false)
+	require.Greater(t, len(currChunk.Bytes()), len(prevChunkBytes)) // Check that current chunk is bigger than previously.
+	require.NoError(t, err)
+	require.Nil(t, newChunk)
+	require.False(t, recoded)
+	require.Equal(t, expectHeader, currChunk.(*HistogramChunk).GetCounterResetHeader())
+	require.NotNil(t, newAppender)
+	require.Equal(t, hApp, newAppender)
+	assertSampleCount(t, currChunk, 2, ValHistogram)
+}
+
 func assertRecodedHistogramChunkOnAppend(t *testing.T, prevChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {
 	prevChunkBytes := prevChunk.Bytes()
 	newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false)
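These three helpers pin down the three possible outcomes of AppendHistogram: appended in place, recoded to a wider layout, or cut over to a new chunk. A hedged caller-side sketch (ours, not part of the changeset) of how those outcomes map to chunk management, in the chunkenc package context:

```go
// handleAppend sketches how a caller distinguishes the outcomes the test
// helpers above assert on. hApp is an existing *HistogramAppender.
func handleAppend(hApp *HistogramAppender, ts int64, h *histogram.Histogram) {
	newChunk, recoded, _, err := hApp.AppendHistogram(nil, ts, h, false)
	switch {
	case err != nil:
		// Only expected with appendOnly=true, e.g. "histogram counter reset".
	case newChunk == nil:
		// Sample was appended to the current chunk in place.
	case recoded:
		// Current chunk was recoded to a wider bucket layout; newChunk
		// replaces it.
	default:
		// Incompatible change (counter reset, schema or bounds change):
		// newChunk is a fresh chunk the caller must start using.
	}
}
```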
@@ -738,6 +833,32 @@ func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) {
 			NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
 		},
 	},
+	"empty span in old and new custom buckets histogram": {
+		h1: &histogram.Histogram{
+			Schema: histogram.CustomBucketsSchema,
+			Count:  7,
+			Sum:    1234.5,
+			PositiveSpans: []histogram.Span{
+				{Offset: 0, Length: 4},
+				{Offset: 0, Length: 0},
+				{Offset: 0, Length: 3},
+			},
+			PositiveBuckets: []int64{1, 1, -1, 0, 0, 0, 0},
+			CustomValues:    []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+		},
+		h2: &histogram.Histogram{
+			Schema: histogram.CustomBucketsSchema,
+			Count:  10,
+			Sum:    2345.6,
+			PositiveSpans: []histogram.Span{
+				{Offset: 0, Length: 4},
+				{Offset: 0, Length: 0},
+				{Offset: 0, Length: 3},
+			},
+			PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
+			CustomValues:    []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+		},
+	},
 }
 
 for name, tc := range tests {
@@ -905,7 +1026,40 @@ func TestAtFloatHistogram(t *testing.T) {
 }
 
 func TestHistogramChunkAppendableGauge(t *testing.T) {
-	setup := func() (Chunk, *HistogramAppender, int64, *histogram.Histogram) {
+	eh := &histogram.Histogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            5,
+		ZeroCount:        2,
+		Sum:              18.4,
+		ZeroThreshold:    1e-125,
+		Schema:           1,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 2, Length: 1},
+			{Offset: 3, Length: 2},
+			{Offset: 3, Length: 1},
+			{Offset: 1, Length: 1},
+		},
+		PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // {6, 3, 3, 2, 4, 5, 1}
+	}
+
+	cbh := &histogram.Histogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            24,
+		Sum:              18.4,
+		Schema:           histogram.CustomBucketsSchema,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 2, Length: 1},
+			{Offset: 3, Length: 2},
+			{Offset: 3, Length: 1},
+			{Offset: 1, Length: 1},
+		},
+		PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // {6, 3, 3, 2, 4, 5, 1}
+		CustomValues:    []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
+	}
+
+	setup := func(h *histogram.Histogram) (Chunk, *HistogramAppender, int64, *histogram.Histogram) {
 		c := Chunk(NewHistogramChunk())
 
 		// Create fresh appender and add the first histogram.
@@ -914,66 +1068,38 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
 		require.Equal(t, 0, c.NumSamples())
 
 		ts := int64(1234567890)
-		h1 := &histogram.Histogram{
-			CounterResetHint: histogram.GaugeType,
-			Count:            5,
-			ZeroCount:        2,
-			Sum:              18.4,
-			ZeroThreshold:    1e-125,
-			Schema:           1,
-			PositiveSpans: []histogram.Span{
-				{Offset: 0, Length: 2},
-				{Offset: 2, Length: 1},
-				{Offset: 3, Length: 2},
-				{Offset: 3, Length: 1},
-				{Offset: 1, Length: 1},
-			},
-			PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // {6, 3, 3, 2, 4, 5, 1}
-		}
 
-		chk, _, app, err := app.AppendHistogram(nil, ts, h1.Copy(), false)
+		chk, _, app, err := app.AppendHistogram(nil, ts, h.Copy(), false)
 		require.NoError(t, err)
 		require.Nil(t, chk)
 		require.Equal(t, 1, c.NumSamples())
 		require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
 
-		return c, app.(*HistogramAppender), ts, h1
+		return c, app.(*HistogramAppender), ts, h
 	}
 
 	{ // Schema change.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.Schema++
 		_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
 		require.False(t, ok)
 
-		newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
-		require.NoError(t, err)
-		require.NotNil(t, newc)
-		require.False(t, recoded)
-		require.NotEqual(t, c, newc)
-		require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
-		require.Equal(t, GaugeType, newc.(*HistogramChunk).GetCounterResetHeader())
+		assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
 	}
 
 	{ // Zero threshold change.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.ZeroThreshold += 0.1
 		_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
 		require.False(t, ok)
 
-		newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
-		require.NoError(t, err)
-		require.NotNil(t, newc)
-		require.False(t, recoded)
-		require.NotEqual(t, c, newc)
-		require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
-		require.Equal(t, GaugeType, newc.(*HistogramChunk).GetCounterResetHeader())
+		assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
 	}
 
 	{ // New histogram that has more buckets.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 3},
@@ -993,15 +1119,11 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
 		require.Empty(t, nBackwardI)
 		require.True(t, ok)
 
-		newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
-		require.NoError(t, err)
-		require.NotNil(t, newc)
-		require.True(t, recoded)
-		require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
+		assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
 	}
 
 	{ // New histogram that has buckets missing.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 2},
@@ -1021,15 +1143,11 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
 		require.Empty(t, nBackwardI)
 		require.True(t, ok)
 
-		newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
-		require.NoError(t, err)
-		require.Nil(t, newc)
-		require.False(t, recoded)
-		require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
+		assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
 	}
 
 	{ // New histogram that has a bucket missing and new buckets.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 2},
@@ -1047,15 +1165,11 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
 		require.Empty(t, nBackwardI)
 		require.True(t, ok)
 
-		newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
-		require.NoError(t, err)
-		require.NotNil(t, newc)
-		require.True(t, recoded)
-		require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
+		assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
 	}
 
 	{ // New histogram that has a counter reset while buckets are same.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.Sum = 23
 		h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // {6, 2, 3, 2, 4, 5, 1}
@@ -1067,15 +1181,11 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
 		require.Empty(t, nBackwardI)
 		require.True(t, ok)
 
-		newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
-		require.NoError(t, err)
-		require.Nil(t, newc)
-		require.False(t, recoded)
-		require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
+		assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
 	}
 
 	{ // New histogram that has a counter reset while new buckets were added.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: 0, Length: 3},
@@ -1093,17 +1203,13 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
 		require.Empty(t, nBackwardI)
 		require.True(t, ok)
 
-		newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
-		require.NoError(t, err)
-		require.NotNil(t, newc)
-		require.True(t, recoded)
-		require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
+		assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
 	}
 
 	{
 		// New histogram that has a counter reset while new buckets were
 		// added before the first bucket and reset on first bucket.
-		c, hApp, ts, h1 := setup()
+		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.PositiveSpans = []histogram.Span{
 			{Offset: -3, Length: 2},
|
||||||
require.Empty(t, nBackwardI)
|
require.Empty(t, nBackwardI)
|
||||||
require.True(t, ok)
|
require.True(t, ok)
|
||||||
|
|
||||||
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
|
assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
|
||||||
require.NoError(t, err)
|
}
|
||||||
require.NotNil(t, newc)
|
|
||||||
require.True(t, recoded)
|
{ // Custom buckets, no change.
|
||||||
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
|
c, hApp, ts, h1 := setup(cbh)
|
||||||
|
h2 := h1.Copy()
|
||||||
|
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
|
||||||
|
require.True(t, ok)
|
||||||
|
|
||||||
|
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
|
||||||
|
}
|
||||||
|
|
||||||
|
{ // Custom buckets, increase in bucket counts but no change in layout.
|
||||||
|
c, hApp, ts, h1 := setup(cbh)
|
||||||
|
h2 := h1.Copy()
|
||||||
|
h2.Count++
|
||||||
|
h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -3}
|
||||||
|
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
|
||||||
|
require.True(t, ok)
|
||||||
|
|
||||||
|
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
|
||||||
|
}
|
||||||
|
|
||||||
|
{ // Custom buckets, decrease in bucket counts but no change in layout.
|
||||||
|
c, hApp, ts, h1 := setup(cbh)
|
||||||
|
h2 := h1.Copy()
|
||||||
|
h2.Count--
|
||||||
|
h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -5}
|
||||||
|
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
|
||||||
|
require.True(t, ok)
|
||||||
|
|
||||||
|
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
|
||||||
|
}
|
||||||
|
|
||||||
|
{ // Custom buckets, change only in custom bounds.
|
||||||
|
c, hApp, ts, h1 := setup(cbh)
|
||||||
|
h2 := h1.Copy()
|
||||||
|
h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
|
||||||
|
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
|
||||||
|
require.False(t, ok)
|
||||||
|
|
||||||
|
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
|
||||||
|
}
|
||||||
|
|
||||||
|
{ // Custom buckets, with more buckets.
|
||||||
|
c, hApp, ts, h1 := setup(cbh)
|
||||||
|
h2 := h1.Copy()
|
||||||
|
h2.PositiveSpans = []histogram.Span{
|
||||||
|
{Offset: 0, Length: 3},
|
||||||
|
{Offset: 1, Length: 1},
|
||||||
|
{Offset: 1, Length: 4},
|
||||||
|
{Offset: 3, Length: 3},
|
||||||
|
}
|
||||||
|
h2.Count += 6
|
||||||
|
h2.Sum = 30
|
||||||
|
// Existing histogram should get values converted from the above to:
|
||||||
|
// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
|
||||||
|
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
|
||||||
|
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
|
||||||
|
|
||||||
|
posInterjections, negInterjections, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
|
||||||
|
require.NotEmpty(t, posInterjections)
|
||||||
|
require.Empty(t, negInterjections)
|
||||||
|
require.Empty(t, pBackwardI)
|
||||||
|
require.Empty(t, nBackwardI)
|
||||||
|
require.True(t, ok) // Only new buckets came in.
|
||||||
|
|
||||||
|
assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1176,4 +1345,26 @@ func TestHistogramAppendOnlyErrors(t *testing.T) {
 		require.False(t, isRecoded)
 		require.EqualError(t, err, "histogram counter reset")
 	})
+	t.Run("counter reset error with custom buckets", func(t *testing.T) {
+		c := Chunk(NewHistogramChunk())
+
+		// Create fresh appender and add the first histogram.
+		app, err := c.Appender()
+		require.NoError(t, err)
+
+		h := tsdbutil.GenerateTestCustomBucketsHistogram(0)
+		var isRecoded bool
+		c, isRecoded, app, err = app.AppendHistogram(nil, 1, h, true)
+		require.Nil(t, c)
+		require.False(t, isRecoded)
+		require.NoError(t, err)
+
+		// Add erroring histogram.
+		h2 := h.Copy()
+		h2.CustomValues = []float64{0, 1, 2, 3, 4, 5, 6, 7}
+		c, isRecoded, _, err = app.AppendHistogram(nil, 2, h2, true)
+		require.Nil(t, c)
+		require.False(t, isRecoded)
+		require.EqualError(t, err, "histogram counter reset")
+	})
 }
 
@@ -656,7 +656,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl
 	}
 	closers = append(closers, indexw)
 
-	if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw); err != nil {
+	if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw, AllSortedPostings); err != nil {
 		return fmt.Errorf("populate block: %w", err)
 	}
 
@@ -722,7 +722,20 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl
 }
 
 type BlockPopulator interface {
-	PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error
+	PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) error
+}
+
+// IndexReaderPostingsFunc is a function to get a sorted posting iterator from a given index reader.
+type IndexReaderPostingsFunc func(ctx context.Context, reader IndexReader) index.Postings
+
+// AllSortedPostings returns a sorted all posting iterator from the input index reader.
+func AllSortedPostings(ctx context.Context, reader IndexReader) index.Postings {
+	k, v := index.AllPostingsKey()
+	all, err := reader.Postings(ctx, k, v)
+	if err != nil {
+		return index.ErrPostings(err)
+	}
+	return reader.SortedPostings(all)
 }
 
 type DefaultBlockPopulator struct{}
@@ -730,7 +743,7 @@ type DefaultBlockPopulator struct{}
 
 // PopulateBlock fills the index and chunk writers with new data gathered as the union
 // of the provided blocks. It returns meta information for the new block.
 // It expects sorted blocks input by mint.
-func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) {
+func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) {
 	if len(blocks) == 0 {
 		return errors.New("cannot populate block from no readers")
 	}
@@ -788,14 +801,9 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa
 	}
 	closers = append(closers, tombsr)
 
-	k, v := index.AllPostingsKey()
-	all, err := indexr.Postings(ctx, k, v)
-	if err != nil {
-		return err
-	}
-	all = indexr.SortedPostings(all)
+	postings := postingsFunc(ctx, indexr)
 	// Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp.
-	sets = append(sets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false))
+	sets = append(sets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, postings, meta.MinTime, meta.MaxTime-1, false))
 	syms := indexr.Symbols()
 	if i == 0 {
 		symbols = syms
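AllSortedPostings reproduces the previously hard-coded behaviour; the point of the indirection is that code in the tsdb package can now populate a block from a subset of series. A hedged sketch of such a narrowing postings function (the helper name is ours; it mirrors both AllSortedPostings and the pattern used in the tests below):

```go
package tsdb

import (
	"context"

	"github.com/prometheus/prometheus/tsdb/index"
)

// onlyMatchingPostings returns an IndexReaderPostingsFunc restricted to
// series where label `name` has one of the given values, sorted for writing.
// It narrows the posting list before handing it to the block populator.
func onlyMatchingPostings(name string, values ...string) IndexReaderPostingsFunc {
	return func(ctx context.Context, reader IndexReader) index.Postings {
		p, err := reader.Postings(ctx, name, values...)
		if err != nil {
			return index.ErrPostings(err)
		}
		return reader.SortedPostings(p)
	}
}
```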
@@ -38,6 +38,7 @@ import (
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
 	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/index"
 	"github.com/prometheus/prometheus/tsdb/tombstones"
 	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 	"github.com/prometheus/prometheus/tsdb/wlog"
@@ -493,6 +494,7 @@ func TestCompaction_populateBlock(t *testing.T) {
 		inputSeriesSamples [][]seriesSamples
 		compactMinTime     int64
 		compactMaxTime     int64 // When not defined the test runner sets a default of math.MaxInt64.
+		irPostingsFunc     IndexReaderPostingsFunc
 		expSeriesSamples   []seriesSamples
 		expErr             error
 	}{
@@ -961,6 +963,60 @@ func TestCompaction_populateBlock(t *testing.T) {
 				},
 			},
 		},
+		{
+			title: "Populate from single block with index reader postings function selecting different series. Expect empty block.",
+			inputSeriesSamples: [][]seriesSamples{
+				{
+					{
+						lset:   map[string]string{"a": "b"},
+						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
+					},
+				},
+			},
+			irPostingsFunc: func(ctx context.Context, reader IndexReader) index.Postings {
+				p, err := reader.Postings(ctx, "a", "c")
+				if err != nil {
+					return index.EmptyPostings()
+				}
+				return reader.SortedPostings(p)
+			},
+		},
+		{
+			title: "Populate from single block with index reader postings function selecting one series. Expect partial block.",
+			inputSeriesSamples: [][]seriesSamples{
+				{
+					{
+						lset:   map[string]string{"a": "b"},
+						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
+					},
+					{
+						lset:   map[string]string{"a": "c"},
+						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
+					},
+					{
+						lset:   map[string]string{"a": "d"},
+						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
+					},
+				},
+			},
+			irPostingsFunc: func(ctx context.Context, reader IndexReader) index.Postings {
+				p, err := reader.Postings(ctx, "a", "c", "d")
+				if err != nil {
+					return index.EmptyPostings()
+				}
+				return reader.SortedPostings(p)
+			},
+			expSeriesSamples: []seriesSamples{
+				{
+					lset:   map[string]string{"a": "c"},
+					chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
+				},
+				{
+					lset:   map[string]string{"a": "d"},
+					chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
+				},
+			},
+		},
 	} {
 		t.Run(tc.title, func(t *testing.T) {
 			blocks := make([]BlockReader, 0, len(tc.inputSeriesSamples))
@@ -982,7 +1038,11 @@ func TestCompaction_populateBlock(t *testing.T) {
 
 			iw := &mockIndexWriter{}
 			blockPopulator := DefaultBlockPopulator{}
-			err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{})
+			irPostingsFunc := AllSortedPostings
+			if tc.irPostingsFunc != nil {
+				irPostingsFunc = tc.irPostingsFunc
+			}
+			err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{}, irPostingsFunc)
 			if tc.expErr != nil {
 				require.Error(t, err)
 				require.Equal(t, tc.expErr.Error(), err.Error())
57 tsdb/db.go
@@ -192,12 +192,22 @@ type Options struct {
 
 	// NewCompactorFunc is a function that returns a TSDB compactor.
 	NewCompactorFunc NewCompactorFunc
+
+	// BlockQuerierFunc is a function to return storage.Querier from a BlockReader.
+	BlockQuerierFunc BlockQuerierFunc
+
+	// BlockChunkQuerierFunc is a function to return storage.ChunkQuerier from a BlockReader.
+	BlockChunkQuerierFunc BlockChunkQuerierFunc
 }
 
 type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error)
 
 type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{}
 
+type BlockQuerierFunc func(b BlockReader, mint, maxt int64) (storage.Querier, error)
+
+type BlockChunkQuerierFunc func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error)
+
 // DB handles reads and writes of time series falling into
 // a hashed partition of a seriedb.
 type DB struct {
@@ -244,6 +254,10 @@ type DB struct {
 	writeNotified wlog.WriteNotified
 
 	registerer prometheus.Registerer
+
+	blockQuerierFunc BlockQuerierFunc
+
+	blockChunkQuerierFunc BlockChunkQuerierFunc
 }
 
 type dbMetrics struct {
@@ -559,10 +573,12 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue
 
 	db.closers = append(db.closers, head)
 	return &DB{
 		dir:    db.dir,
 		logger: db.logger,
 		blocks: blocks,
 		head:   head,
+		blockQuerierFunc:      NewBlockQuerier,
+		blockChunkQuerierFunc: NewBlockChunkQuerier,
 	}, nil
 }
 
@@ -870,6 +886,18 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
 	}
 	db.compactCancel = cancel
 
+	if opts.BlockQuerierFunc == nil {
+		db.blockQuerierFunc = NewBlockQuerier
+	} else {
+		db.blockQuerierFunc = opts.BlockQuerierFunc
+	}
+
+	if opts.BlockChunkQuerierFunc == nil {
+		db.blockChunkQuerierFunc = NewBlockChunkQuerier
+	} else {
+		db.blockChunkQuerierFunc = opts.BlockChunkQuerierFunc
+	}
+
 	var wal, wbl *wlog.WL
 	segmentSize := wlog.DefaultSegmentSize
 	// Wal is enabled.
@ -1964,7 +1992,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
|
||||||
if maxt >= db.head.MinTime() {
|
if maxt >= db.head.MinTime() {
|
||||||
rh := NewRangeHead(db.head, mint, maxt)
|
rh := NewRangeHead(db.head, mint, maxt)
|
||||||
var err error
|
var err error
|
||||||
inOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt)
|
inOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("open block querier for head %s: %w", rh, err)
|
return nil, fmt.Errorf("open block querier for head %s: %w", rh, err)
|
||||||
}
|
}
|
||||||
|
@ -1981,7 +2009,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
|
||||||
}
|
}
|
||||||
if getNew {
|
if getNew {
|
||||||
rh := NewRangeHead(db.head, newMint, maxt)
|
rh := NewRangeHead(db.head, newMint, maxt)
|
||||||
inOrderHeadQuerier, err = NewBlockQuerier(rh, newMint, maxt)
|
inOrderHeadQuerier, err = db.blockQuerierFunc(rh, newMint, maxt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err)
|
return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err)
|
||||||
}
|
}
|
||||||
|
@ -1995,9 +2023,9 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
|
||||||
if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
|
if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
|
||||||
rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
|
rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
|
||||||
var err error
|
var err error
|
||||||
outOfOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt)
|
outOfOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead.
|
// If BlockQuerierFunc() failed, make sure to clean up the pending read created by NewOOORangeHead.
|
||||||
rh.isoState.Close()
|
rh.isoState.Close()
|
||||||
|
|
||||||
return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err)
|
return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err)
|
||||||
|
@ -2007,7 +2035,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, b := range blocks {
|
for _, b := range blocks {
|
||||||
q, err := NewBlockQuerier(b, mint, maxt)
|
q, err := db.blockQuerierFunc(b, mint, maxt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("open querier for block %s: %w", b, err)
|
return nil, fmt.Errorf("open querier for block %s: %w", b, err)
|
||||||
}
|
}
|
||||||
|
@ -2045,7 +2073,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
|
||||||
|
|
||||||
if maxt >= db.head.MinTime() {
|
if maxt >= db.head.MinTime() {
|
||||||
rh := NewRangeHead(db.head, mint, maxt)
|
rh := NewRangeHead(db.head, mint, maxt)
|
||||||
inOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt)
|
inOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("open querier for head %s: %w", rh, err)
|
return nil, fmt.Errorf("open querier for head %s: %w", rh, err)
|
||||||
}
|
}
|
||||||
|
@ -2062,7 +2090,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
|
||||||
}
|
}
|
||||||
if getNew {
|
if getNew {
|
||||||
rh := NewRangeHead(db.head, newMint, maxt)
|
rh := NewRangeHead(db.head, newMint, maxt)
|
||||||
inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, newMint, maxt)
|
inOrderHeadQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err)
|
return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err)
|
||||||
}
|
}
|
||||||
|
@ -2075,8 +2103,11 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
|
||||||
|
|
||||||
if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
|
if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
|
||||||
rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
|
rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
|
||||||
outOfOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt)
|
outOfOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead.
|
||||||
|
rh.isoState.Close()
|
||||||
|
|
||||||
return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err)
|
return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -2084,7 +2115,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuerier, err error) {
 	}

 	for _, b := range blocks {
-		q, err := NewBlockChunkQuerier(b, mint, maxt)
+		q, err := db.blockChunkQuerierFunc(b, mint, maxt)
 		if err != nil {
 			return nil, fmt.Errorf("open querier for block %s: %w", b, err)
 		}

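The hunks above route every block, head, and OOO-head querier construction through the injectable `db.blockQuerierFunc`/`db.blockChunkQuerierFunc` instead of calling `NewBlockQuerier`/`NewBlockChunkQuerier` directly, so embedders can customize how queriers are built. A minimal sketch of the caller-side wiring, assuming the option fields are shaped as in the new test further below (the `tsdb.Open` plumbing here is illustrative, not part of this diff):

```go
package main

import (
	"log"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	opts := tsdb.DefaultOptions()
	// Delegate to the default constructors; a real hook could wrap them,
	// e.g. to add tracing or to skip certain blocks entirely.
	opts.BlockQuerierFunc = func(b tsdb.BlockReader, mint, maxt int64) (storage.Querier, error) {
		return tsdb.NewBlockQuerier(b, mint, maxt)
	}
	opts.BlockChunkQuerierFunc = func(b tsdb.BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) {
		return tsdb.NewBlockChunkQuerier(b, mint, maxt)
	}

	db, err := tsdb.Open("data", nil, nil, opts, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```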
@@ -7159,3 +7159,78 @@ func TestNewCompactorFunc(t *testing.T) {
 	require.Len(t, ulids, 1)
 	require.Equal(t, block2, ulids[0])
 }
+
+func TestBlockQuerierAndBlockChunkQuerier(t *testing.T) {
+	opts := DefaultOptions()
+	opts.BlockQuerierFunc = func(b BlockReader, mint, maxt int64) (storage.Querier, error) {
+		// Only blocks with hints can be queried.
+		if len(b.Meta().Compaction.Hints) > 0 {
+			return NewBlockQuerier(b, mint, maxt)
+		}
+		return storage.NoopQuerier(), nil
+	}
+	opts.BlockChunkQuerierFunc = func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) {
+		// Only level-4 compaction blocks can be queried.
+		if b.Meta().Compaction.Level == 4 {
+			return NewBlockChunkQuerier(b, mint, maxt)
+		}
+		return storage.NoopChunkedQuerier(), nil
+	}
+
+	db := openTestDB(t, opts, nil)
+	defer func() {
+		require.NoError(t, db.Close())
+	}()
+
+	metas := []BlockMeta{
+		{Compaction: BlockMetaCompaction{Hints: []string{"test-hint"}}},
+		{Compaction: BlockMetaCompaction{Level: 4}},
+	}
+	for i := range metas {
+		// Include the block ID in the series labels to identify which block got touched.
+		serieses := []storage.Series{storage.NewListSeries(labels.FromMap(map[string]string{"block": fmt.Sprintf("block-%d", i), labels.MetricName: "test_metric"}), []chunks.Sample{sample{t: 0, f: 1}})}
+		blockDir := createBlock(t, db.Dir(), serieses)
+		b, err := OpenBlock(db.logger, blockDir, db.chunkPool)
+		require.NoError(t, err)
+
+		// Overwrite meta.json with the compaction section for testing purposes.
+		b.meta.Compaction = metas[i].Compaction
+		_, err = writeMetaFile(db.logger, blockDir, &b.meta)
+		require.NoError(t, err)
+		require.NoError(t, b.Close())
+	}
+	require.NoError(t, db.reloadBlocks())
+	require.Len(t, db.Blocks(), 2)
+
+	querier, err := db.Querier(0, 500)
+	require.NoError(t, err)
+	defer querier.Close()
+	matcher := labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test_metric")
+	seriesSet := querier.Select(context.Background(), false, nil, matcher)
+	count := 0
+	var lbls labels.Labels
+	for seriesSet.Next() {
+		count++
+		lbls = seriesSet.At().Labels()
+	}
+	require.NoError(t, seriesSet.Err())
+	require.Equal(t, 1, count)
+	// Make sure only block-0 is queried.
+	require.Equal(t, "block-0", lbls.Get("block"))
+
+	chunkQuerier, err := db.ChunkQuerier(0, 500)
+	require.NoError(t, err)
+	defer chunkQuerier.Close()
+	css := chunkQuerier.Select(context.Background(), false, nil, matcher)
+	count = 0
+	// Reset the lbls variable.
+	lbls = labels.EmptyLabels()
+	for css.Next() {
+		count++
+		lbls = css.At().Labels()
+	}
+	require.NoError(t, css.Err())
+	require.Equal(t, 1, count)
+	// Make sure only block-1 is queried.
+	require.Equal(t, "block-1", lbls.Get("block"))
+}
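This test follows the same dependency-injection pattern as the `NewCompactorFunc` hook exercised by `TestNewCompactorFunc` just above, extending it from compaction to block and chunk queriers: each option replaces a hard-coded constructor with a caller-supplied one, verified here by checking which block each querier actually touches.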
@@ -59,6 +59,20 @@ func GenerateTestHistogram(i int) *histogram.Histogram {
 	}
 }

+func GenerateTestCustomBucketsHistogram(i int) *histogram.Histogram {
+	return &histogram.Histogram{
+		Count:  5 + uint64(i*4),
+		Sum:    18.4 * float64(i+1),
+		Schema: histogram.CustomBucketsSchema,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
+		CustomValues:    []float64{0, 1, 2, 3, 4},
+	}
+}
+
 func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) {
 	for x := 0; x < n; x++ {
 		i := int(math.Sin(float64(x))*100) + 100
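To make the encoding concrete: for `i == 0` the generator above describes a custom-buckets (NHCB) histogram whose bucket boundaries come from `CustomValues` (upper bounds 0 through 4) rather than an exponential schema, and whose `PositiveBuckets` are delta-encoded (1, 1, -1, 0 decodes to absolute counts 1, 2, 1, 1, summing to `Count` = 5). A hedged sketch, assuming the standard bucket iterator applies to custom-buckets histograms the same way it does to exponential ones:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Equivalent to what GenerateTestCustomBucketsHistogram(0) would return.
	h := &histogram.Histogram{
		Count:  5,
		Sum:    18.4,
		Schema: histogram.CustomBucketsSchema,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 1, Length: 2},
		},
		// Delta-encoded: absolute counts are 1, 2, 1, 1.
		PositiveBuckets: []int64{1, 1, -1, 0},
		CustomValues:    []float64{0, 1, 2, 3, 4},
	}
	it := h.PositiveBucketIterator()
	for it.Next() {
		b := it.At()
		fmt.Printf("(%v, %v]: %d\n", b.Lower, b.Upper, b.Count)
	}
}
```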
@@ -105,6 +119,20 @@ func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram {
 	}
 }

+func GenerateTestCustomBucketsFloatHistogram(i int) *histogram.FloatHistogram {
+	return &histogram.FloatHistogram{
+		Count:  5 + float64(i*4),
+		Sum:    18.4 * float64(i+1),
+		Schema: histogram.CustomBucketsSchema,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
+		CustomValues:    []float64{0, 1, 2, 3, 4},
+	}
+}
+
 func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) {
 	for x := 0; x < n; x++ {
 		i := int(math.Sin(float64(x))*100) + 100
@@ -94,6 +94,19 @@ func (a Annotations) AsStrings(query string, maxAnnos int) []string {
 	return arr
 }

+func (a Annotations) CountWarningsAndInfo() (int, int) {
+	var countWarnings, countInfo int
+	for _, err := range a {
+		if errors.Is(err, PromQLWarning) {
+			countWarnings++
+		}
+		if errors.Is(err, PromQLInfo) {
+			countInfo++
+		}
+	}
+	return countWarnings, countInfo
+}
+
 //nolint:revive // error-naming.
 var (
 	// Currently there are only 2 types, warnings and info.
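A hedged usage sketch for the new helper, splitting an `Annotations` set into warning and info counts (the constructor used here already exists in this package; the zero `posrange.PositionRange` is just for illustration):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser/posrange"
	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	// Build a set with a single info-level annotation.
	annos := annotations.New().Add(
		annotations.NewPossibleNonCounterInfo("my_metric", posrange.PositionRange{}),
	)
	warnings, infos := annos.CountWarningsAndInfo()
	fmt.Println(warnings, infos) // expected: 0 1
}
```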
@@ -103,12 +116,14 @@ var (
 	PromQLInfo    = errors.New("PromQL info")
 	PromQLWarning = errors.New("PromQL warning")

 	InvalidQuantileWarning              = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning)
 	BadBucketLabelWarning               = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel)
 	MixedFloatsHistogramsWarning        = fmt.Errorf("%w: encountered a mix of histograms and floats for", PromQLWarning)
 	MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning)
 	NativeHistogramNotCounterWarning    = fmt.Errorf("%w: this native histogram metric is not a counter:", PromQLWarning)
 	NativeHistogramNotGaugeWarning      = fmt.Errorf("%w: this native histogram metric is not a gauge:", PromQLWarning)
+	MixedExponentialCustomHistogramsWarning    = fmt.Errorf("%w: vector contains a mix of histograms with exponential and custom buckets schemas for metric name", PromQLWarning)
+	IncompatibleCustomBucketsHistogramsWarning = fmt.Errorf("%w: vector contains histograms with incompatible custom buckets for metric name", PromQLWarning)

 	PossibleNonCounterInfo                  = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo)
 	HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo)
@@ -195,6 +210,24 @@ func NewNativeHistogramNotGaugeWarning(metricName string, pos posrange.PositionRange) error {
 	}
 }

+// NewMixedExponentialCustomHistogramsWarning is used when the queried series includes
+// histograms with both exponential and custom buckets schemas.
+func NewMixedExponentialCustomHistogramsWarning(metricName string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %q", MixedExponentialCustomHistogramsWarning, metricName),
+	}
+}
+
+// NewIncompatibleCustomBucketsHistogramsWarning is used when the queried series includes
+// custom buckets histograms with incompatible custom bounds.
+func NewIncompatibleCustomBucketsHistogramsWarning(metricName string, pos posrange.PositionRange) error {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w %q", IncompatibleCustomBucketsHistogramsWarning, metricName),
+	}
+}
+
 // NewPossibleNonCounterInfo is used when a named counter metric with only float samples does not
 // have the suffixes _total, _sum, _count, or _bucket.
 func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) error {
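Because the new sentinels wrap `PromQLWarning` via `%w`, callers can still match them with `errors.Is`. A hedged sketch, assuming `annoErr` unwraps to its inner error the way the existing constructors rely on:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/promql/parser/posrange"
	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	err := annotations.NewMixedExponentialCustomHistogramsWarning("my_histogram", posrange.PositionRange{})
	// Both the generic warning sentinel and the specific one should match.
	fmt.Println(errors.Is(err, annotations.PromQLWarning))                           // expected: true
	fmt.Println(errors.Is(err, annotations.MixedExponentialCustomHistogramsWarning)) // expected: true
}
```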
@@ -2973,10 +2973,8 @@ func assertAPIError(t *testing.T, got *apiError, exp errorType) {
 	t.Helper()

 	if exp == errorNone {
-		//nolint:testifylint
 		require.Nil(t, got)
 	} else {
-		//nolint:testifylint
 		require.NotNil(t, got)
 		require.Equal(t, exp, got.typ, "(%q)", got)
 	}
@@ -261,7 +261,7 @@ NumberLiteral {
 LineComment { "#" ![\n]* }

 number {
-  (std.digit+ ("." std.digit*)? | "." std.digit+) (("e" | "E") ("+" | "-")? std.digit+)? |
+  (std.digit+ (("_")? std.digit)* ("." std.digit+ (("_")? std.digit)*)? | "." std.digit+ (("_")? std.digit)*) (("e" | "E") ("+" | "-")? std.digit+ (("_")? std.digit)*)? |
   "0x" (std.digit | $[a-fA-F])+
 }
 StringLiteral { // TODO: This is for JS, make this work for PromQL.
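The updated `number` rule teaches the lezer grammar Go-style underscore separators in number literals, e.g. `1_000_000`, `0.123_456`, or `2e1_0`. A quick hedged check against the server side, assuming the Go PromQL lexer accepts the same forms this grammar now matches:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Forms the updated grammar accepts; assumption: the Go lexer agrees.
	for _, input := range []string{"1_000_000", "0.123_456", "2e1_0"} {
		expr, err := parser.ParseExpr(input)
		if err != nil {
			fmt.Println(input, "->", err)
			continue
		}
		fmt.Println(input, "->", expr.String())
	}
}
```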