Merge remote-tracking branch 'prometheus/main' into arve/close-engine

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Arve Knudsen 2024-07-01 10:47:21 +02:00
commit e8ae8cf012
114 changed files with 6282 additions and 1863 deletions


@@ -174,7 +174,7 @@ jobs:
     with:
       args: --verbose
       # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-      version: v1.59.0
+      version: v1.59.1
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'


@@ -29,6 +29,7 @@ linters:
     - unused
     - usestdlibvars
     - whitespace
+    - loggercheck

 issues:
   max-same-issues: 0


@@ -2,16 +2,23 @@
 ## unreleased

-This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 50.
+## 2.53.0 / 2024-06-16
+
+This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 75.

-* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980
-* [CHANGE] Runtime: Change GOGC threshold from 100 to 50 #14176
-* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061
-* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` to measure the time it takes to restore a rule group. #13974
+* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980 #14048
+* [CHANGE] Runtime: Change GOGC threshold from 100 to 75 #14176 #14285
+* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 #14216 #14273
+* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` metric to measure the time it takes to restore a rule group. #13974
 * [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991
 * [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620
-* [BUGFIX] OTLP: Don't generate target_info unless at least one identifying label is defined. #13991
-* [BUGFIX] OTLP: Don't generate target_info unless there are metrics. #13991
+* [BUGFIX] OTLP: Don't generate target_info unless there are metrics and at least one identifying label is defined. #13991
+* [BUGFIX] Scrape: Do not try to ingest native histograms when the native histograms feature is turned off. This happened when protobuf scrape was enabled by, for example, the created time feature. #13987
+* [BUGFIX] Scaleway SD: Use the instance's public IP if no private IP is available as the `__address__` meta label. #13941
+* [BUGFIX] Query logger: Do not leak file descriptors on error. #13948
+* [BUGFIX] TSDB: Let queries with heavy regex matches be cancelled and not use up the CPU. #14096 #14103 #14118 #14199
+* [BUGFIX] API: Do not warn if result count is equal to the limit, only when exceeding the limit for the series, label-names and label-values APIs. #14116
+* [BUGFIX] TSDB: Fix head stats and hooks when replaying a corrupted snapshot. #14079

 ## 2.52.1 / 2024-05-29


@@ -16,7 +16,7 @@ Maintainers for specific parts of the codebase:
 * `model/histogram` and other code related to native histograms: Björn Rabenstein (<beorn@grafana.com> / @beorn7),
   George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `storage`
-  * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
+  * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie), Nicolás Pazos (<npazosmendez@gmail.com> / @npazosmendez), Alex Greenbank (<alex.greenbank@grafana.com> / @alexgreenbank)
   * `otlptranslator`: Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)


@@ -91,7 +91,7 @@ endif
 promql/parser/generated_parser.y.go: promql/parser/generated_parser.y
 	@echo ">> running goyacc to generate the .go file."
-	@goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+	@$(FIRST_GOPATH)/bin/goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y

 .PHONY: clean-parser
 clean-parser:


@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.0
+GOLANGCI_LINT_VERSION ?= v1.59.1
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))


@@ -149,6 +149,8 @@ Changes for a patch release or release candidate should be merged into the previ
 Bump the version in the `VERSION` file and update `CHANGELOG.md`. Do this in a proper PR pointing to the release branch as this gives others the opportunity to chime in on the release in general and on the addition to the changelog in particular. For a release candidate, append something like `-rc.0` to the version (with the corresponding changes to the tag name, the release name etc.).

+When updating `CHANGELOG.md`, look at all PRs included in the release since the last release and verify whether they need a changelog entry.
+
 Note that `CHANGELOG.md` should only document changes relevant to users of Prometheus, including external API changes, performance improvements, and new features. Do not document changes of internal interfaces, code refactorings and clean-ups, changes to the build process, etc. People interested in these are asked to refer to the git history.

 For release candidates still update `CHANGELOG.md`, but when you cut the final release later, merge all the changes from the pre-releases into the one final update.


@@ -1 +1 @@
-2.52.1
+2.53.0


@@ -445,6 +445,9 @@ func main() {
 	serverOnlyFlag(a, "alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
 		Default("10000").IntVar(&cfg.notifier.QueueCapacity)

+	serverOnlyFlag(a, "alertmanager.drain-notification-queue-on-shutdown", "Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down.").
+		Default("true").BoolVar(&cfg.notifier.DrainOnShutdown)
+
 	// TODO: Remove in Prometheus 3.0.
 	alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String()
@@ -1206,7 +1209,7 @@ func main() {
 	}
 	if agentMode {
 		// WAL storage.
-		opts := cfg.agent.ToAgentOptions()
+		opts := cfg.agent.ToAgentOptions(cfg.tsdb.OutOfOrderTimeWindow)
 		cancel := make(chan struct{})
 		g.Add(
 			func() error {
@@ -1242,6 +1245,7 @@ func main() {
 					"TruncateFrequency", cfg.agent.TruncateFrequency,
 					"MinWALTime", cfg.agent.MinWALTime,
 					"MaxWALTime", cfg.agent.MaxWALTime,
+					"OutOfOrderTimeWindow", cfg.agent.OutOfOrderTimeWindow,
 				)
 				localStorage.Set(db, 0)
@@ -1745,17 +1749,22 @@ type agentOptions struct {
 	TruncateFrequency      model.Duration
 	MinWALTime, MaxWALTime model.Duration
 	NoLockfile             bool
+	OutOfOrderTimeWindow   int64
 }

-func (opts agentOptions) ToAgentOptions() agent.Options {
+func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Options {
+	if outOfOrderTimeWindow < 0 {
+		outOfOrderTimeWindow = 0
+	}
 	return agent.Options{
 		WALSegmentSize:       int(opts.WALSegmentSize),
 		WALCompression:       wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType),
 		StripeSize:           opts.StripeSize,
 		TruncateFrequency:    time.Duration(opts.TruncateFrequency),
 		MinWALTime:           durationToInt64Millis(time.Duration(opts.MinWALTime)),
 		MaxWALTime:           durationToInt64Millis(time.Duration(opts.MaxWALTime)),
 		NoLockfile:           opts.NoLockfile,
+		OutOfOrderTimeWindow: outOfOrderTimeWindow,
 	}
 }


@@ -72,7 +72,7 @@ func (p *queryLogTest) waitForPrometheus() error {
 	var err error
 	for x := 0; x < 20; x++ {
 		var r *http.Response
-		if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == 200 {
+		if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == http.StatusOK {
 			break
 		}
 		time.Sleep(500 * time.Millisecond)


@@ -22,6 +22,7 @@ import (
 	"time"

 	"github.com/go-kit/log"
+	"github.com/oklog/ulid"

 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/textparse"
@@ -191,6 +192,10 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 			if quiet {
 				break
 			}
+			// Empty block, don't print.
+			if block.Compare(ulid.ULID{}) == 0 {
+				break
+			}
 			blocks, err := db.Blocks()
 			if err != nil {
 				return fmt.Errorf("get blocks: %w", err)


@@ -998,6 +998,7 @@ var expectedConf = &Config{
 					HostNetworkingHost: "localhost",
 					RefreshInterval:    model.Duration(60 * time.Second),
 					HTTPClientConfig:   config.DefaultHTTPClientConfig,
+					MatchFirstNetwork:  true,
 				},
 			},
 		},


@@ -42,28 +42,29 @@
 )

 const (
 	ec2Label                     = model.MetaLabelPrefix + "ec2_"
 	ec2LabelAMI                  = ec2Label + "ami"
 	ec2LabelAZ                   = ec2Label + "availability_zone"
 	ec2LabelAZID                 = ec2Label + "availability_zone_id"
 	ec2LabelArch                 = ec2Label + "architecture"
 	ec2LabelIPv6Addresses        = ec2Label + "ipv6_addresses"
 	ec2LabelInstanceID           = ec2Label + "instance_id"
 	ec2LabelInstanceLifecycle    = ec2Label + "instance_lifecycle"
 	ec2LabelInstanceState        = ec2Label + "instance_state"
 	ec2LabelInstanceType         = ec2Label + "instance_type"
 	ec2LabelOwnerID              = ec2Label + "owner_id"
 	ec2LabelPlatform             = ec2Label + "platform"
+	ec2LabelPrimaryIPv6Addresses = ec2Label + "primary_ipv6_addresses"
 	ec2LabelPrimarySubnetID      = ec2Label + "primary_subnet_id"
 	ec2LabelPrivateDNS           = ec2Label + "private_dns_name"
 	ec2LabelPrivateIP            = ec2Label + "private_ip"
 	ec2LabelPublicDNS            = ec2Label + "public_dns_name"
 	ec2LabelPublicIP             = ec2Label + "public_ip"
 	ec2LabelRegion               = ec2Label + "region"
 	ec2LabelSubnetID             = ec2Label + "subnet_id"
 	ec2LabelTag                  = ec2Label + "tag_"
 	ec2LabelVPCID                = ec2Label + "vpc_id"
 	ec2LabelSeparator            = ","
 )

 // DefaultEC2SDConfig is the default EC2 SD configuration.
@@ -317,6 +318,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 		var subnets []string
 		var ipv6addrs []string
+		var primaryipv6addrs []string
 		subnetsMap := make(map[string]struct{})
 		for _, eni := range inst.NetworkInterfaces {
 			if eni.SubnetId == nil {
@@ -330,6 +332,15 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 			for _, ipv6addr := range eni.Ipv6Addresses {
 				ipv6addrs = append(ipv6addrs, *ipv6addr.Ipv6Address)
+				if *ipv6addr.IsPrimaryIpv6 {
+					// We might have to extend the slice with more than one element;
+					// any empty strings left in the list are intentional, to keep
+					// the position/device-index information.
+					for int64(len(primaryipv6addrs)) <= *eni.Attachment.DeviceIndex {
+						primaryipv6addrs = append(primaryipv6addrs, "")
+					}
+					primaryipv6addrs[*eni.Attachment.DeviceIndex] = *ipv6addr.Ipv6Address
+				}
 			}
 		}
 		labels[ec2LabelSubnetID] = model.LabelValue(
@@ -342,6 +353,12 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 				strings.Join(ipv6addrs, ec2LabelSeparator) +
 					ec2LabelSeparator)
 		}
+		if len(primaryipv6addrs) > 0 {
+			labels[ec2LabelPrimaryIPv6Addresses] = model.LabelValue(
+				ec2LabelSeparator +
+					strings.Join(primaryipv6addrs, ec2LabelSeparator) +
+					ec2LabelSeparator)
+		}
 	}

 	for _, t := range inst.Tags {


@@ -97,6 +97,7 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic
 		resp.Body.Close()
 	}()

+	//nolint:usestdlibvars
 	if resp.StatusCode/100 != 2 {
 		return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode)
 	}


@@ -87,6 +87,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
 		resp.Body.Close()
 	}()

+	//nolint:usestdlibvars
 	if resp.StatusCode/100 != 2 {
 		return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode)
 	}


@@ -120,6 +120,16 @@ func Name(n string) func(*Manager) {
 	}
 }

+// Updatert sets the updatert (update interval) of the manager.
+// Used to speed up tests.
+func Updatert(u time.Duration) func(*Manager) {
+	return func(m *Manager) {
+		m.mtx.Lock()
+		defer m.mtx.Unlock()
+		m.updatert = u
+	}
+}
+
 // HTTPClientOptions sets the list of HTTP client options to expose to
 // Discoverers. It is up to Discoverers to choose to use the options provided.
 func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) {
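Like `Name` and `HTTPClientOptions`, `Updatert` is a functional option consumed by `NewManager`. A sketch of how a test might apply it; the exact `NewManager` argument list and the `CreateAndRegisterSDMetrics` helper are assumptions based on this version of the codebase, not part of the commit:

	package main

	import (
		"context"
		"time"

		"github.com/go-kit/log"
		"github.com/prometheus/client_golang/prometheus"

		"github.com/prometheus/prometheus/discovery"
	)

	func main() {
		reg := prometheus.NewRegistry()
		sdMetrics, err := discovery.CreateAndRegisterSDMetrics(reg)
		if err != nil {
			panic(err)
		}
		// Poll for target-group updates every 100ms rather than the default
		// interval, so a test does not wait several seconds per sync.
		mgr := discovery.NewManager(context.Background(), log.NewNopLogger(), reg, sdMetrics,
			discovery.Updatert(100*time.Millisecond))
		go func() { _ = mgr.Run() }()
		// ... ApplyConfig and consume mgr.SyncCh() as usual ...
	}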


@@ -22,8 +22,10 @@ import (
 	"strconv"
 	"time"

+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/client"
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
@@ -58,6 +60,7 @@ var DefaultDockerSDConfig = DockerSDConfig{
 	Filters:            []Filter{},
 	HostNetworkingHost: "localhost",
 	HTTPClientConfig:   config.DefaultHTTPClientConfig,
+	MatchFirstNetwork:  true,
 }

 func init() {
@@ -73,7 +76,8 @@ type DockerSDConfig struct {
 	Filters            []Filter       `yaml:"filters"`
 	HostNetworkingHost string         `yaml:"host_networking_host"`
 	RefreshInterval    model.Duration `yaml:"refresh_interval"`
+	MatchFirstNetwork  bool           `yaml:"match_first_network"`
 }

 // NewDiscovererMetrics implements discovery.Config.
@@ -119,6 +123,7 @@ type DockerDiscovery struct {
 	port               int
 	hostNetworkingHost string
 	filters            filters.Args
+	matchFirstNetwork  bool
 }

 // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
@@ -131,6 +136,7 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discove
 	d := &DockerDiscovery{
 		port:               conf.Port,
 		hostNetworkingHost: conf.HostNetworkingHost,
+		matchFirstNetwork:  conf.MatchFirstNetwork,
 	}

 	hostURL, err := url.Parse(conf.Host)
@@ -202,6 +208,11 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 		return nil, fmt.Errorf("error while computing network labels: %w", err)
 	}

+	allContainers := make(map[string]types.Container)
+	for _, c := range containers {
+		allContainers[c.ID] = c
+	}
+
 	for _, c := range containers {
 		if len(c.Names) == 0 {
 			continue
@@ -218,7 +229,50 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 			commonLabels[dockerLabelContainerLabelPrefix+ln] = v
 		}

-		for _, n := range c.NetworkSettings.Networks {
+		networks := c.NetworkSettings.Networks
+		containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode)
+		if len(networks) == 0 {
+			// Try to look up shared networks.
+			for {
+				if containerNetworkMode.IsContainer() {
+					tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
+					if !exists {
+						break
+					}
+					networks = tmpContainer.NetworkSettings.Networks
+					containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode)
+					if len(networks) > 0 {
+						break
+					}
+				} else {
+					break
+				}
+			}
+		}
+
+		if d.matchFirstNetwork && len(networks) > 1 {
+			// Match user-defined network.
+			if containerNetworkMode.IsUserDefined() {
+				networkMode := string(containerNetworkMode)
+				networks = map[string]*network.EndpointSettings{networkMode: networks[networkMode]}
+			} else {
+				// Get the first network if the container network mode has the "none" value.
+				// This case appears under certain conditions:
+				// 1. The container was created with network set to "--net=none".
+				// 2. The "none" network was disconnected.
+				// 3. A user-defined network was reconnected.
+				var first string
+				for k, n := range networks {
+					if n != nil {
+						first = k
+						break
+					}
+				}
+				networks = map[string]*network.EndpointSettings{first: networks[first]}
+			}
+		}
+
+		for _, n := range networks {
 			var added bool

 			for _, p := range c.Ports {


@@ -16,6 +16,7 @@ package moby
 import (
 	"context"
 	"fmt"
+	"sort"
 	"testing"

 	"github.com/go-kit/log"
@@ -59,7 +60,7 @@ host: %s
 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Len(t, tg.Targets, 3)
+	require.Len(t, tg.Targets, 6)

 	for i, lbls := range []model.LabelSet{
 		{
@@ -113,9 +114,259 @@ host: %s
 			"__meta_docker_container_network_mode": "host",
 			"__meta_docker_network_ip":              "",
 		},
+		{
+			"__address__": "172.20.0.2:3306",
+			"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysql",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_name": "/dockersd_mysql",
+			"__meta_docker_container_network_mode": "dockersd_private",
+			"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.20.0.2",
+			"__meta_docker_network_name": "dockersd_private",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "3306",
+		},
+		{
+			"__address__": "172.20.0.2:33060",
+			"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysql",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_name": "/dockersd_mysql",
+			"__meta_docker_container_network_mode": "dockersd_private",
+			"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.20.0.2",
+			"__meta_docker_network_name": "dockersd_private",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "33060",
+		},
+		{
+			"__address__": "172.20.0.2:9104",
+			"__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysqlexporter",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
+			"__meta_docker_container_name": "/dockersd_mysql_exporter",
+			"__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.20.0.2",
+			"__meta_docker_network_name": "dockersd_private",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "9104",
+		},
 	} {
 		t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
 			require.Equal(t, lbls, tg.Targets[i])
 		})
 	}
 }
+
+func TestDockerSDRefreshMatchAllNetworks(t *testing.T) {
+	sdmock := NewSDMock(t, "dockerprom")
+	sdmock.Setup()
+
+	e := sdmock.Endpoint()
+	url := e[:len(e)-1]
+	cfgString := fmt.Sprintf(`
+---
+host: %s
+`, url)
+	var cfg DockerSDConfig
+	require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
+
+	cfg.MatchFirstNetwork = false
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+	d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics)
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	tgs, err := d.refresh(ctx)
+	require.NoError(t, err)
+
+	require.Len(t, tgs, 1)
+
+	tg := tgs[0]
+	require.NotNil(t, tg)
+	require.NotNil(t, tg.Targets)
+	require.Len(t, tg.Targets, 9)
+
+	sortFunc := func(labelSets []model.LabelSet) {
+		sort.Slice(labelSets, func(i, j int) bool {
+			return labelSets[i]["__address__"] < labelSets[j]["__address__"]
+		})
+	}
+	expected := []model.LabelSet{
+		{
+			"__address__": "172.19.0.2:9100",
+			"__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "node",
+			"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
+			"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
+			"__meta_docker_container_label_prometheus_job": "node",
+			"__meta_docker_container_name": "/dockersd_node_1",
+			"__meta_docker_container_network_mode": "dockersd_default",
+			"__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.19.0.2",
+			"__meta_docker_network_label_com_docker_compose_network": "default",
+			"__meta_docker_network_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_network_label_com_docker_compose_version": "1.25.0",
+			"__meta_docker_network_name": "dockersd_default",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "9100",
+		},
+		{
+			"__address__": "172.19.0.3:80",
+			"__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "noport",
+			"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
+			"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
+			"__meta_docker_container_label_prometheus_job": "noport",
+			"__meta_docker_container_name": "/dockersd_noport_1",
+			"__meta_docker_container_network_mode": "dockersd_default",
+			"__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.19.0.3",
+			"__meta_docker_network_label_com_docker_compose_network": "default",
+			"__meta_docker_network_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_network_label_com_docker_compose_version": "1.25.0",
+			"__meta_docker_network_name": "dockersd_default",
+			"__meta_docker_network_scope": "local",
+		},
+		{
+			"__address__": "localhost",
+			"__meta_docker_container_id": "54ed6cc5c0988260436cb0e739b7b6c9cad6c439a93b4c4fdbe9753e1c94b189",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "host_networking",
+			"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
+			"__meta_docker_container_name": "/dockersd_host_networking_1",
+			"__meta_docker_container_network_mode": "host",
+			"__meta_docker_network_ip": "",
+		},
+		{
+			"__address__": "172.20.0.2:3306",
+			"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysql",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_name": "/dockersd_mysql",
+			"__meta_docker_container_network_mode": "dockersd_private",
+			"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.20.0.2",
+			"__meta_docker_network_name": "dockersd_private",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "3306",
+		},
+		{
+			"__address__": "172.20.0.2:33060",
+			"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysql",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_name": "/dockersd_mysql",
+			"__meta_docker_container_network_mode": "dockersd_private",
+			"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.20.0.2",
+			"__meta_docker_network_name": "dockersd_private",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "33060",
+		},
+		{
+			"__address__": "172.21.0.2:3306",
+			"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysql",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_name": "/dockersd_mysql",
+			"__meta_docker_container_network_mode": "dockersd_private",
+			"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.21.0.2",
+			"__meta_docker_network_name": "dockersd_private1",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "3306",
+		},
+		{
+			"__address__": "172.21.0.2:33060",
+			"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysql",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_name": "/dockersd_mysql",
+			"__meta_docker_container_network_mode": "dockersd_private",
+			"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.21.0.2",
+			"__meta_docker_network_name": "dockersd_private1",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "33060",
+		},
+		{
+			"__address__": "172.21.0.2:9104",
+			"__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysqlexporter",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
+			"__meta_docker_container_name": "/dockersd_mysql_exporter",
+			"__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.21.0.2",
+			"__meta_docker_network_name": "dockersd_private1",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "9104",
+		},
+		{
+			"__address__": "172.20.0.2:9104",
+			"__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysqlexporter",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
+			"__meta_docker_container_name": "/dockersd_mysql_exporter",
+			"__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.20.0.2",
+			"__meta_docker_network_name": "dockersd_private",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "9104",
+		},
+	}
+
+	sortFunc(expected)
+	sortFunc(tg.Targets)
+
+	for i, lbls := range expected {
+		t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
+			require.Equal(t, lbls, tg.Targets[i])
+		})
+	}
+}


@@ -128,5 +128,105 @@
       }
     },
     "Mounts": []
+  },
+  {
+    "Id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+    "Names": [
+      "/dockersd_mysql"
+    ],
+    "Image": "mysql:5.7.29",
+    "ImageID": "sha256:5d9483f9a7b21c87e0f5b9776c3e06567603c28c0062013eda127c968175f5e8",
+    "Command": "mysqld",
+    "Created": 1616273136,
+    "Ports": [
+      {
+        "PrivatePort": 3306,
+        "Type": "tcp"
+      },
+      {
+        "PrivatePort": 33060,
+        "Type": "tcp"
+      }
+    ],
+    "Labels": {
+      "com.docker.compose.project": "dockersd",
+      "com.docker.compose.service": "mysql",
+      "com.docker.compose.version": "2.2.2"
+    },
+    "State": "running",
+    "Status": "Up 40 seconds",
+    "HostConfig": {
+      "NetworkMode": "dockersd_private"
+    },
+    "NetworkSettings": {
+      "Networks": {
+        "dockersd_private": {
+          "IPAMConfig": null,
+          "Links": null,
+          "Aliases": null,
+          "NetworkID": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+          "EndpointID": "80f8a61b37701a9991bb98c75ddd23fd9b7c16b5575ca81343f6b44ff4a2a9d9",
+          "Gateway": "172.20.0.1",
+          "IPAddress": "172.20.0.2",
+          "IPPrefixLen": 16,
+          "IPv6Gateway": "",
+          "GlobalIPv6Address": "",
+          "GlobalIPv6PrefixLen": 0,
+          "MacAddress": "02:42:ac:14:00:0a",
+          "DriverOpts": null
+        },
+        "dockersd_private1": {
+          "IPAMConfig": {},
+          "Links": null,
+          "Aliases": [
+            "mysql",
+            "mysql",
+            "f9ade4b83199"
+          ],
+          "NetworkID": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+          "EndpointID": "f80921d10e78c99a5907705aae75befea40c3d3e9f820e66ab392f7274be16b8",
+          "Gateway": "172.21.0.1",
+          "IPAddress": "172.21.0.2",
+          "IPPrefixLen": 24,
+          "IPv6Gateway": "",
+          "GlobalIPv6Address": "",
+          "GlobalIPv6PrefixLen": 0,
+          "MacAddress": "02:42:ac:15:00:02",
+          "DriverOpts": null
+        }
+      }
+    },
+    "Mounts": []
+  },
+  {
+    "Id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
+    "Names": [
+      "/dockersd_mysql_exporter"
+    ],
+    "Image": "prom/mysqld-exporter:latest",
+    "ImageID": "sha256:121b8a7cd0525dd89aaec58ad7d34c3bb3714740e5a67daf6510ccf71ab219a9",
+    "Command": "/bin/mysqld_exporter",
+    "Created": 1616273136,
+    "Ports": [
+      {
+        "PrivatePort": 9104,
+        "Type": "tcp"
+      }
+    ],
+    "Labels": {
+      "com.docker.compose.project": "dockersd",
+      "com.docker.compose.service": "mysqlexporter",
+      "com.docker.compose.version": "2.2.2",
+      "maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>"
+    },
+    "State": "running",
+    "Status": "Up 40 seconds",
+    "HostConfig": {
+      "NetworkMode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8"
+    },
+    "NetworkSettings": {
+      "Networks": {}
+    },
+    "Mounts": []
   }
 ]


@@ -111,5 +111,59 @@
     "Containers": {},
     "Options": {},
     "Labels": {}
+  },
+  {
+    "Name": "dockersd_private",
+    "Id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+    "Created": "2022-03-25T09:21:17.718370976+08:00",
+    "Scope": "local",
+    "Driver": "bridge",
+    "EnableIPv6": false,
+    "IPAM": {
+      "Driver": "default",
+      "Options": null,
+      "Config": [
+        {
+          "Subnet": "172.20.0.1/16"
+        }
+      ]
+    },
+    "Internal": false,
+    "Attachable": false,
+    "Ingress": false,
+    "ConfigFrom": {
+      "Network": ""
+    },
+    "ConfigOnly": false,
+    "Containers": {},
+    "Options": {},
+    "Labels": {}
+  },
+  {
+    "Name": "dockersd_private1",
+    "Id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+    "Created": "2022-03-25T09:21:17.718370976+08:00",
+    "Scope": "local",
+    "Driver": "bridge",
+    "EnableIPv6": false,
+    "IPAM": {
+      "Driver": "default",
+      "Options": null,
+      "Config": [
+        {
+          "Subnet": "172.21.0.1/16"
+        }
+      ]
+    },
+    "Internal": false,
+    "Attachable": false,
+    "Ingress": false,
+    "ConfigFrom": {
+      "Network": ""
+    },
+    "ConfigOnly": false,
+    "Containers": {},
+    "Options": {},
+    "Labels": {}
   }
 ]


@@ -146,12 +146,18 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 			openstackLabelUserID:    model.LabelValue(s.UserID),
 		}

-		flavorID, ok := s.Flavor["id"].(string)
-		if !ok {
-			level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string")
-			continue
+		flavorName, nameOk := s.Flavor["original_name"].(string)
+		// "original_name" is only available for microversion >= 2.47. It was added in favor of "id".
+		if !nameOk {
+			flavorID, idOk := s.Flavor["id"].(string)
+			if !idOk {
+				level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string")
+				continue
+			}
+			labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)
+		} else {
+			labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorName)
 		}
-		labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)

 		imageID, ok := s.Image["id"].(string)
 		if ok {


@@ -84,7 +84,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
 		},
 		{
 			"__address__":                      model.LabelValue("10.0.0.31:0"),
-			"__meta_openstack_instance_flavor": model.LabelValue("1"),
+			"__meta_openstack_instance_flavor": model.LabelValue("m1.medium"),
 			"__meta_openstack_instance_id":     model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba"),
 			"__meta_openstack_instance_image":  model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"),
 			"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
@@ -96,7 +96,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
 		},
 		{
 			"__address__":                      model.LabelValue("10.0.0.33:0"),
-			"__meta_openstack_instance_flavor": model.LabelValue("4"),
+			"__meta_openstack_instance_flavor": model.LabelValue("m1.small"),
 			"__meta_openstack_instance_id":     model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"),
 			"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
 			"__meta_openstack_instance_name":   model.LabelValue("merp"),
@@ -108,7 +108,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
 		},
 		{
 			"__address__":                      model.LabelValue("10.0.0.34:0"),
-			"__meta_openstack_instance_flavor": model.LabelValue("4"),
+			"__meta_openstack_instance_flavor": model.LabelValue("m1.small"),
 			"__meta_openstack_instance_id":     model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"),
 			"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
 			"__meta_openstack_instance_name":   model.LabelValue("merp"),


@@ -427,13 +427,17 @@ const serverListBody = `
         "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
         "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
         "flavor": {
-            "id": "1",
-            "links": [
-                {
-                    "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
-                    "rel": "bookmark"
-                }
-            ]
+            "vcpus": 2,
+            "ram": 4096,
+            "disk": 0,
+            "ephemeral": 0,
+            "swap": 0,
+            "original_name": "m1.medium",
+            "extra_specs": {
+                "aggregate_instance_extra_specs:general": "true",
+                "hw:mem_page_size": "large",
+                "hw:vif_multiqueue_enabled": "true"
+            }
         },
         "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba",
         "security_groups": [
@@ -498,13 +502,17 @@ const serverListBody = `
         "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
         "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
         "flavor": {
-            "id": "4",
-            "links": [
-                {
-                    "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
-                    "rel": "bookmark"
-                }
-            ]
+            "vcpus": 2,
+            "ram": 4096,
+            "disk": 0,
+            "ephemeral": 0,
+            "swap": 0,
+            "original_name": "m1.small",
+            "extra_specs": {
+                "aggregate_instance_extra_specs:general": "true",
+                "hw:mem_page_size": "large",
+                "hw:vif_multiqueue_enabled": "true"
+            }
         },
         "id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb",
         "security_groups": [


@@ -50,6 +50,7 @@ The Prometheus monitoring server
 | <code class="text-nowrap">--rules.alert.resend-delay</code> | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` |
 | <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. When set, "query.max-concurrency" may need to be adjusted accordingly. Use with server mode only. | `4` |
 | <code class="text-nowrap">--alertmanager.notification-queue-capacity</code> | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` |
+| <code class="text-nowrap">--alertmanager.drain-notification-queue-on-shutdown</code> | Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down. Use with server mode only. | `true` |
 | <code class="text-nowrap">--query.lookback-delta</code> | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` |
 | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
 | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |


@@ -941,6 +941,9 @@ tls_config:
 # The host to use if the container is in host networking mode.
 [ host_networking_host: <string> | default = "localhost" ]

+# Match the first network if the container has multiple networks defined, thus avoiding collecting duplicate targets.
+[ match_first_network: <boolean> | default = true ]
+
 # Optional filters to limit the discovery process to a subset of available
 # resources.
 # The available filters are listed in the upstream documentation:
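For example, a minimal `docker_sd_configs` section that opts out of the new default and collects one target per attached network might look like this (a sketch; the socket path is an assumption):

    scrape_configs:
      - job_name: docker
        docker_sd_configs:
          - host: unix:///var/run/docker.sock
            # Collect a target for every network the container is attached to,
            # instead of only the first matched network.
            match_first_network: false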
@@ -1229,6 +1232,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
 * `__meta_ec2_ipv6_addresses`: comma separated list of IPv6 addresses assigned to the instance's network interfaces, if present
 * `__meta_ec2_owner_id`: the ID of the AWS account that owns the EC2 instance
 * `__meta_ec2_platform`: the Operating System platform, set to 'windows' on Windows servers, absent otherwise
+* `__meta_ec2_primary_ipv6_addresses`: comma separated list of the primary IPv6 addresses of the instance, if present. The list is ordered based on the position of each corresponding network interface in the attachment order.
 * `__meta_ec2_primary_subnet_id`: the subnet ID of the primary network interface, if available
 * `__meta_ec2_private_dns_name`: the private DNS name of the instance, if available
 * `__meta_ec2_private_ip`: the private IP address of the instance, if present
@@ -1359,7 +1363,7 @@ interface.
 The following meta labels are available on targets during [relabeling](#relabel_config):

 * `__meta_openstack_address_pool`: the pool of the private IP.
-* `__meta_openstack_instance_flavor`: the flavor ID of the OpenStack instance.
+* `__meta_openstack_instance_flavor`: the flavor name of the OpenStack instance, or the flavor ID if the flavor name isn't available.
 * `__meta_openstack_instance_id`: the OpenStack instance ID.
 * `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using.
 * `__meta_openstack_instance_name`: the OpenStack instance name.
@@ -1608,7 +1612,16 @@ and serves as an interface to plug in custom service discovery mechanisms.
 It reads a set of files containing a list of zero or more
-`<static_config>`s. Changes to all defined files are detected via disk watches
-and applied immediately. Files may be provided in YAML or JSON format. Only
-changes resulting in well-formed target groups are applied.
+`<static_config>`s. Changes to all defined files are detected via disk watches
+and applied immediately.
+
+While those individual files are watched for changes, the parent directory is
+also watched implicitly. This is to handle [atomic
+renaming](https://github.com/fsnotify/fsnotify/blob/c1467c02fba575afdb5f4201072ab8403bbf00f4/README.md?plain=1#L128)
+efficiently and to detect new files that match the configured globs. This may
+cause issues if the parent directory contains a large number of other files,
+as each of these files will be watched too, even though the events related to
+them are not relevant.
+
+Files may be provided in YAML or JSON format. Only changes resulting in
+well-formed target groups are applied.

 Files must contain a list of static configs, using these formats:
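As a concrete illustration of the globbing and watching behavior described above, here is a sketch of a `file_sd_configs` entry (the paths are assumptions); keeping the watched directory free of unrelated files avoids the extra watches just mentioned:

    scrape_configs:
      - job_name: file-sd-example
        file_sd_configs:
          # Both the matching files and the targets/ directory itself are
          # watched, so atomic renames and newly created files are picked up.
          - files:
              - targets/*.json
              - targets/*.yml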
@@ -3813,6 +3826,10 @@ NOTE: Out-of-order ingestion is an experimental feature, but you do not need any
 # into the TSDB, i.e. it is an in-order sample or an out-of-order/out-of-bounds sample
 # that is within the out-of-order window, or (b) too-old, i.e. not in-order
 # and before the out-of-order window.
+#
+# When out_of_order_time_window is greater than 0, it also affects the experimental agent. It allows
+# the agent's WAL to accept out-of-order samples that fall within the specified time window relative
+# to the timestamp of the last appended sample for the same series.
 [ out_of_order_time_window: <duration> | default = 0s ]
 ```
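For instance, a minimal configuration fragment enabling a 30-minute window (the duration is an arbitrary example) would be:

    storage:
      tsdb:
        # Accept samples up to 30 minutes older than the newest sample of a
        # series; per the note above, this now also applies to the agent's
        # WAL when running in agent mode.
        out_of_order_time_window: 30m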


@@ -79,7 +79,12 @@ labels of the 1-element output vector from the input vector.
 ## `ceil()`

 `ceil(v instant-vector)` rounds the sample values of all elements in `v` up to
-the nearest integer.
+the nearest integer value greater than or equal to `v`.
+
+* `ceil(+Inf) = +Inf`
+* `ceil(±0) = ±0`
+* `ceil(1.49) = 2.0`
+* `ceil(1.78) = 2.0`

 ## `changes()`
@@ -173,7 +178,12 @@ Special cases are:
 ## `floor()`

 `floor(v instant-vector)` rounds the sample values of all elements in `v` down
-to the nearest integer.
+to the nearest integer value smaller than or equal to `v`.
+
+* `floor(+Inf) = +Inf`
+* `floor(±0) = ±0`
+* `floor(1.49) = 1.0`
+* `floor(1.78) = 1.0`

 ## `histogram_avg()`


@@ -61,8 +61,11 @@ A Prometheus server's data directory looks something like this:
 Note that a limitation of local storage is that it is not clustered or
 replicated. Thus, it is not arbitrarily scalable or durable in the face of
-drive or node outages and should be managed like any other single node
-database. The use of RAID is suggested for storage availability, and
-[snapshots](querying/api.md#snapshot) are recommended for backups. With proper
+drive or node outages and should be managed like any other single node
+database.
+
+[Snapshots](querying/api.md#snapshot) are recommended for backups. Backups
+made without snapshots run the risk of losing data that was recorded since
+the last WAL sync, which typically happens every two hours. With proper
 architecture, it is possible to retain years of data in local storage.

 Alternatively, external storage may be used via the
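Assuming the TSDB admin API is enabled (Prometheus started with `--web.enable-admin-api`), a snapshot for such a backup can be taken with a single call to the snapshot endpoint, e.g. `curl -XPOST http://localhost:9090/api/v1/admin/tsdb/snapshot`, which writes a consistent copy of the current data under the `snapshots/` directory of the data directory.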


@@ -34,6 +34,20 @@
         description: 'Prometheus %(prometheusName)s has failed to refresh SD with mechanism {{$labels.mechanism}}.' % $._config,
       },
     },
+    {
+      alert: 'PrometheusKubernetesListWatchFailures',
+      expr: |||
+        increase(prometheus_sd_kubernetes_failures_total{%(prometheusSelector)s}[5m]) > 0
+      ||| % $._config,
+      'for': '15m',
+      labels: {
+        severity: 'warning',
+      },
+      annotations: {
+        summary: 'Requests in Kubernetes SD are failing.',
+        description: 'Kubernetes service discovery of Prometheus %(prometheusName)s is experiencing {{ printf "%%.0f" $value }} failures with LIST/WATCH requests to the Kubernetes API in the last 5 minutes.' % $._config,
+      },
+    },
     {
       alert: 'PrometheusNotificationQueueRunningFull',
       expr: |||
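Rendered through the mixin's config, the new alert comes out roughly as the following rule; the `job="prometheus"` selector and the instance-based description are assumed placeholder values for `prometheusSelector` and `prometheusName`:

    groups:
      - name: prometheus
        rules:
          - alert: PrometheusKubernetesListWatchFailures
            expr: increase(prometheus_sd_kubernetes_failures_total{job="prometheus"}[5m]) > 0
            for: 15m
            labels:
              severity: warning
            annotations:
              summary: Requests in Kubernetes SD are failing.
              description: Kubernetes service discovery of Prometheus {{ $labels.instance }} is experiencing {{ printf "%.0f" $value }} failures with LIST/WATCH requests to the Kubernetes API in the last 5 minutes.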

go.mod

@@ -146,10 +146,10 @@ require (
 	github.com/hashicorp/cronexpr v1.1.2 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-	github.com/hashicorp/go-hclog v1.5.0 // indirect
+	github.com/hashicorp/go-hclog v1.6.3 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
 	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
 	github.com/hashicorp/serf v0.10.1 // indirect

go.sum

@@ -369,9 +369,8 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
-github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
 github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@@ -383,8 +382,8 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
-github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=

View file

@ -30,11 +30,12 @@ import (
type FloatHistogram struct { type FloatHistogram struct {
// Counter reset information. // Counter reset information.
CounterResetHint CounterResetHint CounterResetHint CounterResetHint
// Currently valid schema numbers are -4 <= n <= 8. They are all for // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
// base-2 bucket schemas, where 1 is a bucket boundary in each case, and // They are all for base-2 bucket schemas, where 1 is a bucket boundary in
// then each power of two is divided into 2^n logarithmic buckets. Or // each case, and then each power of two is divided into 2^n logarithmic buckets.
// in other words, each bucket boundary is the previous boundary times // Or in other words, each bucket boundary is the previous boundary times
// 2^(2^-n). // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by
// the CustomValues field.
Schema int32 Schema int32
// Width of the zero bucket. // Width of the zero bucket.
ZeroThreshold float64 ZeroThreshold float64
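The boundary rule described in the Schema comment can be made concrete: for an
exponential schema n, the upper bound of bucket index i is 2^(i * 2^-n), so each
power of two is split into 2^n buckets. A minimal sketch of just that formula;
it deliberately ignores the special-casing of the highest buckets that the real
bound calculation has to handle.

package main

import (
	"fmt"
	"math"
)

// upperBound computes 2^(i * 2^-schema), the upper boundary of bucket i.
func upperBound(i int, schema int32) float64 {
	return math.Exp2(float64(i) * math.Exp2(-float64(schema)))
}

func main() {
	// For schema 2, each power of two is split into 2^2 = 4 buckets:
	// 1, ~1.19, ~1.41, ~1.68, 2, ...
	for i := 0; i <= 4; i++ {
		fmt.Printf("bucket %d upper bound: %g\n", i, upperBound(i, 2))
	}
}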
@ -49,6 +50,16 @@ type FloatHistogram struct {
// Observation counts in buckets. Each represents an absolute count and // Observation counts in buckets. Each represents an absolute count and
// must be zero or positive. // must be zero or positive.
PositiveBuckets, NegativeBuckets []float64 PositiveBuckets, NegativeBuckets []float64
// Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
// This slice is interned, to be treated as immutable and copied by reference.
// These numbers should be strictly increasing. This field is only used when the
// schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans
// and NegativeBuckets fields are not used in that case.
CustomValues []float64
}
func (h *FloatHistogram) UsesCustomBuckets() bool {
return IsCustomBucketsSchema(h.Schema)
} }
// Copy returns a deep copy of the Histogram. // Copy returns a deep copy of the Histogram.
@ -56,28 +67,37 @@ func (h *FloatHistogram) Copy() *FloatHistogram {
c := FloatHistogram{ c := FloatHistogram{
CounterResetHint: h.CounterResetHint, CounterResetHint: h.CounterResetHint,
Schema: h.Schema, Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.ZeroCount,
Count: h.Count, Count: h.Count,
Sum: h.Sum, Sum: h.Sum,
} }
if h.UsesCustomBuckets() {
if len(h.CustomValues) != 0 {
c.CustomValues = make([]float64, len(h.CustomValues))
copy(c.CustomValues, h.CustomValues)
}
} else {
c.ZeroThreshold = h.ZeroThreshold
c.ZeroCount = h.ZeroCount
if len(h.NegativeSpans) != 0 {
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
copy(c.NegativeSpans, h.NegativeSpans)
}
if len(h.NegativeBuckets) != 0 {
c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
copy(c.NegativeBuckets, h.NegativeBuckets)
}
}
if len(h.PositiveSpans) != 0 { if len(h.PositiveSpans) != 0 {
c.PositiveSpans = make([]Span, len(h.PositiveSpans)) c.PositiveSpans = make([]Span, len(h.PositiveSpans))
copy(c.PositiveSpans, h.PositiveSpans) copy(c.PositiveSpans, h.PositiveSpans)
} }
if len(h.NegativeSpans) != 0 {
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
copy(c.NegativeSpans, h.NegativeSpans)
}
if len(h.PositiveBuckets) != 0 { if len(h.PositiveBuckets) != 0 {
c.PositiveBuckets = make([]float64, len(h.PositiveBuckets)) c.PositiveBuckets = make([]float64, len(h.PositiveBuckets))
copy(c.PositiveBuckets, h.PositiveBuckets) copy(c.PositiveBuckets, h.PositiveBuckets)
} }
if len(h.NegativeBuckets) != 0 {
c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
copy(c.NegativeBuckets, h.NegativeBuckets)
}
return &c return &c
} }
@ -87,32 +107,53 @@ func (h *FloatHistogram) Copy() *FloatHistogram {
func (h *FloatHistogram) CopyTo(to *FloatHistogram) { func (h *FloatHistogram) CopyTo(to *FloatHistogram) {
to.CounterResetHint = h.CounterResetHint to.CounterResetHint = h.CounterResetHint
to.Schema = h.Schema to.Schema = h.Schema
to.ZeroThreshold = h.ZeroThreshold
to.ZeroCount = h.ZeroCount
to.Count = h.Count to.Count = h.Count
to.Sum = h.Sum to.Sum = h.Sum
if h.UsesCustomBuckets() {
to.ZeroThreshold = 0
to.ZeroCount = 0
to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
to.CustomValues = resize(to.CustomValues, len(h.CustomValues))
copy(to.CustomValues, h.CustomValues)
} else {
to.ZeroThreshold = h.ZeroThreshold
to.ZeroCount = h.ZeroCount
to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
copy(to.NegativeSpans, h.NegativeSpans)
to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
copy(to.NegativeBuckets, h.NegativeBuckets)
to.CustomValues = clearIfNotNil(to.CustomValues)
}
to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
copy(to.PositiveSpans, h.PositiveSpans) copy(to.PositiveSpans, h.PositiveSpans)
to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
copy(to.NegativeSpans, h.NegativeSpans)
to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
copy(to.PositiveBuckets, h.PositiveBuckets) copy(to.PositiveBuckets, h.PositiveBuckets)
to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
copy(to.NegativeBuckets, h.NegativeBuckets)
} }
// CopyToSchema works like Copy, but the returned deep copy has the provided // CopyToSchema works like Copy, but the returned deep copy has the provided
// target schema, which must be ≤ the original schema (i.e. it must have a lower // target schema, which must be ≤ the original schema (i.e. it must have a lower
// resolution). // resolution). This method panics if a custom buckets schema is used in the
// receiving FloatHistogram or as the provided targetSchema.
func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram {
if targetSchema == h.Schema { if targetSchema == h.Schema {
// Fast path. // Fast path.
return h.Copy() return h.Copy()
} }
if h.UsesCustomBuckets() {
panic(fmt.Errorf("cannot reduce resolution to %d when there are custom buckets", targetSchema))
}
if IsCustomBucketsSchema(targetSchema) {
panic("cannot reduce resolution to custom buckets schema")
}
if targetSchema > h.Schema { if targetSchema > h.Schema {
panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema)) panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema))
} }
@ -185,6 +226,9 @@ func (h *FloatHistogram) TestExpression() string {
if m.ZeroThreshold != 0 { if m.ZeroThreshold != 0 {
res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold)) res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold))
} }
if m.UsesCustomBuckets() {
res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues))
}
addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string {
if len(spans) > 1 { if len(spans) > 1 {
@ -210,14 +254,18 @@ func (h *FloatHistogram) TestExpression() string {
return "{{" + strings.Join(res, " ") + "}}" return "{{" + strings.Join(res, " ") + "}}"
} }
// ZeroBucket returns the zero bucket. // ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets.
func (h *FloatHistogram) ZeroBucket() Bucket[float64] { func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
if h.UsesCustomBuckets() {
panic("histograms with custom buckets have no zero bucket")
}
return Bucket[float64]{ return Bucket[float64]{
Lower: -h.ZeroThreshold, Lower: -h.ZeroThreshold,
Upper: h.ZeroThreshold, Upper: h.ZeroThreshold,
LowerInclusive: true, LowerInclusive: true,
UpperInclusive: true, UpperInclusive: true,
Count: h.ZeroCount, Count: h.ZeroCount,
// Index is irrelevant for the zero bucket.
} }
} }
@ -263,9 +311,18 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
// //
// The method reconciles differences in the zero threshold and in the schema, and // The method reconciles differences in the zero threshold and in the schema, and
// changes them if needed. The other histogram will not be modified in any case. // changes them if needed. The other histogram will not be modified in any case.
// Adding is currently only supported between 2 exponential histograms, or between
// 2 custom buckets histograms with the exact same custom bounds.
// //
// This method returns a pointer to the receiving histogram for convenience. // This method returns a pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
return nil, ErrHistogramsIncompatibleSchema
}
if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
return nil, ErrHistogramsIncompatibleBounds
}
switch { switch {
case other.CounterResetHint == h.CounterResetHint: case other.CounterResetHint == h.CounterResetHint:
// Adding apples to apples, all good. No need to change anything. // Adding apples to apples, all good. No need to change anything.
@ -290,19 +347,28 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
// TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
} }
otherZeroCount := h.reconcileZeroBuckets(other) if !h.UsesCustomBuckets() {
h.ZeroCount += otherZeroCount otherZeroCount := h.reconcileZeroBuckets(other)
h.ZeroCount += otherZeroCount
}
h.Count += other.Count h.Count += other.Count
h.Sum += other.Sum h.Sum += other.Sum
var ( var (
hPositiveSpans = h.PositiveSpans hPositiveSpans = h.PositiveSpans
hPositiveBuckets = h.PositiveBuckets hPositiveBuckets = h.PositiveBuckets
hNegativeSpans = h.NegativeSpans
hNegativeBuckets = h.NegativeBuckets
otherPositiveSpans = other.PositiveSpans otherPositiveSpans = other.PositiveSpans
otherPositiveBuckets = other.PositiveBuckets otherPositiveBuckets = other.PositiveBuckets
)
if h.UsesCustomBuckets() {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
return h, nil
}
var (
hNegativeSpans = h.NegativeSpans
hNegativeBuckets = h.NegativeBuckets
otherNegativeSpans = other.NegativeSpans otherNegativeSpans = other.NegativeSpans
otherNegativeBuckets = other.NegativeBuckets otherNegativeBuckets = other.NegativeBuckets
) )
@ -321,24 +387,40 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
return h return h, nil
} }
// Sub works like Add but subtracts the other histogram. // Sub works like Add but subtracts the other histogram.
func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) {
otherZeroCount := h.reconcileZeroBuckets(other) if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
h.ZeroCount -= otherZeroCount return nil, ErrHistogramsIncompatibleSchema
}
if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
return nil, ErrHistogramsIncompatibleBounds
}
if !h.UsesCustomBuckets() {
otherZeroCount := h.reconcileZeroBuckets(other)
h.ZeroCount -= otherZeroCount
}
h.Count -= other.Count h.Count -= other.Count
h.Sum -= other.Sum h.Sum -= other.Sum
var ( var (
hPositiveSpans = h.PositiveSpans hPositiveSpans = h.PositiveSpans
hPositiveBuckets = h.PositiveBuckets hPositiveBuckets = h.PositiveBuckets
hNegativeSpans = h.NegativeSpans
hNegativeBuckets = h.NegativeBuckets
otherPositiveSpans = other.PositiveSpans otherPositiveSpans = other.PositiveSpans
otherPositiveBuckets = other.PositiveBuckets otherPositiveBuckets = other.PositiveBuckets
)
if h.UsesCustomBuckets() {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
return h, nil
}
var (
hNegativeSpans = h.NegativeSpans
hNegativeBuckets = h.NegativeBuckets
otherNegativeSpans = other.NegativeSpans otherNegativeSpans = other.NegativeSpans
otherNegativeBuckets = other.NegativeBuckets otherNegativeBuckets = other.NegativeBuckets
) )
@ -356,7 +438,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
return h return h, nil
} }
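Since Add and Sub now return an error rather than silently producing a result
for incompatible histograms, callers need to handle the two sentinel errors. A
minimal sketch, assuming the model/histogram import path; the histogram
contents are illustrative only.

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	custom := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           3,
		Sum:             5,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 2},
		CustomValues:    []float64{1, 2},
	}
	exponential := &histogram.FloatHistogram{Schema: 0, Count: 1, Sum: 1}

	// Mixing a custom buckets histogram with an exponential one is rejected.
	if _, err := custom.Add(exponential); errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
		fmt.Println("skipping incompatible sample:", err)
	}
}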
// Equals returns true if the given float histogram matches exactly. // Equals returns true if the given float histogram matches exactly.
@ -365,29 +447,42 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
// but they must represent the same bucket layout to match. // but they must represent the same bucket layout to match.
// Sum, Count, ZeroCount and bucket values are compared based on their bit patterns // Sum, Count, ZeroCount and bucket values are compared based on their bit patterns
// because this method is about data equality rather than mathematical equality. // because this method is about data equality rather than mathematical equality.
// We ignore fields that are not used based on the exponential / custom buckets schema,
// but check fields where differences may cause unintended behaviour even if they are not
// supposed to be used according to the schema.
func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
if h2 == nil { if h2 == nil {
return false return false
} }
if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || if h.Schema != h2.Schema ||
math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) ||
math.Float64bits(h.Count) != math.Float64bits(h2.Count) || math.Float64bits(h.Count) != math.Float64bits(h2.Count) ||
math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
return false return false
} }
if h.UsesCustomBuckets() {
if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
return false
}
}
if h.ZeroThreshold != h2.ZeroThreshold ||
math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) {
return false
}
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
if !FloatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
return false
}
if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
return false return false
} }
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { if !FloatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
return false
}
if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
return false
}
if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
return false return false
} }
@ -403,6 +498,7 @@ func (h *FloatHistogram) Size() int {
negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32). negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32).
posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64). posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64).
negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64). negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64).
customBoundSize := len(h.CustomValues) * 8 // 8 bytes (float64).
// Total size of the struct. // Total size of the struct.
@ -417,9 +513,10 @@ func (h *FloatHistogram) Size() int {
// fh.NegativeSpans is 24 bytes. // fh.NegativeSpans is 24 bytes.
// fh.PositiveBuckets is 24 bytes. // fh.PositiveBuckets is 24 bytes.
// fh.NegativeBuckets is 24 bytes. // fh.NegativeBuckets is 24 bytes.
structSize := 144 // fh.CustomValues is 24 bytes.
structSize := 168
return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize
} }
// Compact eliminates empty buckets at the beginning and end of each span, then // Compact eliminates empty buckets at the beginning and end of each span, then
@ -504,6 +601,12 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
if h.Count < previous.Count { if h.Count < previous.Count {
return true return true
} }
if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, previous.CustomValues)) {
// Mark that something has changed or that the application has been restarted. However, this does
// not matter so much since the change in schema will be handled directly in the chunks and PromQL
// functions.
return true
}
if h.Schema > previous.Schema { if h.Schema > previous.Schema {
return true return true
} }
@ -609,7 +712,7 @@ func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] {
// positive buckets in descending order (starting at the highest bucket and // positive buckets in descending order (starting at the highest bucket and
// going down towards the zero bucket). // going down towards the zero bucket).
func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] {
it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues)
return &it return &it
} }
@ -617,7 +720,7 @@ func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64]
// negative buckets in ascending order (starting at the lowest bucket and going // negative buckets in ascending order (starting at the lowest bucket and going
// up towards the zero bucket). // up towards the zero bucket).
func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] {
it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
return &it return &it
} }
@ -629,7 +732,7 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64]
func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
return &allFloatBucketIterator{ return &allFloatBucketIterator{
h: h, h: h,
leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false), leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil),
rightIter: h.floatBucketIterator(true, 0, h.Schema), rightIter: h.floatBucketIterator(true, 0, h.Schema),
state: -1, state: -1,
} }
@ -643,30 +746,52 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] {
return &allFloatBucketIterator{ return &allFloatBucketIterator{
h: h, h: h,
leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true), leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues),
rightIter: h.floatBucketIterator(false, 0, h.Schema), rightIter: h.floatBucketIterator(false, 0, h.Schema),
state: -1, state: -1,
} }
} }
// Validate validates consistency between span and bucket slices. Also, buckets are checked // Validate validates consistency between span and bucket slices. Also, buckets are checked
// against negative values. // against negative values. We check to make sure there are no unexpected fields or field values
// based on the exponential / custom buckets schema.
// We do not check for h.Count being at least as large as the sum of the // We do not check for h.Count being at least as large as the sum of the
// counts in the buckets because floating point precision issues can // counts in the buckets because floating point precision issues can
// create false positives here. // create false positives here.
func (h *FloatHistogram) Validate() error { func (h *FloatHistogram) Validate() error {
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err)
}
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err)
}
var nCount, pCount float64 var nCount, pCount float64
err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) if h.UsesCustomBuckets() {
if err != nil { if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err) return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
return fmt.Errorf("custom buckets: must have zero count of 0")
}
if h.ZeroThreshold != 0 {
return fmt.Errorf("custom buckets: must have zero threshold of 0")
}
if len(h.NegativeSpans) > 0 {
return fmt.Errorf("custom buckets: must not have negative spans")
}
if len(h.NegativeBuckets) > 0 {
return fmt.Errorf("custom buckets: must not have negative buckets")
}
} else {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err)
}
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err)
}
err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false)
if err != nil {
return fmt.Errorf("negative side: %w", err)
}
if h.CustomValues != nil {
return fmt.Errorf("histogram with exponential schema must not have custom bounds")
}
} }
err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false) err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
if err != nil { if err != nil {
return fmt.Errorf("positive side: %w", err) return fmt.Errorf("positive side: %w", err)
} }
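The custom-buckets branch above can be exercised by validating a histogram that
breaks one of its rules. A sketch with illustrative values; the zero bucket
fields must stay zero whenever custom buckets are in use.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		ZeroCount:       1, // not allowed together with custom buckets
		Count:           2,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{2},
		CustomValues:    []float64{10},
	}
	fmt.Println(h.Validate()) // custom buckets: must have zero count of 0
}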
@ -790,17 +915,25 @@ func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
// If positive is true, the returned iterator iterates through the positive // If positive is true, the returned iterator iterates through the positive
// buckets, otherwise through the negative buckets. // buckets, otherwise through the negative buckets.
// //
// If absoluteStartValue is < the lowest absolute value of any upper bucket // Only for exponential schemas, if absoluteStartValue is < the lowest absolute
// boundary, the iterator starts with the first bucket. Otherwise, it will skip // value of any upper bucket boundary, the iterator starts with the first bucket.
// all buckets with an absolute value of their upper boundary ≤ // Otherwise, it will skip all buckets with an absolute value of their upper boundary ≤
// absoluteStartValue. // absoluteStartValue. For custom bucket schemas, absoluteStartValue is ignored and
// no buckets are skipped.
// //
// targetSchema must be ≤ the schema of FloatHistogram (and of course within the // targetSchema must be ≤ the schema of FloatHistogram (and of course within the
// legal values for schemas in general). The buckets are merged to match the // legal values for schemas in general). The buckets are merged to match the
// targetSchema prior to iterating (without mutating FloatHistogram). // targetSchema prior to iterating (without mutating FloatHistogram), but custom buckets
// schemas cannot be merged with other schemas.
func (h *FloatHistogram) floatBucketIterator( func (h *FloatHistogram) floatBucketIterator(
positive bool, absoluteStartValue float64, targetSchema int32, positive bool, absoluteStartValue float64, targetSchema int32,
) floatBucketIterator { ) floatBucketIterator {
if h.UsesCustomBuckets() && targetSchema != h.Schema {
panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
}
if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
}
if targetSchema > h.Schema { if targetSchema > h.Schema {
panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema)) panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
} }
@ -816,6 +949,7 @@ func (h *FloatHistogram) floatBucketIterator(
if positive { if positive {
i.spans = h.PositiveSpans i.spans = h.PositiveSpans
i.buckets = h.PositiveBuckets i.buckets = h.PositiveBuckets
i.customValues = h.CustomValues
} else { } else {
i.spans = h.NegativeSpans i.spans = h.NegativeSpans
i.buckets = h.NegativeBuckets i.buckets = h.NegativeBuckets
@ -825,14 +959,15 @@ func (h *FloatHistogram) floatBucketIterator(
// reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators. // reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators.
func newReverseFloatBucketIterator( func newReverseFloatBucketIterator(
spans []Span, buckets []float64, schema int32, positive bool, spans []Span, buckets []float64, schema int32, positive bool, customValues []float64,
) reverseFloatBucketIterator { ) reverseFloatBucketIterator {
r := reverseFloatBucketIterator{ r := reverseFloatBucketIterator{
baseBucketIterator: baseBucketIterator[float64, float64]{ baseBucketIterator: baseBucketIterator[float64, float64]{
schema: schema, schema: schema,
spans: spans, spans: spans,
buckets: buckets, buckets: buckets,
positive: positive, positive: positive,
customValues: customValues,
}, },
} }
@ -946,9 +1081,9 @@ func (i *floatBucketIterator) Next() bool {
} }
} }
// Skip buckets before absoluteStartValue. // Skip buckets before absoluteStartValue for exponential schemas.
// TODO(beorn7): Maybe do something more efficient than this recursive call. // TODO(beorn7): Maybe do something more efficient than this recursive call.
if !i.boundReachedStartValue && getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue { if !i.boundReachedStartValue && IsExponentialSchema(i.targetSchema) && getBoundExponential(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
return i.Next() return i.Next()
} }
i.boundReachedStartValue = true i.boundReachedStartValue = true
@ -1010,14 +1145,7 @@ func (i *allFloatBucketIterator) Next() bool {
case 0: case 0:
i.state = 1 i.state = 1
if i.h.ZeroCount > 0 { if i.h.ZeroCount > 0 {
i.currBucket = Bucket[float64]{ i.currBucket = i.h.ZeroBucket()
Lower: -i.h.ZeroThreshold,
Upper: i.h.ZeroThreshold,
LowerInclusive: true,
UpperInclusive: true,
Count: i.h.ZeroCount,
// Index is irrelevant for the zero bucket.
}
return true return true
} }
return i.Next() return i.Next()
@ -1076,7 +1204,7 @@ func addBuckets(
for _, spanB := range spansB { for _, spanB := range spansB {
indexB += spanB.Offset indexB += spanB.Offset
for j := 0; j < int(spanB.Length); j++ { for j := 0; j < int(spanB.Length); j++ {
if lowerThanThreshold && getBound(indexB, schema) <= threshold { if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold {
goto nextLoop goto nextLoop
} }
lowerThanThreshold = false lowerThanThreshold = false
@ -1177,7 +1305,7 @@ func addBuckets(
return spansA, bucketsA return spansA, bucketsA
} }
func floatBucketsMatch(b1, b2 []float64) bool { func FloatBucketsMatch(b1, b2 []float64) bool {
if len(b1) != len(b2) { if len(b1) != len(b2) {
return false return false
} }
@ -1191,7 +1319,15 @@ func floatBucketsMatch(b1, b2 []float64) bool {
// ReduceResolution reduces the float histogram's spans, buckets into target schema. // ReduceResolution reduces the float histogram's spans, buckets into target schema.
// The target schema must be smaller than the current float histogram's schema. // The target schema must be smaller than the current float histogram's schema.
// This will panic if the histogram has custom buckets or if the target schema is
// a custom buckets schema.
func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram {
if h.UsesCustomBuckets() {
panic("cannot reduce resolution when there are custom buckets")
}
if IsCustomBucketsSchema(targetSchema) {
panic("cannot reduce resolution to custom buckets schema")
}
if targetSchema >= h.Schema { if targetSchema >= h.Schema {
panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
} }
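For the exponential schemas that remain supported here, lowering the schema by
d merges 2^d adjacent source buckets into each target bucket. A small sketch
with illustrative bucket contents, assuming the model/histogram import path.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          1,
		Count:           3,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 2},
	}
	// Going from schema 1 to schema 0 merges each pair of adjacent buckets.
	reduced := h.ReduceResolution(0)
	fmt.Println(reduced.Schema) // 0
}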

File diff suppressed because it is too large.

View file

@ -20,14 +20,33 @@ import (
"strings" "strings"
) )
var ( const (
ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") ExponentialSchemaMax int32 = 8
ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") ExponentialSchemaMin int32 = -4
ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") CustomBucketsSchema int32 = -53
ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative")
ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided")
) )
var (
ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")
ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative")
ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative")
ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided")
ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few")
ErrHistogramCustomBucketsInvalid = errors.New("histogram custom bounds must be in strictly increasing order")
ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite")
ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas")
ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds")
)
func IsCustomBucketsSchema(s int32) bool {
return s == CustomBucketsSchema
}
func IsExponentialSchema(s int32) bool {
return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax
}
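A quick illustration of the two predicates; CustomBucketsSchema (-53) lies well
outside the exponential range, so they never overlap. Assumes the
model/histogram import path.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	fmt.Println(histogram.IsCustomBucketsSchema(histogram.CustomBucketsSchema)) // true
	fmt.Println(histogram.IsExponentialSchema(8))                               // true
	fmt.Println(histogram.IsExponentialSchema(9))                               // false, out of range
}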
// BucketCount is a type constraint for the count in a bucket, which can be // BucketCount is a type constraint for the count in a bucket, which can be
// float64 (for type FloatHistogram) or uint64 (for type Histogram). // float64 (for type FloatHistogram) or uint64 (for type Histogram).
type BucketCount interface { type BucketCount interface {
@ -115,6 +134,8 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct {
currCount IBC // Count in the current bucket. currCount IBC // Count in the current bucket.
currIdx int32 // The actual bucket index. currIdx int32 // The actual bucket index.
customValues []float64 // Bounds (usually upper) for histograms with custom buckets.
} }
func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] { func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] {
@ -128,14 +149,19 @@ func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] {
Index: b.currIdx, Index: b.currIdx,
} }
if b.positive { if b.positive {
bucket.Upper = getBound(b.currIdx, schema) bucket.Upper = getBound(b.currIdx, schema, b.customValues)
bucket.Lower = getBound(b.currIdx-1, schema) bucket.Lower = getBound(b.currIdx-1, schema, b.customValues)
} else { } else {
bucket.Lower = -getBound(b.currIdx, schema) bucket.Lower = -getBound(b.currIdx, schema, b.customValues)
bucket.Upper = -getBound(b.currIdx-1, schema) bucket.Upper = -getBound(b.currIdx-1, schema, b.customValues)
}
if IsCustomBucketsSchema(schema) {
bucket.LowerInclusive = b.currIdx == 0
bucket.UpperInclusive = true
} else {
bucket.LowerInclusive = bucket.Lower < 0
bucket.UpperInclusive = bucket.Upper > 0
} }
bucket.LowerInclusive = bucket.Lower < 0
bucket.UpperInclusive = bucket.Upper > 0
return bucket return bucket
} }
@ -393,7 +419,55 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB
return nil return nil
} }
func getBound(idx, schema int32) float64 { func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error {
prev := math.Inf(-1)
for _, curr := range bounds {
if curr <= prev {
return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid)
}
prev = curr
}
if prev == math.Inf(1) {
return fmt.Errorf("last +Inf bound must not be explicitly defined: %w", ErrHistogramCustomBucketsInfinite)
}
var spanBuckets int
var totalSpanLength int
for n, span := range spans {
if span.Offset < 0 {
return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset)
}
spanBuckets += int(span.Length)
totalSpanLength += int(span.Length) + int(span.Offset)
}
if spanBuckets != numBuckets {
return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch)
}
if (len(bounds) + 1) < totalSpanLength {
return fmt.Errorf("only %d custom bounds defined which is insufficient to cover total span length of %d: %w", len(bounds), totalSpanLength, ErrHistogramCustomBucketsMismatch)
}
return nil
}
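The strictly-increasing requirement enforced here surfaces to callers through
Validate. A sketch with deliberately out-of-order, illustrative bounds:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           2,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 1},
		CustomValues:    []float64{10, 5}, // 5 after 10: not strictly increasing
	}
	// Wraps ErrHistogramCustomBucketsInvalid.
	fmt.Println(h.Validate())
}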
func getBound(idx, schema int32, customValues []float64) float64 {
if IsCustomBucketsSchema(schema) {
length := int32(len(customValues))
switch {
case idx > length || idx < -1:
panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length))
case idx == length:
return math.Inf(1)
case idx == -1:
return math.Inf(-1)
default:
return customValues[idx]
}
}
return getBoundExponential(idx, schema)
}
func getBoundExponential(idx, schema int32) float64 {
// Here a bit of context about the behavior for the last bucket counting // Here a bit of context about the behavior for the last bucket counting
// regular numbers (called simply "last bucket" below) and the bucket // regular numbers (called simply "last bucket" below) and the bucket
// counting observations of ±Inf (called "inf bucket" below, with an idx // counting observations of ±Inf (called "inf bucket" below, with an idx
@ -703,3 +777,10 @@ func reduceResolution[IBC InternalBucketCount](
return targetSpans, targetBuckets return targetSpans, targetBuckets
} }
func clearIfNotNil[T any](items []T) []T {
if items == nil {
return nil
}
return items[:0]
}

View file

@ -21,7 +21,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestGetBound(t *testing.T) { func TestGetBoundExponential(t *testing.T) {
scenarios := []struct { scenarios := []struct {
idx int32 idx int32
schema int32 schema int32
@ -105,7 +105,7 @@ func TestGetBound(t *testing.T) {
} }
for _, s := range scenarios { for _, s := range scenarios {
got := getBound(s.idx, s.schema) got := getBoundExponential(s.idx, s.schema)
if s.want != got { if s.want != got {
require.Equal(t, s.want, got, "idx %d, schema %d", s.idx, s.schema) require.Equal(t, s.want, got, "idx %d, schema %d", s.idx, s.schema)
} }

View file

@ -49,11 +49,12 @@ const (
type Histogram struct { type Histogram struct {
// Counter reset information. // Counter reset information.
CounterResetHint CounterResetHint CounterResetHint CounterResetHint
// Currently valid schema numbers are -4 <= n <= 8. They are all for // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
// base-2 bucket schemas, where 1 is a bucket boundary in each case, and // They are all for base-2 bucket schemas, where 1 is a bucket boundary in
// then each power of two is divided into 2^n logarithmic buckets. Or // each case, and then each power of two is divided into 2^n logarithmic buckets.
// in other words, each bucket boundary is the previous boundary times // Or in other words, each bucket boundary is the previous boundary times
// 2^(2^-n). // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by
// the CustomValues field.
Schema int32 Schema int32
// Width of the zero bucket. // Width of the zero bucket.
ZeroThreshold float64 ZeroThreshold float64
@ -69,6 +70,12 @@ type Histogram struct {
// count. All following ones are deltas relative to the previous // count. All following ones are deltas relative to the previous
// element. // element.
PositiveBuckets, NegativeBuckets []int64 PositiveBuckets, NegativeBuckets []int64
// Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
// This slice is interned, to be treated as immutable and copied by reference.
// These numbers should be strictly increasing. This field is only used when the
// schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans
// and NegativeBuckets fields are not used in that case.
CustomValues []float64
} }
// A Span defines a continuous sequence of buckets. // A Span defines a continuous sequence of buckets.
@ -80,33 +87,46 @@ type Span struct {
Length uint32 Length uint32
} }
func (h *Histogram) UsesCustomBuckets() bool {
return IsCustomBucketsSchema(h.Schema)
}
// Copy returns a deep copy of the Histogram. // Copy returns a deep copy of the Histogram.
func (h *Histogram) Copy() *Histogram { func (h *Histogram) Copy() *Histogram {
c := Histogram{ c := Histogram{
CounterResetHint: h.CounterResetHint, CounterResetHint: h.CounterResetHint,
Schema: h.Schema, Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.ZeroCount,
Count: h.Count, Count: h.Count,
Sum: h.Sum, Sum: h.Sum,
} }
if h.UsesCustomBuckets() {
if len(h.CustomValues) != 0 {
c.CustomValues = make([]float64, len(h.CustomValues))
copy(c.CustomValues, h.CustomValues)
}
} else {
c.ZeroThreshold = h.ZeroThreshold
c.ZeroCount = h.ZeroCount
if len(h.NegativeSpans) != 0 {
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
copy(c.NegativeSpans, h.NegativeSpans)
}
if len(h.NegativeBuckets) != 0 {
c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
copy(c.NegativeBuckets, h.NegativeBuckets)
}
}
if len(h.PositiveSpans) != 0 { if len(h.PositiveSpans) != 0 {
c.PositiveSpans = make([]Span, len(h.PositiveSpans)) c.PositiveSpans = make([]Span, len(h.PositiveSpans))
copy(c.PositiveSpans, h.PositiveSpans) copy(c.PositiveSpans, h.PositiveSpans)
} }
if len(h.NegativeSpans) != 0 {
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
copy(c.NegativeSpans, h.NegativeSpans)
}
if len(h.PositiveBuckets) != 0 { if len(h.PositiveBuckets) != 0 {
c.PositiveBuckets = make([]int64, len(h.PositiveBuckets)) c.PositiveBuckets = make([]int64, len(h.PositiveBuckets))
copy(c.PositiveBuckets, h.PositiveBuckets) copy(c.PositiveBuckets, h.PositiveBuckets)
} }
if len(h.NegativeBuckets) != 0 {
c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
copy(c.NegativeBuckets, h.NegativeBuckets)
}
return &c return &c
} }
@ -116,22 +136,36 @@ func (h *Histogram) Copy() *Histogram {
func (h *Histogram) CopyTo(to *Histogram) { func (h *Histogram) CopyTo(to *Histogram) {
to.CounterResetHint = h.CounterResetHint to.CounterResetHint = h.CounterResetHint
to.Schema = h.Schema to.Schema = h.Schema
to.ZeroThreshold = h.ZeroThreshold
to.ZeroCount = h.ZeroCount
to.Count = h.Count to.Count = h.Count
to.Sum = h.Sum to.Sum = h.Sum
if h.UsesCustomBuckets() {
to.ZeroThreshold = 0
to.ZeroCount = 0
to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
to.CustomValues = resize(to.CustomValues, len(h.CustomValues))
copy(to.CustomValues, h.CustomValues)
} else {
to.ZeroThreshold = h.ZeroThreshold
to.ZeroCount = h.ZeroCount
to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
copy(to.NegativeSpans, h.NegativeSpans)
to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
copy(to.NegativeBuckets, h.NegativeBuckets)
to.CustomValues = clearIfNotNil(to.CustomValues)
}
to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
copy(to.PositiveSpans, h.PositiveSpans) copy(to.PositiveSpans, h.PositiveSpans)
to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
copy(to.NegativeSpans, h.NegativeSpans)
to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
copy(to.PositiveBuckets, h.PositiveBuckets) copy(to.PositiveBuckets, h.PositiveBuckets)
to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
copy(to.NegativeBuckets, h.NegativeBuckets)
} }
// String returns a string representation of the Histogram. // String returns a string representation of the Histogram.
@ -165,8 +199,11 @@ func (h *Histogram) String() string {
return sb.String() return sb.String()
} }
// ZeroBucket returns the zero bucket. // ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets.
func (h *Histogram) ZeroBucket() Bucket[uint64] { func (h *Histogram) ZeroBucket() Bucket[uint64] {
if h.UsesCustomBuckets() {
panic("histograms with custom buckets have no zero bucket")
}
return Bucket[uint64]{ return Bucket[uint64]{
Lower: -h.ZeroThreshold, Lower: -h.ZeroThreshold,
Upper: h.ZeroThreshold, Upper: h.ZeroThreshold,
@ -179,14 +216,14 @@ func (h *Histogram) ZeroBucket() Bucket[uint64] {
// PositiveBucketIterator returns a BucketIterator to iterate over all positive // PositiveBucketIterator returns a BucketIterator to iterate over all positive
// buckets in ascending order (starting next to the zero bucket and going up). // buckets in ascending order (starting next to the zero bucket and going up).
func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] { func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] {
it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues)
return &it return &it
} }
// NegativeBucketIterator returns a BucketIterator to iterate over all negative // NegativeBucketIterator returns a BucketIterator to iterate over all negative
// buckets in descending order (starting next to the zero bucket and going down). // buckets in descending order (starting next to the zero bucket and going down).
func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] { func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] {
it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
return &it return &it
} }
@ -207,30 +244,42 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
// but they must represent the same bucket layout to match. // but they must represent the same bucket layout to match.
// Sum is compared based on its bit pattern because this method // Sum is compared based on its bit pattern because this method
// is about data equality rather than mathematical equality. // is about data equality rather than mathematical equality.
// We ignore fields that are not used based on the exponential / custom buckets schema,
// but check fields where differences may cause unintended behaviour even if they are not
// supposed to be used according to the schema.
func (h *Histogram) Equals(h2 *Histogram) bool { func (h *Histogram) Equals(h2 *Histogram) bool {
if h2 == nil { if h2 == nil {
return false return false
} }
if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || if h.Schema != h2.Schema || h.Count != h2.Count ||
h.ZeroCount != h2.ZeroCount || h.Count != h2.Count ||
math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
return false return false
} }
if h.UsesCustomBuckets() {
if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
return false
}
}
if h.ZeroThreshold != h2.ZeroThreshold || h.ZeroCount != h2.ZeroCount {
return false
}
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
return false
}
if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
return false return false
} }
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) { if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) {
return false return false
} }
if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
return false
}
return true return true
} }
@ -321,17 +370,36 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram {
} }
fh.CounterResetHint = h.CounterResetHint fh.CounterResetHint = h.CounterResetHint
fh.Schema = h.Schema fh.Schema = h.Schema
fh.ZeroThreshold = h.ZeroThreshold
fh.ZeroCount = float64(h.ZeroCount)
fh.Count = float64(h.Count) fh.Count = float64(h.Count)
fh.Sum = h.Sum fh.Sum = h.Sum
if h.UsesCustomBuckets() {
fh.ZeroThreshold = 0
fh.ZeroCount = 0
fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans)
fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets)
fh.CustomValues = resize(fh.CustomValues, len(h.CustomValues))
copy(fh.CustomValues, h.CustomValues)
} else {
fh.ZeroThreshold = h.ZeroThreshold
fh.ZeroCount = float64(h.ZeroCount)
fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans))
copy(fh.NegativeSpans, h.NegativeSpans)
fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets))
var currentNegative float64
for i, b := range h.NegativeBuckets {
currentNegative += float64(b)
fh.NegativeBuckets[i] = currentNegative
}
fh.CustomValues = clearIfNotNil(fh.CustomValues)
}
fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans)) fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans))
copy(fh.PositiveSpans, h.PositiveSpans) copy(fh.PositiveSpans, h.PositiveSpans)
fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans))
copy(fh.NegativeSpans, h.NegativeSpans)
fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets)) fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets))
var currentPositive float64 var currentPositive float64
for i, b := range h.PositiveBuckets { for i, b := range h.PositiveBuckets {
@ -339,13 +407,6 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram {
fh.PositiveBuckets[i] = currentPositive fh.PositiveBuckets[i] = currentPositive
} }
fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets))
var currentNegative float64
for i, b := range h.NegativeBuckets {
currentNegative += float64(b)
fh.NegativeBuckets[i] = currentNegative
}
return fh return fh
} }
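The running sums above are the delta decoding step: the integer Histogram
stores each bucket as a delta relative to the previous one, while the
FloatHistogram stores absolute counts. A standalone sketch of just that
decoding, with illustrative values:

package main

import "fmt"

func main() {
	// Delta-encoded buckets {2, -1, 3} decode to absolute counts {2, 1, 4}.
	deltas := []int64{2, -1, 3}
	abs := make([]float64, len(deltas))
	var cur float64
	for i, d := range deltas {
		cur += float64(d)
		abs[i] = cur
	}
	fmt.Println(abs) // [2 1 4]
}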
@ -357,25 +418,47 @@ func resize[T any](items []T, n int) []T {
} }
// Validate validates consistency between span and bucket slices. Also, buckets are checked // Validate validates consistency between span and bucket slices. Also, buckets are checked
// against negative values. // against negative values. We check to make sure there are no unexpected fields or field values
// based on the exponential / custom buckets schema.
// For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a // For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a
// strict h.Count = nCount + pCount + h.ZeroCount check is performed. // strict h.Count = nCount + pCount + h.ZeroCount check is performed.
// Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount), // Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount),
// because NaN observations do not increment the values of buckets (but they do increment // because NaN observations do not increment the values of buckets (but they do increment
// the total h.Count). // the total h.Count).
func (h *Histogram) Validate() error { func (h *Histogram) Validate() error {
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err)
}
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err)
}
var nCount, pCount uint64 var nCount, pCount uint64
err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) if h.UsesCustomBuckets() {
if err != nil { if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err) return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
return fmt.Errorf("custom buckets: must have zero count of 0")
}
if h.ZeroThreshold != 0 {
return fmt.Errorf("custom buckets: must have zero threshold of 0")
}
if len(h.NegativeSpans) > 0 {
return fmt.Errorf("custom buckets: must not have negative spans")
}
if len(h.NegativeBuckets) > 0 {
return fmt.Errorf("custom buckets: must not have negative buckets")
}
} else {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err)
}
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err)
}
err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true)
if err != nil {
return fmt.Errorf("negative side: %w", err)
}
if h.CustomValues != nil {
return fmt.Errorf("histogram with exponential schema must not have custom bounds")
}
} }
err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true) err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
if err != nil { if err != nil {
return fmt.Errorf("positive side: %w", err) return fmt.Errorf("positive side: %w", err)
} }
@ -398,12 +481,13 @@ type regularBucketIterator struct {
baseBucketIterator[uint64, int64] baseBucketIterator[uint64, int64]
} }
func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) regularBucketIterator { func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customValues []float64) regularBucketIterator {
i := baseBucketIterator[uint64, int64]{ i := baseBucketIterator[uint64, int64]{
schema: schema, schema: schema,
spans: spans, spans: spans,
buckets: buckets, buckets: buckets,
positive: positive, positive: positive,
customValues: customValues,
} }
return regularBucketIterator{i} return regularBucketIterator{i}
} }
@ -477,7 +561,7 @@ func (c *cumulativeBucketIterator) Next() bool {
if c.emptyBucketCount > 0 { if c.emptyBucketCount > 0 {
// We are traversing through empty buckets at the moment. // We are traversing through empty buckets at the moment.
c.currUpper = getBound(c.currIdx, c.h.Schema) c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
c.currIdx++ c.currIdx++
c.emptyBucketCount-- c.emptyBucketCount--
return true return true
@ -494,7 +578,7 @@ func (c *cumulativeBucketIterator) Next() bool {
c.currCount += c.h.PositiveBuckets[c.posBucketsIdx] c.currCount += c.h.PositiveBuckets[c.posBucketsIdx]
c.currCumulativeCount += uint64(c.currCount) c.currCumulativeCount += uint64(c.currCount)
c.currUpper = getBound(c.currIdx, c.h.Schema) c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
c.posBucketsIdx++ c.posBucketsIdx++
c.idxInSpan++ c.idxInSpan++
@ -524,7 +608,15 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] {
// ReduceResolution reduces the histogram's spans, buckets into target schema. // ReduceResolution reduces the histogram's spans, buckets into target schema.
// The target schema must be smaller than the current histogram's schema. // The target schema must be smaller than the current histogram's schema.
// This will panic if the histogram has custom buckets or if the target schema is
// a custom buckets schema.
func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram { func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram {
if h.UsesCustomBuckets() {
panic("cannot reduce resolution when there are custom buckets")
}
if IsCustomBucketsSchema(targetSchema) {
panic("cannot reduce resolution to custom buckets schema")
}
if targetSchema >= h.Schema { if targetSchema >= h.Schema {
panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
} }

View file

@@ -69,6 +69,21 @@ func TestHistogramString(t *testing.T) {
		},
		expectedString: "{count:19, sum:2.7, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, (0.5,1]:1, (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:1, (16,32]:1, (32,64]:1}",
	},
{
histogram: Histogram{
Schema: CustomBucketsSchema,
Count: 19,
Sum: 2.7,
PositiveSpans: []Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
CustomValues: []float64{1, 2, 5, 10, 15, 20, 25, 50},
},
expectedString: "{count:19, sum:2.7, [-Inf,1]:1, (1,2]:3, (2,5]:1, (5,10]:2, (10,15]:1, (15,20]:1, (20,25]:1}",
},
	}

	for i, c := range cases {
@@ -208,6 +223,26 @@ func TestCumulativeBucketIterator(t *testing.T) {
			{Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2},
		},
	},
{
histogram: Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{5, 10, 20, 50},
},
expectedBuckets: []Bucket[uint64]{
{Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
{Lower: math.Inf(-1), Upper: 10, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 1},
{Lower: math.Inf(-1), Upper: 20, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 2},
{Lower: math.Inf(-1), Upper: 50, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 3},
{Lower: math.Inf(-1), Upper: math.Inf(1), Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 4},
},
},
	}

	for i, c := range cases {
@@ -368,6 +403,62 @@ func TestRegularBucketIterator(t *testing.T) {
		},
		expectedNegativeBuckets: []Bucket[uint64]{},
	},
{
histogram: Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{5, 10, 20, 50},
},
expectedPositiveBuckets: []Bucket[uint64]{
{Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
{Lower: 5, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
},
expectedNegativeBuckets: []Bucket[uint64]{},
},
{
histogram: Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{0, 10, 20, 50},
},
expectedPositiveBuckets: []Bucket[uint64]{
{Lower: math.Inf(-1), Upper: 0, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
{Lower: 0, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
},
expectedNegativeBuckets: []Bucket[uint64]{},
},
{
histogram: Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 5},
},
PositiveBuckets: []int64{1, 1, 0, -1, 0},
CustomValues: []float64{-5, 0, 20, 50},
},
expectedPositiveBuckets: []Bucket[uint64]{
{Lower: math.Inf(-1), Upper: -5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
{Lower: -5, Upper: 0, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
{Lower: 0, Upper: 20, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 2},
{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
},
expectedNegativeBuckets: []Bucket[uint64]{},
},
	}

	for i, c := range cases {
@@ -461,11 +552,81 @@ func TestHistogramToFloat(t *testing.T) {
	}
}

func TestCustomBucketsHistogramToFloat(t *testing.T) {
h := Histogram{
Schema: CustomBucketsSchema,
Count: 10,
Sum: 2.7,
PositiveSpans: []Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
CustomValues: []float64{5, 10, 20, 50, 100, 500},
}
cases := []struct {
name string
fh *FloatHistogram
}{
{name: "without prior float histogram"},
{name: "prior float histogram with more buckets", fh: &FloatHistogram{
Schema: 2,
Count: 3,
Sum: 5,
ZeroThreshold: 4,
ZeroCount: 1,
PositiveSpans: []Span{
{Offset: 1, Length: 2},
{Offset: 1, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9},
NegativeSpans: []Span{
{Offset: 20, Length: 6},
{Offset: 12, Length: 7},
{Offset: 33, Length: 10},
},
NegativeBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9},
}},
{name: "prior float histogram with fewer buckets", fh: &FloatHistogram{
Schema: 2,
Count: 3,
Sum: 5,
ZeroThreshold: 4,
ZeroCount: 1,
PositiveSpans: []Span{
{Offset: 1, Length: 2},
{Offset: 1, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 2},
NegativeSpans: []Span{
{Offset: 20, Length: 6},
{Offset: 12, Length: 7},
{Offset: 33, Length: 10},
},
NegativeBuckets: []float64{1, 2},
}},
}
require.NoError(t, h.Validate())
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
hStr := h.String()
fh := h.ToFloat(c.fh)
require.NoError(t, fh.Validate())
require.Equal(t, hStr, h.String())
require.Equal(t, hStr, fh.String())
})
}
}
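// Editor's aside: the test above exercises ToFloat's reuse parameter. A sketch
// of the calling pattern, assuming the ToFloat signature used in these tests:
//
//	fh := h.ToFloat(nil) // allocates a fresh FloatHistogram
//	fh = h.ToFloat(fh)   // reuses fh's backing slices where capacity allows
//
// Passing back a previously returned FloatHistogram is intended to avoid
// re-allocating span and bucket slices in hot conversion paths.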
// TestHistogramEquals tests both Histogram and FloatHistogram.
func TestHistogramEquals(t *testing.T) {
	h1 := Histogram{
		Schema:        3,
		Count:         62,
		Sum:           2.7,
		ZeroThreshold: 0.1,
		ZeroCount:     42,
@@ -495,6 +656,15 @@ func TestHistogramEquals(t *testing.T) {
		require.False(t, h1f.Equals(h2f))
		require.False(t, h2f.Equals(h1f))
	}
notEqualsUntilFloatConv := func(h1, h2 Histogram) {
require.False(t, h1.Equals(&h2))
require.False(t, h2.Equals(&h1))
h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil)
require.True(t, h1f.Equals(h2f))
require.True(t, h2f.Equals(h1f))
}
require.NoError(t, h1.Validate())
	h2 := h1.Copy()
	equals(h1, *h2)
@@ -602,6 +772,45 @@ func TestHistogramEquals(t *testing.T) {
	// Sum StaleNaN vs regular NaN.
	notEquals(*hStale, *hNaN)
// Has non-empty custom bounds for exponential schema.
hCustom := h1.Copy()
hCustom.CustomValues = []float64{1, 2, 3}
equals(h1, *hCustom)
cbh1 := Histogram{
Schema: CustomBucketsSchema,
Count: 10,
Sum: 2.7,
PositiveSpans: []Span{
{Offset: 0, Length: 4},
{Offset: 10, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
CustomValues: []float64{0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 200, 250, 500, 1000},
}
require.NoError(t, cbh1.Validate())
cbh2 := cbh1.Copy()
equals(cbh1, *cbh2)
// Has different custom bounds for custom buckets schema.
cbh2 = cbh1.Copy()
cbh2.CustomValues = []float64{0.1, 0.2, 0.5}
notEquals(cbh1, *cbh2)
// Has non-empty negative spans and buckets for custom buckets schema.
cbh2 = cbh1.Copy()
cbh2.NegativeSpans = []Span{{Offset: 0, Length: 1}}
cbh2.NegativeBuckets = []int64{1}
notEqualsUntilFloatConv(cbh1, *cbh2)
// Has non-zero zero count and threshold for custom buckets schema.
cbh2 = cbh1.Copy()
cbh2.ZeroThreshold = 0.1
cbh2.ZeroCount = 10
notEqualsUntilFloatConv(cbh1, *cbh2)
}

func TestHistogramCopy(t *testing.T) {
@@ -640,6 +849,21 @@ func TestHistogramCopy(t *testing.T) {
		},
		expected: &Histogram{},
	},
{
name: "with custom buckets",
orig: &Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomValues: []float64{5, 10, 15},
},
expected: &Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomValues: []float64{5, 10, 15},
},
},
	}

	for _, tcase := range cases {
@@ -690,6 +914,21 @@ func TestHistogramCopyTo(t *testing.T) {
		},
		expected: &Histogram{},
	},
{
name: "with custom buckets",
orig: &Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomValues: []float64{5, 10, 15},
},
expected: &Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomValues: []float64{5, 10, 15},
},
},
	}

	for _, tcase := range cases {
@@ -971,6 +1210,86 @@ func TestHistogramCompact(t *testing.T) {
			NegativeBuckets: []int64{2, 3},
		},
	},
{
"nothing should happen with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomValues: []float64{5, 10, 15},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomValues: []float64{5, 10, 15},
},
},
{
"eliminate zero offsets with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}, {0, 3}, {0, 1}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 5}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
},
{
"eliminate zero length with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {2, 0}, {3, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {5, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
},
{
"eliminate multiple zero length spans with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {9, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
},
{
"cut empty buckets at start or end of spans, even in the middle, with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-4, 6}, {3, 6}},
PositiveBuckets: []int64{0, 0, 1, 3, -4, 0, 1, 42, 3, -46, 0, 0},
CustomValues: []float64{5, 10, 15, 20},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {5, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
},
	}

	for _, c := range cases {
@@ -1107,6 +1426,145 @@ func TestHistogramValidation(t *testing.T) {
		errMsg:    `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`,
		skipFloat: true,
	},
"rejects an exponential histogram with custom buckets schema": {
h: &Histogram{
Count: 12,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
NegativeSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{1, 1, -1, 0},
},
errMsg: `custom buckets: only 0 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`,
},
"rejects a custom buckets histogram with exponential schema": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: 0,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3, 4},
},
errMsg: `histogram with exponential schema must not have custom bounds`,
skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation
},
"rejects a custom buckets histogram with zero/negative buckets": {
h: &Histogram{
Count: 12,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
NegativeSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3, 4},
},
errMsg: `custom buckets: must have zero count of 0`,
skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation
},
"rejects a custom buckets histogram with negative offset in first span": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: -1, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3, 4},
},
errMsg: `custom buckets: span number 1 with offset -1: histogram has a span whose offset is negative`,
},
"rejects a custom buckets histogram with negative offset in subsequent spans": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: -1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3, 4},
},
errMsg: `custom buckets: span number 2 with offset -1: histogram has a span whose offset is negative`,
},
"rejects a custom buckets histogram with non-matching bucket counts": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1},
CustomValues: []float64{1, 2, 3, 4},
},
errMsg: `custom buckets: spans need 4 buckets, have 3 buckets: histogram spans specify different number of buckets than provided`,
},
"rejects a custom buckets histogram with too few bounds": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3},
},
errMsg: `custom buckets: only 3 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`,
},
"valid custom buckets histogram": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3, 4},
},
},
"valid custom buckets histogram with extra bounds": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8},
},
},
	}

	for testName, tc := range tests {


@@ -18,6 +18,7 @@ import (
	"encoding/json"
	"slices"
	"strconv"
	"unsafe"

	"github.com/prometheus/common/model"
)
@@ -215,3 +216,7 @@ func contains(s []Label, n string) bool {
	}
	return false
}
func yoloString(b []byte) string {
return *((*string)(unsafe.Pointer(&b)))
}


@@ -20,7 +20,6 @@ import (
	"slices"
	"strings"
	"sync"
-	"unsafe"

	"github.com/cespare/xxhash/v2"
)
@@ -105,30 +104,39 @@ func (t *nameTable) ToName(num int) string {
	return t.byNum[num]
}

// "Varint" in this file is non-standard: we encode small numbers (up to 32767) in 2 bytes,
// because we expect most Prometheus instances to have more than 127 unique strings.
// And we don't encode numbers larger than 4 bytes because we don't expect more than 536,870,912 unique strings.
func decodeVarint(data string, index int) (int, int) {
	b := int(data[index]) + int(data[index+1])<<8
	index += 2
	if b < 0x8000 {
		return b, index
	}
	return decodeVarintRest(b, data, index)
}

func decodeVarintRest(b int, data string, index int) (int, int) {
	value := int(b & 0x7FFF)
	b = int(data[index])
	index++
	if b < 0x80 {
		return value | (b << 15), index
	}

	value |= (b & 0x7f) << 15
	b = int(data[index])
	index++
	return value | (b << 22), index
}

func decodeString(t *nameTable, data string, index int) (string, int) {
	// Copy decodeVarint here, because the Go compiler says it's too big to inline.
	num := int(data[index]) + int(data[index+1])<<8
	index += 2
	if num >= 0x8000 {
		num, index = decodeVarintRest(num, data, index)
	}
	return t.ToName(num), index
}
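// Editor's aside: a worked example (not in the diff) of this non-standard varint.
// The value 300 (0x012C) is below 0x8000, so it encodes little-endian in two bytes:
//
//	data[0] = 0x2C // low 8 bits
//	data[1] = 0x01 // high bits; a set top bit here would signal continuation
//
// decodeVarint then reads int(data[0]) + int(data[1])<<8 = 300 and returns
// immediately; only values >= 0x8000 fall through to decodeVarintRest for a
// third or fourth byte.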
@@ -322,7 +330,12 @@ func (ls Labels) Get(name string) string {
		} else if lName[0] > name[0] { // Stop looking if we've gone past.
			break
		}
		// Copy decodeVarint here, because the Go compiler says it's too big to inline.
		num := int(ls.data[i]) + int(ls.data[i+1])<<8
		i += 2
		if num >= 0x8000 {
			_, i = decodeVarintRest(num, ls.data, i)
		}
	}
	return ""
}
@@ -340,7 +353,12 @@ func (ls Labels) Has(name string) bool {
		} else if lName[0] > name[0] { // Stop looking if we've gone past.
			break
		}
		// Copy decodeVarint here, because the Go compiler says it's too big to inline.
		num := int(ls.data[i]) + int(ls.data[i+1])<<8
		i += 2
		if num >= 0x8000 {
			_, i = decodeVarintRest(num, ls.data, i)
		}
	}
	return false
}
@@ -426,10 +444,6 @@ func EmptyLabels() Labels {
	return Labels{}
}

-func yoloString(b []byte) string {
-	return *((*string)(unsafe.Pointer(&b)))
-}
-
// New returns a sorted Labels from the given labels.
// The caller has to guarantee that all label names are unique.
// Note this function is not efficient; should not be used in performance-critical places.
@@ -646,29 +660,24 @@ func marshalNumbersToSizedBuffer(nums []int, data []byte) int {
func sizeVarint(x uint64) (n int) {
	// Most common case first
	if x < 1<<15 {
		return 2
	}
	if x < 1<<22 {
		return 3
	}
	if x >= 1<<29 {
		panic("Number too large to represent")
	}
	return 4
}

func encodeVarintSlow(data []byte, offset int, v uint64) int {
	offset -= sizeVarint(v)
	base := offset
	data[offset] = uint8(v)
	v >>= 8
	offset++
	for v >= 1<<7 {
		data[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
@@ -678,11 +687,12 @@ func encodeVarintSlow(data []byte, offset int, v uint64) int {
	return base
}

// Special code for the common case that a value is less than 32768
func encodeVarint(data []byte, offset, v int) int {
	if v < 1<<15 {
		offset -= 2
		data[offset] = uint8(v)
		data[offset+1] = uint8(v >> 8)
		return offset
	}
	return encodeVarintSlow(data, offset, uint64(v))


@ -0,0 +1,50 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build dedupelabels
package labels
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestVarint(t *testing.T) {
cases := []struct {
v int
expected []byte
}{
{0, []byte{0, 0}},
{1, []byte{1, 0}},
{2, []byte{2, 0}},
{0x7FFF, []byte{0xFF, 0x7F}},
{0x8000, []byte{0x00, 0x80, 0x01}},
{0x8001, []byte{0x01, 0x80, 0x01}},
{0x3FFFFF, []byte{0xFF, 0xFF, 0x7F}},
{0x400000, []byte{0x00, 0x80, 0x80, 0x01}},
{0x400001, []byte{0x01, 0x80, 0x80, 0x01}},
{0x1FFFFFFF, []byte{0xFF, 0xFF, 0xFF, 0x7F}},
}
var buf [16]byte
for _, c := range cases {
n := encodeVarint(buf[:], len(buf), c.v)
require.Equal(t, len(c.expected), len(buf)-n)
require.Equal(t, c.expected, buf[n:])
got, m := decodeVarint(string(buf[:]), n)
require.Equal(t, c.v, got)
require.Equal(t, len(buf), m)
}
require.Panics(t, func() { encodeVarint(buf[:], len(buf), 1<<29) })
}


@@ -299,11 +299,6 @@ func Equal(ls, o Labels) bool {
func EmptyLabels() Labels {
	return Labels{}
}

-func yoloString(b []byte) string {
-	return *((*string)(unsafe.Pointer(&b)))
-}
-
func yoloBytes(s string) (b []byte) {
	*(*string)(unsafe.Pointer(&b)) = s
	(*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s)


@@ -466,6 +466,38 @@ func TestLabels_DropMetricName(t *testing.T) {
	require.True(t, Equal(original, check))
}
func ScratchBuilderForBenchmark() ScratchBuilder {
	// (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fit into 1 byte.)
b := NewScratchBuilder(256)
for i := 0; i < 256; i++ {
b.Add(fmt.Sprintf("name%d", i), fmt.Sprintf("value%d", i))
}
b.Labels()
b.Reset()
return b
}
func NewForBenchmark(ls ...Label) Labels {
b := ScratchBuilderForBenchmark()
for _, l := range ls {
b.Add(l.Name, l.Value)
}
b.Sort()
return b.Labels()
}
func FromStringsForBenchmark(ss ...string) Labels {
if len(ss)%2 != 0 {
panic("invalid number of strings")
}
b := ScratchBuilderForBenchmark()
for i := 0; i < len(ss); i += 2 {
b.Add(ss[i], ss[i+1])
}
b.Sort()
return b.Labels()
}
// BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation
// The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels.
// In the following list, `old` is the linear search while `new` is the binary search implementation (without calling sort.Search, which performs even worse here)
@@ -488,7 +520,7 @@ func BenchmarkLabels_Get(b *testing.B) {
	}
	for _, size := range []int{5, 10, maxLabels} {
		b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) {
			labels := NewForBenchmark(allLabels[:size]...)
			for _, scenario := range []struct {
				desc, label string
			}{
@@ -520,33 +552,33 @@ var comparisonBenchmarkScenarios = []struct {
}{
	{
		"equal",
		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
	},
	{
		"not equal",
		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"),
	},
	{
		"different sizes",
		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
		FromStringsForBenchmark("a_label_name", "a_label_value"),
	},
	{
		"lots",
		FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"),
		FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"),
	},
	{
		"real long equal",
		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
	},
	{
		"real long different end",
		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"),
	},
}
@@ -834,7 +866,7 @@ func BenchmarkBuilder(b *testing.B) {
}

func BenchmarkLabels_Copy(b *testing.B) {
	l := NewForBenchmark(benchmarkLabels...)

	for i := 0; i < b.N; i++ {
		l = l.Copy()


@@ -101,7 +101,7 @@ func (m *Matcher) shouldQuoteName() bool {
		}
		return true
	}
	return len(m.Name) == 0
}

// Matches returns whether the matcher matches the given string value.


@@ -798,39 +798,23 @@ func (m *equalMultiStringMapMatcher) Matches(s string) bool {
// toNormalisedLower normalises the input string using "Unicode Normalization Form D" and then converts
// it to lower case.
func toNormalisedLower(s string) string {
	var buf []byte
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= utf8.RuneSelf {
			return strings.Map(unicode.ToLower, norm.NFKD.String(s))
		}
		if 'A' <= c && c <= 'Z' {
			if buf == nil {
				buf = []byte(s)
			}
			buf[i] = c + 'a' - 'A'
		}
	}
	if buf == nil {
		return s
	}
	return yoloString(buf)
}
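// Editor's aside: behaviour sketch (not in the diff) for the rewrite above:
//
//	toNormalisedLower("foo") // all-ASCII, no upper case: returns s unchanged, no allocation
//	toNormalisedLower("FOO") // lower-cases into a single copied buffer: "foo"
//	toNormalisedLower("ſoo") // non-ASCII: falls back to NFKD normalisation plus unicode.ToLower
//
// The yoloString at the end is safe because buf is a private copy that escapes
// only as the returned string.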

// anyStringWithoutNewlineMatcher is a stringMatcher which matches any string


@@ -1209,6 +1209,10 @@ func visitStringMatcher(matcher StringMatcher, callback func(matcher StringMatch
func TestToNormalisedLower(t *testing.T) {
	testCases := map[string]string{
		"foo":                       "foo",
		"FOO":                       "foo",
		"Foo":                       "foo",
		"foO":                       "foo",
		"fOo":                       "foo",
		"AAAAAAAAAAAAAAAAAAAAAAAA":  "aaaaaaaaaaaaaaaaaaaaaaaa",
		"cccccccccccccccccccccccC":  "cccccccccccccccccccccccc",
		"ſſſſſſſſſſſſſſſſſſſſſſſſS": "sssssssssssssssssssssssss",


@@ -206,6 +206,11 @@ func (re Regexp) MarshalYAML() (interface{}, error) {
	return nil, nil
}
// IsZero implements the yaml.IsZeroer interface.
func (re Regexp) IsZero() bool {
return re.Regexp == DefaultRelabelConfig.Regex.Regexp
}
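// Editor's aside: gopkg.in/yaml.v2 consults the IsZeroer interface for fields
// marked omitempty, so a Regexp equal to the default relabel regex is omitted
// when marshalling. A sketch of the relevant field shape:
//
//	type Config struct {
//		Regex Regexp `yaml:"regex,omitempty"`
//	}
//
// With IsZero returning true for the default regex, an unmarshal-then-marshal
// round trip no longer injects an explicit `regex: (.*)` line (see the test
// below).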
// String returns the original string used to compile the regular expression.
func (re Regexp) String() string {
	str := re.Regexp.String()


@@ -851,3 +851,52 @@ func BenchmarkRelabel(b *testing.B) {
		})
	}
}

func TestConfig_UnmarshalThenMarshal(t *testing.T) {
tests := []struct {
name string
inputYaml string
}{
{
name: "Values provided",
inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port]
separator: ;
regex: \\d+
target_label: __meta_kubernetes_pod_container_port_number
replacement: $1
action: replace
`,
},
{
name: "No regex provided",
inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port]
separator: ;
target_label: __meta_kubernetes_pod_container_port_number
replacement: $1
action: keepequal
`,
},
{
name: "Default regex provided",
inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port]
separator: ;
regex: (.*)
target_label: __meta_kubernetes_pod_container_port_number
replacement: $1
action: replace
`,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
unmarshalled := Config{}
err := yaml.Unmarshal([]byte(test.inputYaml), &unmarshalled)
require.NoError(t, err)
marshalled, err := yaml.Marshal(&unmarshalled)
require.NoError(t, err)
require.Equal(t, test.inputYaml, string(marshalled))
})
}
}


@@ -110,10 +110,11 @@ type Manager struct {
	metrics *alertMetrics

	more chan struct{}
	mtx  sync.RWMutex

	stopOnce      *sync.Once
	stopRequested chan struct{}

	alertmanagers map[string]*alertmanagerSet
	logger        log.Logger
@@ -121,9 +122,10 @@ type Manager struct {
// Options are the configurable parameters of a Handler.
type Options struct {
	QueueCapacity   int
	DrainOnShutdown bool
	ExternalLabels  labels.Labels
	RelabelConfigs  []*relabel.Config

	// Used for sending HTTP requests to the Alertmanager.
	Do func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error)
@@ -217,8 +219,6 @@ func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp
// NewManager is the manager constructor.
func NewManager(o *Options, logger log.Logger) *Manager {
-	ctx, cancel := context.WithCancel(context.Background())
-
	if o.Do == nil {
		o.Do = do
	}
@@ -227,12 +227,12 @@ func NewManager(o *Options, logger log.Logger) *Manager {
	}

	n := &Manager{
		queue:         make([]*Alert, 0, o.QueueCapacity),
		more:          make(chan struct{}, 1),
		stopRequested: make(chan struct{}),
		stopOnce:      &sync.Once{},
		opts:          o,
		logger:        logger,
	}

	queueLenFunc := func() float64 { return float64(n.queueLen()) }
@@ -298,38 +298,100 @@ func (n *Manager) nextBatch() []*Alert {
	return alerts
}

// Run dispatches notifications continuously, returning once Stop has been called and all
// pending notifications have been drained from the queue (if draining is enabled).
//
// Dispatching of notifications occurs in parallel to processing target updates to avoid one starving the other.
// Refer to https://github.com/prometheus/prometheus/issues/13676 for more details.
func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {
	wg := sync.WaitGroup{}
	wg.Add(2)

	go func() {
		defer wg.Done()
		n.targetUpdateLoop(tsets)
	}()

	go func() {
		defer wg.Done()
		n.sendLoop()
		n.drainQueue()
	}()

	wg.Wait()
	level.Info(n.logger).Log("msg", "Notification manager stopped")
}
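// Editor's aside: a sketch (not in the diff) of how Run is wired up; the channel
// normally comes from the discovery manager, as the tests below do:
//
//	go notifier.Run(sdManager.SyncCh())
//	defer notifier.Stop()
//
// With sending and target updates in separate goroutines, a hanging Alertmanager
// can no longer starve service-discovery updates (issue 13676 referenced above).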
// sendLoop continuously consumes the notifications queue and sends alerts to
// the configured Alertmanagers.
func (n *Manager) sendLoop() {
	for {
		// If we've been asked to stop, that takes priority over sending any further notifications.
		select {
		case <-n.stopRequested:
			return
		default:
			select {
			case <-n.stopRequested:
				return
			case <-n.more:
				n.sendOneBatch()

				// If the queue still has items left, kick off the next iteration.
				if n.queueLen() > 0 {
					n.setMore()
				}
			}
		}
	}
}

// targetUpdateLoop receives updates of target groups and triggers a reload.
func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group) {
	for {
		// If we've been asked to stop, that takes priority over processing any further target group updates.
		select {
		case <-n.stopRequested:
			return
		default:
			select {
			case <-n.stopRequested:
				return
			case ts := <-tsets:
				n.reload(ts)
			}
		}
	}
}
func (n *Manager) sendOneBatch() {
alerts := n.nextBatch()
if !n.sendAll(alerts...) {
n.metrics.dropped.Add(float64(len(alerts)))
}
}
func (n *Manager) drainQueue() {
if !n.opts.DrainOnShutdown {
if n.queueLen() > 0 {
level.Warn(n.logger).Log("msg", "Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen())
n.metrics.dropped.Add(float64(n.queueLen()))
}
return
}
level.Info(n.logger).Log("msg", "Draining any remaining notifications...")
for n.queueLen() > 0 {
n.sendOneBatch()
}
level.Info(n.logger).Log("msg", "Remaining notifications drained")
}
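// Editor's aside: a sketch (not in the diff) of opting into draining via the
// DrainOnShutdown field added to Options above (logger is hypothetical here):
//
//	m := NewManager(&Options{
//		QueueCapacity:   10000,
//		DrainOnShutdown: true, // flush queued notifications before Run returns
//	}, logger)
//
// With DrainOnShutdown false, any still-queued notifications are counted as
// dropped at shutdown instead, as logged above.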

func (n *Manager) reload(tgs map[string][]*targetgroup.Group) {
	n.mtx.Lock()
	defer n.mtx.Unlock()
@@ -471,10 +533,6 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
		numSuccess atomic.Uint64
	)
	for _, ams := range amSets {
-		if len(ams.ams) == 0 {
-			continue
-		}
-
		var (
			payload []byte
			err     error
		)
@@ -483,6 +541,11 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
		ams.mtx.RLock()

		if len(ams.ams) == 0 {
			ams.mtx.RUnlock()
			continue
		}

		if len(ams.cfg.AlertRelabelConfigs) > 0 {
			amAlerts = relabelAlerts(ams.cfg.AlertRelabelConfigs, labels.Labels{}, alerts)
			if len(amAlerts) == 0 {
@@ -541,7 +604,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
	for _, am := range ams.ams {
		wg.Add(1)

		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ams.cfg.Timeout))
		defer cancel()

		go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) {
@@ -611,6 +674,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
	}()

	// Any HTTP status 2xx is OK.
	//nolint:usestdlibvars
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("bad response status %s", resp.Status)
	}
@@ -618,10 +682,19 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
	return nil
}

// Stop signals the notification manager to shut down and immediately returns.
//
// Run will return once the notification manager has successfully shut down.
//
// The manager will optionally drain any queued notifications before shutting down.
//
// Stop is safe to call multiple times.
func (n *Manager) Stop() {
	level.Info(n.logger).Log("msg", "Stopping notification manager...")

	n.stopOnce.Do(func() {
		close(n.stopRequested)
	})
}

// Alertmanager holds Alertmanager endpoint information.


@@ -26,13 +26,17 @@ import (
	"testing"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/alertmanager/api/v2/models"
	"github.com/prometheus/client_golang/prometheus"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"
	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/labels"
@@ -697,117 +701,319 @@ func TestLabelsToOpenAPILabelSet(t *testing.T) {
	require.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.FromStrings("aaa", "111", "bbb", "222")))
}

// TestHangingNotifier ensures that the notifier takes into account SD changes even when there are
// queued alerts. This test reproduces the issue described in https://github.com/prometheus/prometheus/issues/13676
// and https://github.com/prometheus/prometheus/issues/8768.
func TestHangingNotifier(t *testing.T) {
	const (
		batches     = 100
		alertsCount = maxBatchSize * batches
	)

	var (
		sendTimeout = 10 * time.Millisecond
		sdUpdatert  = sendTimeout / 2

		done = make(chan struct{})
	)

	defer func() {
		close(done)
	}()

	// Set up a faulty Alertmanager.
	var faultyCalled atomic.Bool
	faultyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		faultyCalled.Store(true)
		select {
		case <-done:
		case <-time.After(time.Hour):
		}
	}))
	faultyURL, err := url.Parse(faultyServer.URL)
	require.NoError(t, err)

	// Set up a functional Alertmanager.
	var functionalCalled atomic.Bool
	functionalServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		functionalCalled.Store(true)
	}))
	functionalURL, err := url.Parse(functionalServer.URL)
	require.NoError(t, err)

	// Initialize the discovery manager. This is relevant because in real life the
	// updates aren't sent continually, but only once per updatert; the old
	// implementation of TestHangingNotifier didn't take that into account.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	reg := prometheus.NewRegistry()
	sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg))
	require.NoError(t, err)
	sdManager := discovery.NewManager(
		ctx,
		log.NewNopLogger(),
		reg,
		sdMetrics,
		discovery.Name("sd-manager"),
		discovery.Updatert(sdUpdatert),
	)
	go sdManager.Run()

	// Set up the notifier with both faulty and functional Alertmanagers.
	notifier := NewManager(
		&Options{
			QueueCapacity: alertsCount,
		},
		nil,
	)
	notifier.alertmanagers = make(map[string]*alertmanagerSet)
	amCfg := config.DefaultAlertmanagerConfig
	amCfg.Timeout = model.Duration(sendTimeout)
	notifier.alertmanagers["config-0"] = &alertmanagerSet{
		ams: []alertmanager{
			alertmanagerMock{
				urlf: func() string { return faultyURL.String() },
			},
			alertmanagerMock{
				urlf: func() string { return functionalURL.String() },
			},
		},
		cfg:     &amCfg,
		metrics: notifier.metrics,
	}
	go notifier.Run(sdManager.SyncCh())
	defer notifier.Stop()

	require.Len(t, notifier.Alertmanagers(), 2)

	// Enqueue the alerts.
	var alerts []*Alert
	for i := range make([]struct{}, alertsCount) {
		alerts = append(alerts, &Alert{
			Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
		})
	}
	notifier.Send(alerts...)
// Wait for the Alertmanagers to start receiving alerts.
// 10*sdUpdatert is used as an arbitrary timeout here.
timeout := time.After(10 * sdUpdatert)
loop1:
for {
select {
case <-timeout:
t.Fatalf("Timeout waiting for the alertmanagers to be reached for the first time.")
default:
if faultyCalled.Load() && functionalCalled.Load() {
break loop1
}
}
}
// Request to remove the faulty Alertmanager.
c := map[string]discovery.Configs{
"config-0": {
discovery.StaticConfig{
&targetgroup.Group{
Targets: []model.LabelSet{
{
model.AddressLabel: model.LabelValue(functionalURL.Host),
},
},
},
},
},
}
require.NoError(t, sdManager.ApplyConfig(c))
	// The notifier should not wait until the alerts queue is empty before applying the discovery changes.
	// A faulty Alertmanager could cause each alert sending cycle to take up to AlertmanagerConfig.Timeout.
	// The queue may never be emptied, as the arrival rate could be larger than the departure rate.
	// It could even overflow and alerts could be dropped.
timeout = time.After(batches * sendTimeout)
loop2:
for {
select {
case <-timeout:
t.Fatalf("Timeout, the faulty alertmanager not removed on time.")
default:
// The faulty alertmanager was dropped.
if len(notifier.Alertmanagers()) == 1 {
				// Guard against a TOCTOU race.
require.Positive(t, notifier.queueLen())
break loop2
}
require.Positive(t, notifier.queueLen(), "The faulty alertmanager wasn't dropped before the alerts queue was emptied.")
}
}
}
func TestStop_DrainingDisabled(t *testing.T) {
releaseReceiver := make(chan struct{})
receiverReceivedRequest := make(chan struct{}, 2)
alertsReceived := atomic.NewInt64(0)
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Let the test know we've received a request.
receiverReceivedRequest <- struct{}{}
var alerts []*Alert
b, err := io.ReadAll(r.Body)
require.NoError(t, err)
err = json.Unmarshal(b, &alerts)
require.NoError(t, err)
alertsReceived.Add(int64(len(alerts)))
// Wait for the test to release us.
<-releaseReceiver
w.WriteHeader(http.StatusOK)
}))
defer func() {
server.Close()
}()
m := NewManager(
&Options{
QueueCapacity: 10,
DrainOnShutdown: false,
},
nil,
)
m.alertmanagers = make(map[string]*alertmanagerSet)
am1Cfg := config.DefaultAlertmanagerConfig
am1Cfg.Timeout = model.Duration(time.Second)
m.alertmanagers["1"] = &alertmanagerSet{
ams: []alertmanager{
alertmanagerMock{
urlf: func() string { return server.URL },
},
},
cfg: &am1Cfg,
}
notificationManagerStopped := make(chan struct{})
go func() {
defer close(notificationManagerStopped)
m.Run(nil)
}()
// Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later.
m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
select {
case <-receiverReceivedRequest:
// Nothing more to do.
case <-time.After(time.Second):
require.FailNow(t, "gave up waiting for receiver to receive notification of first alert")
}
m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
// Stop the notification manager, pause to allow the shutdown to be observed, and then allow the receiver to proceed.
m.Stop()
time.Sleep(time.Second)
close(releaseReceiver)
// Wait for the notification manager to stop and confirm only the first notification was sent.
// The second notification should be dropped.
select {
case <-notificationManagerStopped:
// Nothing more to do.
case <-time.After(time.Second):
require.FailNow(t, "gave up waiting for notification manager to stop")
}
require.Equal(t, int64(1), alertsReceived.Load())
}
func TestStop_DrainingEnabled(t *testing.T) {
releaseReceiver := make(chan struct{})
receiverReceivedRequest := make(chan struct{}, 2)
alertsReceived := atomic.NewInt64(0)
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Let the test know we've received a request.
receiverReceivedRequest <- struct{}{}
var alerts []*Alert
b, err := io.ReadAll(r.Body)
require.NoError(t, err)
err = json.Unmarshal(b, &alerts)
require.NoError(t, err)
alertsReceived.Add(int64(len(alerts)))
// Wait for the test to release us.
<-releaseReceiver
w.WriteHeader(http.StatusOK)
}))
defer func() {
server.Close()
}()
m := NewManager(
&Options{
QueueCapacity: 10,
DrainOnShutdown: true,
},
nil,
)
m.alertmanagers = make(map[string]*alertmanagerSet)
am1Cfg := config.DefaultAlertmanagerConfig
am1Cfg.Timeout = model.Duration(time.Second)
m.alertmanagers["1"] = &alertmanagerSet{
ams: []alertmanager{
alertmanagerMock{
urlf: func() string { return server.URL },
},
},
cfg: &am1Cfg,
}
notificationManagerStopped := make(chan struct{})
go func() {
defer close(notificationManagerStopped)
m.Run(nil)
}()
// Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later.
m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
select {
case <-receiverReceivedRequest:
// Nothing more to do.
case <-time.After(time.Second):
require.FailNow(t, "gave up waiting for receiver to receive notification of first alert")
}
m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
// Stop the notification manager and allow the receiver to proceed.
m.Stop()
close(releaseReceiver)
// Wait for the notification manager to stop and confirm both notifications were sent.
select {
case <-notificationManagerStopped:
// Nothing more to do.
case <-time.After(200 * time.Millisecond):
require.FailNow(t, "gave up waiting for notification manager to stop")
}
require.Equal(t, int64(2), alertsReceived.Load())
} }


@@ -1804,18 +1804,21 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
		}, e.LHS, e.RHS)
	default:
		return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
			vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh)
			return vec, handleVectorBinopError(err, e)
		}, e.LHS, e.RHS)
	}

case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar:
	return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
		vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh)
		return vec, handleVectorBinopError(err, e)
	}, e.LHS, e.RHS)

case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector:
	return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
		vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh)
		return vec, handleVectorBinopError(err, e)
	}, e.LHS, e.RHS)
}
@@ -2448,12 +2451,12 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi
}

// VectorBinop evaluates a binary operation between two Vectors, excluding set operators.
func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) {
	if matching.Card == parser.CardManyToMany {
		panic("many-to-many only allowed for set operators")
	}
	if len(lhs) == 0 || len(rhs) == 0 {
		return nil, nil // Short-circuit: nothing is going to match.
	}

	// The control flow below handles one-to-one or many-to-one matching.
@ -2506,6 +2509,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
// For all lhs samples find a respective rhs sample and perform // For all lhs samples find a respective rhs sample and perform
// the binary operation. // the binary operation.
var lastErr error
for i, ls := range lhs { for i, ls := range lhs {
sig := lhsh[i].signature sig := lhsh[i].signature
@ -2521,7 +2525,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
fl, fr = fr, fl fl, fr = fr, fl
hl, hr = hr, hl hl, hr = hr, hl
} }
floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr) floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr)
if err != nil {
lastErr = err
}
switch { switch {
case returnBool: case returnBool:
if keep { if keep {
@ -2563,7 +2570,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
H: histogramValue, H: histogramValue,
}) })
} }
return enh.Out return enh.Out, lastErr
} }
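Note the control flow above: a failing element does not abort the whole evaluation. The loop keeps processing every matched pair and only remembers the most recent error, which the caller then turns into a warning annotation. A minimal sketch of the pattern, with illustrative names rather than engine code:

// collectLast applies f to every item, remembering only the last failure,
// mirroring how VectorBinop and VectorscalarBinop accumulate lastErr.
func collectLast(items []int, f func(int) error) error {
	var lastErr error
	for _, item := range items {
		if err := f(item); err != nil {
			lastErr = err // remember, but keep going
		}
	}
	return lastErr
}

The trade-off is deliberate: one incompatible histogram pair degrades the result with a warning instead of failing the entire query.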
func signatureFunc(on bool, b []byte, names ...string) func(labels.Labels) string { func signatureFunc(on bool, b []byte, names ...string) func(labels.Labels) string {
@ -2626,7 +2633,8 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V
} }
// VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar.
func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) {
var lastErr error
for _, lhsSample := range lhs { for _, lhsSample := range lhs {
lf, rf := lhsSample.F, rhs.V lf, rf := lhsSample.F, rhs.V
var rh *histogram.FloatHistogram var rh *histogram.FloatHistogram
@ -2637,7 +2645,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
lf, rf = rf, lf lf, rf = rf, lf
lh, rh = rh, lh lh, rh = rh, lh
} }
float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh) float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh)
if err != nil {
lastErr = err
}
// Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // Catch cases where the scalar is the LHS in a scalar-vector comparison operation.
// We want to always keep the vector element value as the output value, even if it's on the RHS. // We want to always keep the vector element value as the output value, even if it's on the RHS.
if op.IsComparisonOperator() && swap { if op.IsComparisonOperator() && swap {
@ -2661,7 +2672,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
enh.Out = append(enh.Out, lhsSample) enh.Out = append(enh.Out, lhsSample)
} }
} }
return enh.Out return enh.Out, lastErr
} }
// scalarBinop evaluates a binary operation between two Scalars. // scalarBinop evaluates a binary operation between two Scalars.
@ -2698,49 +2709,57 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 {
} }
// vectorElemBinop evaluates a binary operation between two Vector elements. // vectorElemBinop evaluates a binary operation between two Vector elements.
func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool) { func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) {
switch op { switch op {
case parser.ADD: case parser.ADD:
if hlhs != nil && hrhs != nil { if hlhs != nil && hrhs != nil {
return 0, hlhs.Copy().Add(hrhs).Compact(0), true res, err := hlhs.Copy().Add(hrhs)
if err != nil {
return 0, nil, false, err
}
return 0, res.Compact(0), true, nil
} }
return lhs + rhs, nil, true return lhs + rhs, nil, true, nil
case parser.SUB: case parser.SUB:
if hlhs != nil && hrhs != nil { if hlhs != nil && hrhs != nil {
return 0, hlhs.Copy().Sub(hrhs).Compact(0), true res, err := hlhs.Copy().Sub(hrhs)
if err != nil {
return 0, nil, false, err
}
return 0, res.Compact(0), true, nil
} }
return lhs - rhs, nil, true return lhs - rhs, nil, true, nil
case parser.MUL: case parser.MUL:
if hlhs != nil && hrhs == nil { if hlhs != nil && hrhs == nil {
return 0, hlhs.Copy().Mul(rhs), true return 0, hlhs.Copy().Mul(rhs), true, nil
} }
if hlhs == nil && hrhs != nil { if hlhs == nil && hrhs != nil {
return 0, hrhs.Copy().Mul(lhs), true return 0, hrhs.Copy().Mul(lhs), true, nil
} }
return lhs * rhs, nil, true return lhs * rhs, nil, true, nil
case parser.DIV: case parser.DIV:
if hlhs != nil && hrhs == nil { if hlhs != nil && hrhs == nil {
return 0, hlhs.Copy().Div(rhs), true return 0, hlhs.Copy().Div(rhs), true, nil
} }
return lhs / rhs, nil, true return lhs / rhs, nil, true, nil
case parser.POW: case parser.POW:
return math.Pow(lhs, rhs), nil, true return math.Pow(lhs, rhs), nil, true, nil
case parser.MOD: case parser.MOD:
return math.Mod(lhs, rhs), nil, true return math.Mod(lhs, rhs), nil, true, nil
case parser.EQLC: case parser.EQLC:
return lhs, nil, lhs == rhs return lhs, nil, lhs == rhs, nil
case parser.NEQ: case parser.NEQ:
return lhs, nil, lhs != rhs return lhs, nil, lhs != rhs, nil
case parser.GTR: case parser.GTR:
return lhs, nil, lhs > rhs return lhs, nil, lhs > rhs, nil
case parser.LSS: case parser.LSS:
return lhs, nil, lhs < rhs return lhs, nil, lhs < rhs, nil
case parser.GTE: case parser.GTE:
return lhs, nil, lhs >= rhs return lhs, nil, lhs >= rhs, nil
case parser.LTE: case parser.LTE:
return lhs, nil, lhs <= rhs return lhs, nil, lhs <= rhs, nil
case parser.ATAN2: case parser.ATAN2:
return math.Atan2(lhs, rhs), nil, true return math.Atan2(lhs, rhs), nil, true, nil
} }
panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op))
} }
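Only the histogram branches can return a non-nil error; the float-only and comparison cases always succeed. Callers are expected to classify the error with errors.Is against the histogram package's sentinel errors (both appear later in this diff), not by string comparison. A small sketch, assuming those sentinels:

// describe maps a histogram-arithmetic error to a human-readable cause.
func describe(err error) string {
	switch {
	case errors.Is(err, histogram.ErrHistogramsIncompatibleSchema):
		return "mixing exponential and custom-bucket schemas"
	case errors.Is(err, histogram.ErrHistogramsIncompatibleBounds):
		return "custom bucket bounds do not match"
	default:
		return "unexpected histogram error"
	}
}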
@ -2751,7 +2770,7 @@ type groupedAggregation struct {
hasHistogram bool // Has at least 1 histogram sample aggregated. hasHistogram bool // Has at least 1 histogram sample aggregated.
floatValue float64 floatValue float64
histogramValue *histogram.FloatHistogram histogramValue *histogram.FloatHistogram
floatMean float64 floatMean float64 // Mean, or "compensating value" for Kahan summation.
groupCount int groupCount int
heap vectorByValueHeap heap vectorByValueHeap
} }
@ -2779,11 +2798,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
*group = groupedAggregation{ *group = groupedAggregation{
seen: true, seen: true,
floatValue: f, floatValue: f,
floatMean: f,
groupCount: 1, groupCount: 1,
} }
switch op { switch op {
case parser.SUM, parser.AVG: case parser.AVG:
group.floatMean = f
fallthrough
case parser.SUM:
if h == nil { if h == nil {
group.hasFloat = true group.hasFloat = true
} else { } else {
@ -2791,6 +2812,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
group.hasHistogram = true group.hasHistogram = true
} }
case parser.STDVAR, parser.STDDEV: case parser.STDVAR, parser.STDDEV:
group.floatMean = f
group.floatValue = 0 group.floatValue = 0
case parser.QUANTILE: case parser.QUANTILE:
group.heap = make(vectorByValueHeap, 1) group.heap = make(vectorByValueHeap, 1)
@ -2806,14 +2828,17 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
if h != nil { if h != nil {
group.hasHistogram = true group.hasHistogram = true
if group.histogramValue != nil { if group.histogramValue != nil {
group.histogramValue.Add(h) _, err := group.histogramValue.Add(h)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
}
} }
// Otherwise the aggregation contained floats // Otherwise the aggregation contained floats
// previously and will be invalid anyway. No // previously and will be invalid anyway. No
// point in copying the histogram in that case. // point in copying the histogram in that case.
} else { } else {
group.hasFloat = true group.hasFloat = true
group.floatValue += f group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean)
} }
case parser.AVG: case parser.AVG:
@ -2823,8 +2848,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
if group.histogramValue != nil { if group.histogramValue != nil {
left := h.Copy().Div(float64(group.groupCount)) left := h.Copy().Div(float64(group.groupCount))
right := group.histogramValue.Copy().Div(float64(group.groupCount)) right := group.histogramValue.Copy().Div(float64(group.groupCount))
toAdd := left.Sub(right) toAdd, err := left.Sub(right)
group.histogramValue.Add(toAdd) if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
}
_, err = group.histogramValue.Add(toAdd)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
}
} }
// Otherwise the aggregation contained floats // Otherwise the aggregation contained floats
// previously and will be invalid anyway. No // previously and will be invalid anyway. No
@ -2924,6 +2955,8 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
} }
if aggr.hasHistogram { if aggr.hasHistogram {
aggr.histogramValue.Compact(0) aggr.histogramValue.Compact(0)
} else {
aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term.
} }
default: default:
// For other aggregations, we already have the right value. // For other aggregations, we already have the right value.
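The sum aggregation now reuses floatMean as the Kahan compensation term and folds it back in exactly once at the end (the `aggr.floatValue += aggr.floatMean` line above). kahanSumInc itself is outside this hunk; a standard Kahan/Neumaier increment consistent with how it is called would look like this sketch (assumes "math" is imported):

// kahanSumInc adds inc to sum while carrying a compensation term c that
// captures the low-order bits lost to floating-point rounding.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	if math.Abs(sum) >= math.Abs(inc) {
		c += (sum - t) + inc // sum dominated; recover inc's lost bits
	} else {
		c += (inc - t) + sum // inc dominated; recover sum's lost bits
	}
	return t, c
}

This is why the float branch earlier replaced the plain `group.floatValue += f` with a kahanSumInc call: summing many samples of mixed magnitude otherwise loses precision.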
@ -3121,6 +3154,31 @@ func (ev *evaluator) nextValues(ts int64, series *Series) (f float64, h *histogr
return f, h, true return f, h, true
} }
// handleAggregationError adds the appropriate annotation based on the aggregation error.
func handleAggregationError(err error, e *parser.AggregateExpr, metricName string, annos *annotations.Annotations) {
pos := e.Expr.PositionRange()
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
annos.Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
}
}
// handleVectorBinopError returns the appropriate annotation based on the vector binary operation error.
func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotations {
if err == nil {
return nil
}
metricName := ""
pos := e.PositionRange()
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
return annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
}
return nil
}
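The two helpers differ in shape for a reason: an aggregation can attribute the failure to the series being folded in, so handleAggregationError appends into the evaluation's existing annotation set with that series' metric name, while a binary expression has no single underlying metric, so handleVectorBinopError emits a fresh annotation set with an empty metric name and the position of the whole expression. Either way the problem surfaces as a PromQL warning attached to the result rather than a hard failure; a simplified sketch of a call site (Merge semantics assumed):

// Warnings travel with the result instead of aborting the query.
vec, err := ev.VectorBinop(op, lhs, rhs, matching, returnBool, lhsh, rhsh, enh)
annos = annos.Merge(handleVectorBinopError(err, e))
return vec, annos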
// groupingKey builds and returns the grouping key for the given metric and // groupingKey builds and returns the grouping key for the given metric and
// grouping labels. // grouping labels.
func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) { func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) {

View file

@ -843,10 +843,10 @@ load 10s
{ {
Query: "metricWith1HistogramEvery10Seconds", Query: "metricWith1HistogramEvery10Seconds",
Start: time.Unix(21, 0), Start: time.Unix(21, 0),
PeakSamples: 12, PeakSamples: 13,
TotalSamples: 12, // 1 histogram sample of size 12 / 10 seconds TotalSamples: 13, // 1 histogram HPoint of size 13 / 10 seconds
TotalSamplesPerStep: stats.TotalSamplesPerStep{ TotalSamplesPerStep: stats.TotalSamplesPerStep{
21000: 12, 21000: 13,
}, },
}, },
{ {
@ -943,10 +943,10 @@ load 10s
{ {
Query: "metricWith1HistogramEvery10Seconds[60s]", Query: "metricWith1HistogramEvery10Seconds[60s]",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
PeakSamples: 72, PeakSamples: 78,
TotalSamples: 72, // 1 histogram (size 12) / 10 seconds * 60 seconds TotalSamples: 78, // 1 histogram (size 13 HPoint) / 10 seconds * 60 seconds
TotalSamplesPerStep: stats.TotalSamplesPerStep{ TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 72, 201000: 78,
}, },
}, },
{ {
@ -973,11 +973,11 @@ load 10s
{ {
Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]", Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
PeakSamples: 72, PeakSamples: 78,
TotalSamples: 312, // (1 histogram (size 12) / 10 seconds * 60 seconds) * 4 + 2 * 12 as TotalSamples: 338, // (1 histogram (size 13 HPoint) / 10 seconds * 60 seconds) * 4 + 2 * 13 as
// max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples.
TotalSamplesPerStep: stats.TotalSamplesPerStep{ TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 312, 201000: 338,
}, },
}, },
{ {
@ -992,10 +992,10 @@ load 10s
{ {
Query: "metricWith1HistogramEvery10Seconds[60s] @ 30", Query: "metricWith1HistogramEvery10Seconds[60s] @ 30",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
PeakSamples: 48, PeakSamples: 52,
TotalSamples: 48, // @ modifier forces the evaluation at 30 seconds, so it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series TotalSamples: 52, // @ modifier forces the evaluation at 30 seconds, so it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series
TotalSamplesPerStep: stats.TotalSamplesPerStep{ TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 48, 201000: 52,
}, },
}, },
{ {
@ -1130,13 +1130,13 @@ load 10s
Start: time.Unix(204, 0), Start: time.Unix(204, 0),
End: time.Unix(223, 0), End: time.Unix(223, 0),
Interval: 5 * time.Second, Interval: 5 * time.Second,
PeakSamples: 48, PeakSamples: 52,
TotalSamples: 48, // 1 histogram (size 12) per query * 4 steps TotalSamples: 52, // 1 histogram (size 13 HPoint) per query * 4 steps
TotalSamplesPerStep: stats.TotalSamplesPerStep{ TotalSamplesPerStep: stats.TotalSamplesPerStep{
204000: 12, // aligned to the step time, not the sample time 204000: 13, // aligned to the step time, not the sample time
209000: 12, 209000: 13,
214000: 12, 214000: 13,
219000: 12, 219000: 13,
}, },
}, },
{ {
@ -2024,47 +2024,6 @@ func TestSubquerySelector(t *testing.T) {
} }
} }
func TestTimestampFunction_StepsMoreOftenThanSamples(t *testing.T) {
engine := newTestEngine(t)
storage := promqltest.LoadedStorage(t, `
load 1m
metric 0+1x1000
`)
t.Cleanup(func() { storage.Close() })
query := "timestamp(metric)"
start := time.Unix(0, 0)
end := time.Unix(61, 0)
interval := time.Second
// We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s.
expectedPoints := []promql.FPoint{}
for t := 0; t <= 59; t++ {
expectedPoints = append(expectedPoints, promql.FPoint{F: 0, T: int64(t * 1000)})
}
expectedPoints = append(
expectedPoints,
promql.FPoint{F: 60, T: 60_000},
promql.FPoint{F: 60, T: 61_000},
)
expectedResult := promql.Matrix{
promql.Series{
Floats: expectedPoints,
Metric: labels.EmptyLabels(),
},
}
qry, err := engine.NewRangeQuery(context.Background(), storage, nil, query, start, end, interval)
require.NoError(t, err)
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
testutil.RequireEqual(t, expectedResult, res.Value)
}
type FakeQueryLogger struct { type FakeQueryLogger struct {
closed bool closed bool
logs []interface{} logs []interface{}
@ -3082,167 +3041,7 @@ func TestEngineOptsValidation(t *testing.T) {
} }
} }
func TestRangeQuery(t *testing.T) {
engine := newTestEngine()
cases := []struct {
Name string
Load string
Query string
Result parser.Value
Start time.Time
End time.Time
Interval time.Duration
}{
{
Name: "sum_over_time with all values",
Load: `load 30s
bar 0 1 10 100 1000`,
Query: "sum_over_time(bar[30s])",
Result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}},
Metric: labels.EmptyLabels(),
},
},
Start: time.Unix(0, 0),
End: time.Unix(120, 0),
Interval: 60 * time.Second,
},
{
Name: "sum_over_time with trailing values",
Load: `load 30s
bar 0 1 10 100 1000 0 0 0 0`,
Query: "sum_over_time(bar[30s])",
Result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}},
Metric: labels.EmptyLabels(),
},
},
Start: time.Unix(0, 0),
End: time.Unix(120, 0),
Interval: 60 * time.Second,
},
{
Name: "sum_over_time with all values long",
Load: `load 30s
bar 0 1 10 100 1000 10000 100000 1000000 10000000`,
Query: "sum_over_time(bar[30s])",
Result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}, {F: 110000, T: 180000}, {F: 11000000, T: 240000}},
Metric: labels.EmptyLabels(),
},
},
Start: time.Unix(0, 0),
End: time.Unix(240, 0),
Interval: 60 * time.Second,
},
{
Name: "sum_over_time with all values random",
Load: `load 30s
bar 5 17 42 2 7 905 51`,
Query: "sum_over_time(bar[30s])",
Result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 5, T: 0}, {F: 59, T: 60000}, {F: 9, T: 120000}, {F: 956, T: 180000}},
Metric: labels.EmptyLabels(),
},
},
Start: time.Unix(0, 0),
End: time.Unix(180, 0),
Interval: 60 * time.Second,
},
{
Name: "metric query",
Load: `load 30s
metric 1+1x4`,
Query: "metric",
Result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}},
Metric: labels.FromStrings("__name__", "metric"),
},
},
Start: time.Unix(0, 0),
End: time.Unix(120, 0),
Interval: 1 * time.Minute,
},
{
Name: "metric query with trailing values",
Load: `load 30s
metric 1+1x8`,
Query: "metric",
Result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}},
Metric: labels.FromStrings("__name__", "metric"),
},
},
Start: time.Unix(0, 0),
End: time.Unix(120, 0),
Interval: 1 * time.Minute,
},
{
Name: "short-circuit",
Load: `load 30s
foo{job="1"} 1+1x4
bar{job="2"} 1+1x4`,
Query: `foo > 2 or bar`,
Result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}},
Metric: labels.FromStrings(
"__name__", "bar",
"job", "2",
),
},
promql.Series{
Floats: []promql.FPoint{{F: 3, T: 60000}, {F: 5, T: 120000}},
Metric: labels.FromStrings(
"__name__", "foo",
"job", "1",
),
},
},
Start: time.Unix(0, 0),
End: time.Unix(120, 0),
Interval: 1 * time.Minute,
},
{
Name: "drop-metric-name",
Load: `load 30s
requests{job="1", __address__="bar"} 100`,
Query: `requests * 2`,
Result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 200, T: 0}, {F: 200, T: 60000}, {F: 200, T: 120000}},
Metric: labels.FromStrings(
"__address__", "bar",
"job", "1",
),
},
},
Start: time.Unix(0, 0),
End: time.Unix(120, 0),
Interval: 1 * time.Minute,
},
}
for _, c := range cases {
t.Run(c.Name, func(t *testing.T) {
engine := newTestEngine(t)
storage := promqltest.LoadedStorage(t, c.Load)
t.Cleanup(func() { storage.Close() })
qry, err := engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval)
require.NoError(t, err)
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
testutil.RequireEqual(t, c.Result, res.Value)
})
}
}
func TestInstantQueryWithRangeVectorSelector(t *testing.T) { func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
engine := newTestEngine(t) engine := newTestEngine(t)

View file

@ -14,6 +14,7 @@
package promql package promql
import ( import (
"errors"
"fmt" "fmt"
"math" "math"
"slices" "slices"
@ -210,14 +211,28 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
} }
h := last.CopyToSchema(minSchema) h := last.CopyToSchema(minSchema)
h.Sub(prev) _, err := h.Sub(prev)
if err != nil {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
}
}
if isCounter { if isCounter {
// Second iteration to deal with counter resets. // Second iteration to deal with counter resets.
for _, currPoint := range points[1:] { for _, currPoint := range points[1:] {
curr := currPoint.H curr := currPoint.H
if curr.DetectReset(prev) { if curr.DetectReset(prev) {
h.Add(prev) _, err := h.Add(prev)
if err != nil {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
}
}
} }
prev = curr prev = curr
} }
@ -513,10 +528,11 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series)
return append(enh.Out, Sample{F: aggrFn(el)}) return append(enh.Out, Sample{F: aggrFn(el)})
} }
func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector { func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) {
el := vals[0].(Matrix)[0] el := vals[0].(Matrix)[0]
res, err := aggrFn(el)
return append(enh.Out, Sample{H: aggrFn(el)}) return append(enh.Out, Sample{H: res}), err
} }
// === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === // === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
@ -528,18 +544,33 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
} }
if len(firstSeries.Floats) == 0 { if len(firstSeries.Floats) == 0 {
// The passed values only contain histograms. // The passed values only contain histograms.
return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) {
count := 1 count := 1
mean := s.Histograms[0].H.Copy() mean := s.Histograms[0].H.Copy()
for _, h := range s.Histograms[1:] { for _, h := range s.Histograms[1:] {
count++ count++
left := h.H.Copy().Div(float64(count)) left := h.H.Copy().Div(float64(count))
right := mean.Copy().Div(float64(count)) right := mean.Copy().Div(float64(count))
toAdd := left.Sub(right) toAdd, err := left.Sub(right)
mean.Add(toAdd) if err != nil {
return mean, err
}
_, err = mean.Add(toAdd)
if err != nil {
return mean, err
}
} }
return mean return mean, nil
}), nil })
if err != nil {
metricName := firstSeries.Metric.Get(labels.MetricName)
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
}
}
return vec, nil
} }
return aggrOverTime(vals, enh, func(s Series) float64 { return aggrOverTime(vals, enh, func(s Series) float64 {
var mean, count, c float64 var mean, count, c float64
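The histogram branch computes the mean incrementally: with running mean m over n points, the update m' = m + (x - m)/n is evaluated as m + (x/n - m/n), keeping every intermediate histogram at roughly the magnitude of the data instead of accumulating a potentially overflowing sum first. A float analogue of the same update (illustrative):

// incMean folds x into a running mean over count points, mirroring the
// left/right/toAdd steps in the histogram branch above.
func incMean(mean, x float64, count int) float64 {
	n := float64(count)
	return mean + (x/n - mean/n) // algebraically m + (x-m)/n
}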
@ -673,13 +704,25 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
} }
if len(firstSeries.Floats) == 0 { if len(firstSeries.Floats) == 0 {
// The passed values only contain histograms. // The passed values only contain histograms.
return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) {
sum := s.Histograms[0].H.Copy() sum := s.Histograms[0].H.Copy()
for _, h := range s.Histograms[1:] { for _, h := range s.Histograms[1:] {
sum.Add(h.H) _, err := sum.Add(h.H)
if err != nil {
return sum, err
}
} }
return sum return sum, nil
}), nil })
if err != nil {
metricName := firstSeries.Metric.Get(labels.MetricName)
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
}
}
return vec, nil
} }
return aggrOverTime(vals, enh, func(s Series) float64 { return aggrOverTime(vals, enh, func(s Series) float64 {
var sum, c float64 var sum, c float64

View file

@ -84,6 +84,7 @@ BUCKETS_DESC
NEGATIVE_BUCKETS_DESC NEGATIVE_BUCKETS_DESC
ZERO_BUCKET_DESC ZERO_BUCKET_DESC
ZERO_BUCKET_WIDTH_DESC ZERO_BUCKET_WIDTH_DESC
CUSTOM_VALUES_DESC
%token histogramDescEnd %token histogramDescEnd
// Operators. // Operators.
@ -797,6 +798,11 @@ histogram_desc_item
$$ = yylex.(*parser).newMap() $$ = yylex.(*parser).newMap()
$$["z_bucket_w"] = $3 $$["z_bucket_w"] = $3
} }
| CUSTOM_VALUES_DESC COLON bucket_set
{
$$ = yylex.(*parser).newMap()
$$["custom_values"] = $3
}
| BUCKETS_DESC COLON bucket_set | BUCKETS_DESC COLON bucket_set
{ {
$$ = yylex.(*parser).newMap() $$ = yylex.(*parser).newMap()
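This grammar rule extends the inline histogram notation used in series descriptions (the `load` blocks in PromQL tests) so native histograms with custom bucket boundaries can be declared directly. A plausible example of the resulting syntax; the schema value for custom buckets is an assumption here, not shown in this hunk:

load 1m
    series {{schema:-53 count:4 sum:10 custom_values:[0.1 0.5 1] buckets:[1 2 1]}}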

View file

@ -67,62 +67,63 @@ const BUCKETS_DESC = 57375
const NEGATIVE_BUCKETS_DESC = 57376 const NEGATIVE_BUCKETS_DESC = 57376
const ZERO_BUCKET_DESC = 57377 const ZERO_BUCKET_DESC = 57377
const ZERO_BUCKET_WIDTH_DESC = 57378 const ZERO_BUCKET_WIDTH_DESC = 57378
const histogramDescEnd = 57379 const CUSTOM_VALUES_DESC = 57379
const operatorsStart = 57380 const histogramDescEnd = 57380
const ADD = 57381 const operatorsStart = 57381
const DIV = 57382 const ADD = 57382
const EQLC = 57383 const DIV = 57383
const EQL_REGEX = 57384 const EQLC = 57384
const GTE = 57385 const EQL_REGEX = 57385
const GTR = 57386 const GTE = 57386
const LAND = 57387 const GTR = 57387
const LOR = 57388 const LAND = 57388
const LSS = 57389 const LOR = 57389
const LTE = 57390 const LSS = 57390
const LUNLESS = 57391 const LTE = 57391
const MOD = 57392 const LUNLESS = 57392
const MUL = 57393 const MOD = 57393
const NEQ = 57394 const MUL = 57394
const NEQ_REGEX = 57395 const NEQ = 57395
const POW = 57396 const NEQ_REGEX = 57396
const SUB = 57397 const POW = 57397
const AT = 57398 const SUB = 57398
const ATAN2 = 57399 const AT = 57399
const operatorsEnd = 57400 const ATAN2 = 57400
const aggregatorsStart = 57401 const operatorsEnd = 57401
const AVG = 57402 const aggregatorsStart = 57402
const BOTTOMK = 57403 const AVG = 57403
const COUNT = 57404 const BOTTOMK = 57404
const COUNT_VALUES = 57405 const COUNT = 57405
const GROUP = 57406 const COUNT_VALUES = 57406
const MAX = 57407 const GROUP = 57407
const MIN = 57408 const MAX = 57408
const QUANTILE = 57409 const MIN = 57409
const STDDEV = 57410 const QUANTILE = 57410
const STDVAR = 57411 const STDDEV = 57411
const SUM = 57412 const STDVAR = 57412
const TOPK = 57413 const SUM = 57413
const aggregatorsEnd = 57414 const TOPK = 57414
const keywordsStart = 57415 const aggregatorsEnd = 57415
const BOOL = 57416 const keywordsStart = 57416
const BY = 57417 const BOOL = 57417
const GROUP_LEFT = 57418 const BY = 57418
const GROUP_RIGHT = 57419 const GROUP_LEFT = 57419
const IGNORING = 57420 const GROUP_RIGHT = 57420
const OFFSET = 57421 const IGNORING = 57421
const ON = 57422 const OFFSET = 57422
const WITHOUT = 57423 const ON = 57423
const keywordsEnd = 57424 const WITHOUT = 57424
const preprocessorStart = 57425 const keywordsEnd = 57425
const START = 57426 const preprocessorStart = 57426
const END = 57427 const START = 57427
const preprocessorEnd = 57428 const END = 57428
const startSymbolsStart = 57429 const preprocessorEnd = 57429
const START_METRIC = 57430 const startSymbolsStart = 57430
const START_SERIES_DESCRIPTION = 57431 const START_METRIC = 57431
const START_EXPRESSION = 57432 const START_SERIES_DESCRIPTION = 57432
const START_METRIC_SELECTOR = 57433 const START_EXPRESSION = 57433
const startSymbolsEnd = 57434 const START_METRIC_SELECTOR = 57434
const startSymbolsEnd = 57435
var yyToknames = [...]string{ var yyToknames = [...]string{
"$end", "$end",
@ -161,6 +162,7 @@ var yyToknames = [...]string{
"NEGATIVE_BUCKETS_DESC", "NEGATIVE_BUCKETS_DESC",
"ZERO_BUCKET_DESC", "ZERO_BUCKET_DESC",
"ZERO_BUCKET_WIDTH_DESC", "ZERO_BUCKET_WIDTH_DESC",
"CUSTOM_VALUES_DESC",
"histogramDescEnd", "histogramDescEnd",
"operatorsStart", "operatorsStart",
"ADD", "ADD",
@ -235,270 +237,273 @@ var yyExca = [...]int16{
24, 134, 24, 134,
-2, 0, -2, 0,
-1, 58, -1, 58,
2, 171,
15, 171,
75, 171,
81, 171,
-2, 100,
-1, 59,
2, 172, 2, 172,
15, 172, 15, 172,
75, 172, 76, 172,
81, 172, 82, 172,
-2, 101, -2, 100,
-1, 60, -1, 59,
2, 173, 2, 173,
15, 173, 15, 173,
75, 173, 76, 173,
81, 173, 82, 173,
-2, 103, -2, 101,
-1, 61, -1, 60,
2, 174, 2, 174,
15, 174, 15, 174,
75, 174, 76, 174,
81, 174, 82, 174,
-2, 104, -2, 103,
-1, 62, -1, 61,
2, 175, 2, 175,
15, 175, 15, 175,
75, 175, 76, 175,
81, 175, 82, 175,
-2, 105, -2, 104,
-1, 63, -1, 62,
2, 176, 2, 176,
15, 176, 15, 176,
75, 176, 76, 176,
81, 176, 82, 176,
-2, 110, -2, 105,
-1, 64, -1, 63,
2, 177, 2, 177,
15, 177, 15, 177,
75, 177, 76, 177,
81, 177, 82, 177,
-2, 112, -2, 110,
-1, 65, -1, 64,
2, 178, 2, 178,
15, 178, 15, 178,
75, 178, 76, 178,
81, 178, 82, 178,
-2, 114, -2, 112,
-1, 66, -1, 65,
2, 179, 2, 179,
15, 179, 15, 179,
75, 179, 76, 179,
81, 179, 82, 179,
-2, 115, -2, 114,
-1, 67, -1, 66,
2, 180, 2, 180,
15, 180, 15, 180,
75, 180, 76, 180,
81, 180, 82, 180,
-2, 116, -2, 115,
-1, 68, -1, 67,
2, 181, 2, 181,
15, 181, 15, 181,
75, 181, 76, 181,
81, 181, 82, 181,
-2, 117, -2, 116,
-1, 69, -1, 68,
2, 182, 2, 182,
15, 182, 15, 182,
75, 182, 76, 182,
81, 182, 82, 182,
-2, 117,
-1, 69,
2, 183,
15, 183,
76, 183,
82, 183,
-2, 118, -2, 118,
-1, 195, -1, 195,
12, 230, 12, 231,
13, 230, 13, 231,
18, 230, 18, 231,
19, 230, 19, 231,
25, 230, 25, 231,
39, 230, 40, 231,
45, 230, 46, 231,
46, 230, 47, 231,
49, 230, 50, 231,
55, 230, 56, 231,
60, 230, 61, 231,
61, 230, 62, 231,
62, 230, 63, 231,
63, 230, 64, 231,
64, 230, 65, 231,
65, 230, 66, 231,
66, 230, 67, 231,
67, 230, 68, 231,
68, 230, 69, 231,
69, 230, 70, 231,
70, 230, 71, 231,
71, 230, 72, 231,
75, 230, 76, 231,
79, 230, 80, 231,
81, 230, 82, 231,
84, 230, 85, 231,
85, 230, 86, 231,
-2, 0, -2, 0,
-1, 196, -1, 196,
12, 230, 12, 231,
13, 230, 13, 231,
18, 230, 18, 231,
19, 230, 19, 231,
25, 230, 25, 231,
39, 230, 40, 231,
45, 230, 46, 231,
46, 230, 47, 231,
49, 230, 50, 231,
55, 230, 56, 231,
60, 230, 61, 231,
61, 230, 62, 231,
62, 230, 63, 231,
63, 230, 64, 231,
64, 230, 65, 231,
65, 230, 66, 231,
66, 230, 67, 231,
67, 230, 68, 231,
68, 230, 69, 231,
69, 230, 70, 231,
70, 230, 71, 231,
71, 230, 72, 231,
75, 230, 76, 231,
79, 230, 80, 231,
81, 230, 82, 231,
84, 230, 85, 231,
85, 230, 86, 231,
-2, 0, -2, 0,
-1, 217, -1, 217,
21, 228,
-2, 0,
-1, 285,
21, 229, 21, 229,
-2, 0, -2, 0,
-1, 286,
21, 230,
-2, 0,
} }
const yyPrivate = 57344 const yyPrivate = 57344
const yyLast = 742 const yyLast = 778
var yyAct = [...]int16{ var yyAct = [...]int16{
151, 322, 320, 268, 327, 148, 221, 37, 187, 144, 151, 324, 322, 268, 329, 148, 221, 37, 187, 144,
281, 280, 152, 113, 77, 173, 104, 102, 101, 6, 282, 281, 152, 113, 77, 173, 104, 102, 101, 6,
128, 223, 105, 193, 155, 194, 195, 196, 339, 262, 223, 193, 105, 194, 195, 196, 128, 262, 260, 155,
260, 233, 317, 316, 57, 100, 294, 239, 103, 146, 233, 103, 342, 293, 100, 319, 239, 116, 146, 318,
300, 313, 263, 156, 156, 283, 147, 338, 259, 123, 315, 263, 156, 123, 106, 147, 284, 114, 295, 116,
337, 106, 252, 311, 155, 299, 340, 301, 264, 157, 156, 341, 175, 259, 340, 253, 57, 264, 157, 114,
157, 108, 298, 109, 235, 236, 292, 251, 237, 107, 117, 108, 313, 109, 235, 236, 157, 112, 237, 107,
155, 292, 174, 191, 175, 96, 250, 99, 258, 224, 323, 174, 117, 175, 155, 96, 250, 99, 293, 224,
226, 228, 229, 230, 238, 240, 243, 244, 245, 246, 226, 228, 229, 230, 238, 240, 243, 244, 245, 246,
247, 110, 145, 225, 227, 231, 232, 234, 241, 242, 247, 177, 145, 225, 227, 231, 232, 234, 241, 242,
98, 257, 321, 248, 249, 2, 3, 4, 5, 218, 98, 176, 178, 248, 249, 104, 2, 3, 4, 5,
158, 104, 177, 217, 168, 162, 165, 105, 175, 160, 158, 105, 177, 110, 168, 162, 165, 302, 150, 160,
164, 161, 176, 178, 189, 213, 106, 328, 216, 256, 191, 161, 176, 178, 189, 155, 213, 343, 106, 330,
183, 179, 192, 163, 181, 100, 190, 197, 198, 199, 72, 179, 192, 33, 181, 155, 190, 197, 198, 199,
200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
210, 211, 255, 182, 72, 212, 177, 214, 215, 33, 210, 211, 185, 301, 258, 212, 156, 214, 215, 188,
82, 84, 85, 7, 86, 87, 176, 178, 90, 91, 256, 183, 290, 191, 252, 164, 155, 289, 300, 218,
223, 93, 94, 95, 116, 96, 97, 99, 83, 147, 223, 79, 157, 217, 7, 299, 312, 257, 163, 251,
233, 286, 289, 116, 114, 254, 239, 288, 147, 172, 233, 78, 288, 255, 182, 254, 239, 156, 216, 180,
220, 124, 253, 114, 171, 310, 309, 117, 120, 261, 220, 124, 172, 120, 147, 311, 314, 171, 119, 261,
98, 112, 287, 119, 278, 279, 117, 170, 282, 10, 287, 153, 154, 157, 279, 280, 79, 147, 283, 310,
308, 159, 307, 235, 236, 312, 118, 237, 147, 74, 170, 118, 159, 10, 235, 236, 78, 309, 237, 147,
306, 305, 304, 303, 302, 250, 81, 285, 224, 226, 308, 307, 306, 74, 76, 305, 250, 286, 304, 224,
228, 229, 230, 238, 240, 243, 244, 245, 246, 247, 226, 228, 229, 230, 238, 240, 243, 244, 245, 246,
79, 79, 225, 227, 231, 232, 234, 241, 242, 48, 247, 303, 81, 225, 227, 231, 232, 234, 241, 242,
78, 78, 248, 249, 122, 73, 121, 150, 180, 76, 48, 34, 1, 248, 249, 122, 73, 121, 285, 47,
290, 291, 293, 56, 295, 8, 9, 9, 34, 35, 291, 292, 294, 56, 296, 8, 9, 9, 46, 35,
1, 284, 296, 297, 155, 129, 130, 131, 132, 133, 45, 44, 297, 298, 127, 129, 130, 131, 132, 133,
134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
47, 46, 45, 44, 156, 314, 315, 127, 43, 42, 43, 42, 41, 125, 166, 40, 316, 317, 126, 39,
41, 185, 319, 125, 166, 324, 325, 326, 188, 323, 38, 49, 186, 321, 338, 265, 326, 327, 328, 80,
157, 329, 191, 331, 330, 155, 40, 126, 332, 333, 325, 184, 219, 332, 331, 334, 333, 75, 115, 149,
100, 51, 72, 334, 53, 39, 38, 22, 52, 336, 335, 336, 100, 51, 72, 337, 53, 55, 222, 22,
49, 167, 186, 335, 54, 156, 265, 80, 341, 153, 52, 339, 50, 167, 111, 0, 54, 0, 0, 0,
154, 184, 219, 75, 115, 82, 84, 149, 70, 55, 0, 344, 0, 0, 0, 0, 0, 0, 82, 84,
222, 157, 50, 111, 18, 19, 93, 94, 20, 0, 0, 70, 0, 0, 0, 0, 0, 18, 19, 93,
96, 97, 99, 83, 71, 0, 0, 0, 0, 58, 94, 20, 0, 96, 97, 99, 83, 71, 0, 0,
0, 0, 58, 59, 60, 61, 62, 63, 64, 65,
66, 67, 68, 69, 0, 0, 0, 13, 98, 0,
0, 24, 0, 30, 0, 0, 31, 32, 36, 100,
51, 72, 0, 53, 267, 0, 22, 52, 0, 0,
0, 266, 0, 54, 0, 270, 271, 269, 276, 278,
275, 277, 272, 273, 274, 0, 84, 0, 70, 0,
0, 0, 0, 0, 18, 19, 93, 94, 20, 0,
96, 0, 99, 83, 71, 0, 0, 0, 0, 58,
59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
69, 0, 0, 0, 13, 98, 0, 0, 24, 0, 69, 0, 0, 0, 13, 98, 0, 0, 24, 0,
30, 0, 0, 31, 32, 36, 100, 51, 72, 0, 30, 0, 0, 31, 32, 51, 72, 0, 53, 320,
53, 267, 0, 22, 52, 0, 0, 0, 266, 0, 0, 22, 52, 0, 0, 0, 0, 0, 54, 0,
54, 0, 270, 271, 269, 275, 277, 274, 276, 272, 270, 271, 269, 276, 278, 275, 277, 272, 273, 274,
273, 0, 84, 0, 70, 0, 0, 0, 0, 0, 0, 0, 0, 70, 0, 0, 17, 72, 0, 18,
18, 19, 93, 94, 20, 0, 96, 0, 99, 83, 19, 0, 22, 20, 0, 0, 0, 0, 0, 71,
71, 0, 0, 0, 0, 58, 59, 60, 61, 62, 0, 0, 0, 0, 58, 59, 60, 61, 62, 63,
63, 64, 65, 66, 67, 68, 69, 0, 0, 0, 64, 65, 66, 67, 68, 69, 0, 0, 0, 13,
13, 98, 0, 0, 24, 0, 30, 0, 0, 31, 18, 19, 0, 24, 20, 30, 0, 0, 31, 32,
32, 51, 72, 0, 53, 318, 0, 22, 52, 0, 0, 0, 0, 0, 0, 11, 12, 14, 15, 16,
0, 0, 0, 0, 54, 0, 270, 271, 269, 275, 21, 23, 25, 26, 27, 28, 29, 17, 33, 0,
277, 274, 276, 272, 273, 0, 0, 0, 70, 0, 13, 0, 0, 22, 24, 0, 30, 0, 0, 31,
0, 0, 0, 0, 18, 19, 0, 0, 20, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 17, 72, 71, 0, 0, 0, 22, 58, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 0, 18, 19, 0, 0, 20, 0, 0, 0, 0,
69, 0, 0, 0, 13, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 11, 12, 14, 15,
30, 0, 0, 31, 32, 18, 19, 0, 0, 20, 16, 21, 23, 25, 26, 27, 28, 29, 100, 0,
0, 0, 0, 17, 33, 0, 0, 0, 0, 22, 0, 13, 0, 0, 0, 24, 169, 30, 0, 0,
11, 12, 14, 15, 16, 21, 23, 25, 26, 27, 31, 32, 0, 0, 0, 0, 0, 100, 0, 0,
28, 29, 0, 0, 0, 13, 0, 0, 0, 24, 0, 0, 0, 0, 82, 84, 85, 0, 86, 87,
0, 30, 0, 0, 31, 32, 18, 19, 0, 0, 88, 89, 90, 91, 92, 93, 94, 95, 0, 96,
20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 97, 99, 83, 82, 84, 85, 0, 86, 87, 88,
0, 11, 12, 14, 15, 16, 21, 23, 25, 26, 89, 90, 91, 92, 93, 94, 95, 0, 96, 97,
27, 28, 29, 100, 0, 0, 13, 0, 0, 0, 99, 83, 100, 0, 98, 0, 0, 0, 0, 0,
24, 169, 30, 0, 0, 31, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 100, 0, 0, 0, 0, 0, 82, 84, 0, 100, 0, 98, 0, 0, 0, 0, 82, 84,
85, 0, 86, 87, 88, 89, 90, 91, 92, 93, 85, 0, 86, 87, 88, 0, 90, 91, 92, 93,
94, 95, 0, 96, 97, 99, 83, 82, 84, 85, 94, 95, 0, 96, 97, 99, 83, 82, 84, 85,
0, 86, 87, 88, 89, 90, 91, 92, 93, 94, 0, 86, 87, 0, 0, 90, 91, 0, 93, 94,
95, 0, 96, 97, 99, 83, 100, 0, 98, 0, 95, 0, 96, 97, 99, 83, 0, 0, 98, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 98,
0, 82, 84, 85, 0, 86, 87, 88, 0, 90,
91, 92, 93, 94, 95, 0, 96, 97, 99, 83,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 98,
} }
var yyPact = [...]int16{ var yyPact = [...]int16{
17, 153, 541, 541, 385, 500, -1000, -1000, -1000, 146, 17, 164, 555, 555, 388, 494, -1000, -1000, -1000, 120,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, 239, -1000, 224, -1000, 618, -1000, -1000, -1000, -1000, -1000, 204, -1000, 240, -1000, 633, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
36, 111, -1000, 459, -1000, 459, 141, -1000, -1000, -1000, 29, 113, -1000, 463, -1000, 463, 117, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, 181, -1000, -1000, 196, -1000, -1000, 252, -1000, -1000, -1000, 47, -1000, -1000, 191, -1000, -1000, 253, -1000,
25, -1000, -54, -54, -54, -54, -54, -54, -54, -54, 19, -1000, -49, -49, -49, -49, -49, -49, -49, -49,
-54, -54, -54, -54, -54, -54, -54, -54, 37, 255, -49, -49, -49, -49, -49, -49, -49, -49, 36, 116,
209, 111, -59, -1000, 118, 118, 309, -1000, 599, 21, 210, 113, -60, -1000, 163, 163, 311, -1000, 614, 20,
-1000, 187, -1000, -1000, 70, 114, -1000, -1000, -1000, 238, -1000, 190, -1000, -1000, 69, 48, -1000, -1000, -1000, 169,
-1000, 128, -1000, 296, 459, -1000, -55, -50, -1000, 459, -1000, 159, -1000, 147, 463, -1000, -58, -53, -1000, 463,
459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463,
459, 459, 459, 459, -1000, 170, -1000, -1000, -1000, 110, 463, 463, 463, 463, -1000, 185, -1000, -1000, -1000, 111,
-1000, -1000, -1000, -1000, -1000, -1000, 51, 51, 107, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 55, 55, 167, -1000,
-1000, -1000, -1000, 168, -1000, -1000, 45, -1000, 618, -1000, -1000, -1000, -1000, 168, -1000, -1000, 157, -1000, 633, -1000,
-1000, 172, -1000, 127, -1000, -1000, -1000, -1000, -1000, 76, -1000, 35, -1000, 158, -1000, -1000, -1000, -1000, -1000, 152,
-1000, -1000, -1000, -1000, -1000, 22, 4, 3, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 27, 2, 1, -1000, -1000,
-1000, 384, 382, 118, 118, 118, 118, 21, 21, 306, -1000, 387, 385, 163, 163, 163, 163, 20, 20, 308,
306, 306, 121, 662, 306, 306, 121, 21, 21, 306, 308, 308, 697, 678, 308, 308, 697, 20, 20, 308,
21, 382, -1000, 23, -1000, -1000, -1000, 179, -1000, 180, 20, 385, -1000, 24, -1000, -1000, -1000, 198, -1000, 160,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, 459, -1000, -1000, -1000, -1000, -1000, -1000, 52, -1000, -1000, 463, -1000, -1000, -1000, -1000, -1000, -1000, 59,
52, 10, 52, 57, 57, 38, 40, -1000, -1000, 218, 59, 22, 59, 104, 104, 151, 100, -1000, -1000, 235,
217, 216, 215, 214, 206, 204, 190, 189, -1000, -1000, 222, 219, 216, 215, 214, 211, 203, 189, 170, -1000,
-1000, -1000, -1000, -1000, 32, 213, -1000, -1000, 19, -1000, -1000, -1000, -1000, -1000, -1000, 41, 194, -1000, -1000, 18,
618, -1000, -1000, -1000, 52, -1000, 7, 6, 458, -1000, -1000, 633, -1000, -1000, -1000, 59, -1000, 13, 9, 462,
-1000, -1000, 47, 5, 51, 51, 51, 113, 47, 113, -1000, -1000, -1000, 14, 10, 55, 55, 55, 115, 115,
47, -1000, -1000, -1000, -1000, -1000, 52, 52, -1000, -1000, 14, 115, 14, -1000, -1000, -1000, -1000, -1000, 59, 59,
-1000, 52, -1000, -1000, -1000, -1000, -1000, -1000, 51, -1000, -1000, -1000, -1000, 59, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, 26, -1000, 35, -1000, -1000, 55, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 30, -1000,
-1000, -1000, 106, -1000, -1000, -1000, -1000,
} }
var yyPgo = [...]int16{ var yyPgo = [...]int16{
0, 353, 13, 352, 6, 15, 350, 263, 349, 347, 0, 334, 13, 332, 6, 15, 328, 263, 327, 319,
344, 209, 265, 343, 14, 342, 10, 11, 341, 337, 318, 213, 265, 317, 14, 312, 10, 11, 311, 309,
8, 336, 3, 4, 333, 2, 1, 0, 332, 12, 8, 305, 3, 4, 304, 2, 1, 0, 302, 12,
5, 330, 326, 18, 191, 325, 317, 7, 316, 304, 5, 301, 300, 18, 191, 299, 298, 7, 295, 294,
17, 303, 34, 300, 299, 298, 297, 293, 292, 291, 17, 293, 56, 292, 291, 290, 274, 271, 270, 268,
290, 249, 9, 271, 270, 268, 259, 250, 9, 258, 252, 251,
} }
var yyR1 = [...]int8{ var yyR1 = [...]int8{
@ -518,14 +523,14 @@ var yyR1 = [...]int8{
14, 14, 14, 55, 19, 19, 19, 19, 18, 18, 14, 14, 14, 55, 19, 19, 19, 19, 18, 18,
18, 18, 18, 18, 18, 18, 18, 28, 28, 28, 18, 18, 18, 18, 18, 18, 18, 28, 28, 28,
20, 20, 20, 20, 21, 21, 21, 22, 22, 22, 20, 20, 20, 20, 21, 21, 21, 22, 22, 22,
22, 22, 22, 22, 22, 22, 23, 23, 24, 24, 22, 22, 22, 22, 22, 22, 22, 23, 23, 24,
24, 3, 3, 3, 3, 3, 3, 3, 3, 3, 24, 24, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
8, 8, 5, 5, 5, 5, 44, 27, 29, 29, 6, 8, 8, 5, 5, 5, 5, 44, 27, 29,
30, 30, 26, 25, 25, 52, 48, 10, 53, 53, 29, 30, 30, 26, 25, 25, 52, 48, 10, 53,
17, 17, 53, 17, 17,
} }
var yyR2 = [...]int8{ var yyR2 = [...]int8{
@ -545,52 +550,52 @@ var yyR2 = [...]int8{
3, 2, 1, 2, 0, 3, 2, 1, 1, 3, 3, 2, 1, 2, 0, 3, 2, 1, 1, 3,
1, 3, 4, 1, 3, 5, 5, 1, 1, 1, 1, 3, 4, 1, 3, 5, 5, 1, 1, 1,
4, 3, 3, 2, 3, 1, 2, 3, 3, 3, 4, 3, 3, 2, 3, 1, 2, 3, 3, 3,
3, 3, 3, 3, 3, 3, 4, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
1, 1, 1, 2, 1, 1, 1, 1, 0, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 0,
0, 1, 1, 0, 1,
} }
var yyChk = [...]int16{ var yyChk = [...]int16{
-1000, -54, 88, 89, 90, 91, 2, 10, -12, -7, -1000, -54, 89, 90, 91, 92, 2, 10, -12, -7,
-11, 60, 61, 75, 62, 63, 64, 12, 45, 46, -11, 61, 62, 76, 63, 64, 65, 12, 46, 47,
49, 65, 18, 66, 79, 67, 68, 69, 70, 71, 50, 66, 18, 67, 80, 68, 69, 70, 71, 72,
81, 84, 85, 13, -55, -12, 10, -37, -32, -35, 82, 85, 86, 13, -55, -12, 10, -37, -32, -35,
-38, -43, -44, -45, -47, -48, -49, -50, -51, -31, -38, -43, -44, -45, -47, -48, -49, -50, -51, -31,
-3, 12, 19, 15, 25, -8, -7, -42, 60, 61, -3, 12, 19, 15, 25, -8, -7, -42, 61, 62,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
39, 55, 13, -51, -11, -13, 20, -14, 12, 2, 40, 56, 13, -51, -11, -13, 20, -14, 12, 2,
-19, 2, 39, 57, 40, 41, 43, 44, 45, 46, -19, 2, 40, 58, 41, 42, 44, 45, 46, 47,
47, 48, 49, 50, 51, 52, 54, 55, 79, 56, 48, 49, 50, 51, 52, 53, 55, 56, 80, 57,
14, -33, -40, 2, 75, 81, 15, -40, -37, -37, 14, -33, -40, 2, 76, 82, 15, -40, -37, -37,
-42, -1, 20, -2, 12, -10, 2, 25, 20, 7, -42, -1, 20, -2, 12, -10, 2, 25, 20, 7,
2, 4, 2, 24, -34, -41, -36, -46, 74, -34, 2, 4, 2, 24, -34, -41, -36, -46, 75, -34,
-34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34,
-34, -34, -34, -34, -52, 55, 2, 9, -30, -9, -34, -34, -34, -34, -52, 56, 2, 9, -30, -9,
2, -27, -29, 84, 85, 19, 39, 55, -52, 2, 2, -27, -29, 85, 86, 19, 40, 56, -52, 2,
-40, -33, -16, 15, 2, -16, -39, 22, -37, 22, -40, -33, -16, 15, 2, -16, -39, 22, -37, 22,
20, 7, 2, -5, 2, 4, 52, 42, 53, -5, 20, 7, 2, -5, 2, 4, 53, 43, 54, -5,
20, -14, 25, 2, -18, 5, -28, -20, 12, -27, 20, -14, 25, 2, -18, 5, -28, -20, 12, -27,
-29, 16, -37, 78, 80, 76, 77, -37, -37, -37, -29, 16, -37, 79, 81, 77, 78, -37, -37, -37,
-37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, -37,
-37, -37, -52, 15, -27, -27, 21, 6, 2, -15, -37, -37, -52, 15, -27, -27, 21, 6, 2, -15,
22, -4, -6, 2, 60, 74, 61, 75, 62, 63, 22, -4, -6, 2, 61, 75, 62, 76, 63, 64,
64, 76, 77, 12, 78, 45, 46, 49, 65, 18, 65, 77, 78, 12, 79, 46, 47, 50, 66, 18,
66, 79, 80, 67, 68, 69, 70, 71, 84, 85, 67, 80, 81, 68, 69, 70, 71, 72, 85, 86,
57, 22, 7, 20, -2, 25, 2, 25, 2, 26, 58, 22, 7, 20, -2, 25, 2, 25, 2, 26,
26, -29, 26, 39, 55, -21, 24, 17, -22, 30, 26, -29, 26, 40, 56, -21, 24, 17, -22, 30,
28, 29, 35, 36, 33, 31, 34, 32, -16, -16, 28, 29, 35, 36, 37, 33, 31, 34, 32, -16,
-17, -16, -17, 22, -53, -52, 2, 22, 7, 2, -16, -17, -16, -17, 22, -53, -52, 2, 22, 7,
-37, -26, 19, -26, 26, -26, -20, -20, 24, 17, 2, -37, -26, 19, -26, 26, -26, -20, -20, 24,
2, 17, 6, 6, 6, 6, 6, 6, 6, 6, 17, 2, 17, 6, 6, 6, 6, 6, 6, 6,
6, 21, 2, 22, -4, -26, 26, 26, 17, -22, 6, 6, 6, 21, 2, 22, -4, -26, 26, 26,
-25, 55, -26, -30, -27, -27, -27, -23, 14, -25, 17, -22, -25, 56, -26, -30, -27, -27, -27, -23,
-23, -25, -26, -26, -26, -24, -27, 24, 21, 2, 14, -23, -25, -23, -25, -26, -26, -26, -24, -27,
21, -27, 24, 21, 2, 21, -27,
} }
var yyDef = [...]int16{ var yyDef = [...]int16{
@ -599,36 +604,36 @@ var yyDef = [...]int16{
109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
119, 120, 121, 0, 2, -2, 3, 4, 8, 9, 119, 120, 121, 0, 2, -2, 3, 4, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
0, 106, 216, 0, 226, 0, 83, 84, -2, -2, 0, 106, 217, 0, 227, 0, 83, 84, -2, -2,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
210, 211, 0, 5, 98, 0, 124, 127, 0, 132, 211, 212, 0, 5, 98, 0, 124, 127, 0, 132,
133, 137, 43, 43, 43, 43, 43, 43, 43, 43, 133, 137, 43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 0, 0, 43, 43, 43, 43, 43, 43, 43, 43, 0, 0,
0, 0, 22, 23, 0, 0, 0, 60, 0, 81, 0, 0, 22, 23, 0, 0, 0, 60, 0, 81,
82, 0, 87, 89, 0, 93, 97, 227, 122, 0, 82, 0, 87, 89, 0, 93, 97, 228, 122, 0,
128, 0, 131, 136, 0, 42, 47, 48, 44, 0, 128, 0, 131, 136, 0, 42, 47, 48, 44, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 67, 0, 69, 225, 70, 0, 0, 0, 0, 0, 67, 0, 69, 226, 70, 0,
72, 220, 221, 73, 74, 217, 0, 0, 0, 80, 72, 221, 222, 73, 74, 218, 0, 0, 0, 80,
20, 21, 24, 0, 54, 25, 0, 62, 64, 66, 20, 21, 24, 0, 54, 25, 0, 62, 64, 66,
85, 0, 90, 0, 96, 212, 213, 214, 215, 0, 85, 0, 90, 0, 96, 213, 214, 215, 216, 0,
123, 126, 129, 130, 135, 138, 140, 143, 147, 148, 123, 126, 129, 130, 135, 138, 140, 143, 147, 148,
149, 0, 26, 0, 0, -2, -2, 27, 28, 29, 149, 0, 26, 0, 0, -2, -2, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 68, 0, 218, 219, 75, -2, 79, 0, 40, 41, 68, 0, 219, 220, 75, -2, 79, 0,
53, 56, 58, 59, 183, 184, 185, 186, 187, 188, 53, 56, 58, 59, 184, 185, 186, 187, 188, 189,
189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
209, 61, 65, 86, 88, 91, 95, 92, 94, 0, 210, 61, 65, 86, 88, 91, 95, 92, 94, 0,
0, 0, 0, 0, 0, 0, 0, 153, 155, 0, 0, 0, 0, 0, 0, 0, 0, 153, 155, 0,
0, 0, 0, 0, 0, 0, 0, 0, 45, 46, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45,
49, 231, 50, 71, 0, -2, 78, 51, 0, 57, 46, 49, 232, 50, 71, 0, -2, 78, 51, 0,
63, 139, 222, 141, 0, 144, 0, 0, 0, 151, 57, 63, 139, 223, 141, 0, 144, 0, 0, 0,
156, 152, 0, 0, 0, 0, 0, 0, 0, 0, 151, 156, 152, 0, 0, 0, 0, 0, 0, 0,
0, 76, 77, 52, 55, 142, 0, 0, 150, 154, 0, 0, 0, 76, 77, 52, 55, 142, 0, 0,
157, 0, 224, 158, 159, 160, 161, 162, 0, 163, 150, 154, 157, 0, 225, 158, 159, 160, 161, 162,
164, 165, 145, 146, 223, 0, 169, 0, 167, 170, 0, 163, 164, 165, 166, 145, 146, 224, 0, 170,
166, 168, 0, 168, 171, 167, 169,
} }
var yyTok1 = [...]int8{ var yyTok1 = [...]int8{
@ -645,7 +650,7 @@ var yyTok2 = [...]int8{
62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
92, 92, 93,
} }
var yyTok3 = [...]int8{ var yyTok3 = [...]int8{
@ -1738,47 +1743,53 @@ yydefault:
yyDollar = yyS[yypt-3 : yypt+1] yyDollar = yyS[yypt-3 : yypt+1]
{ {
yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set
} }
case 163: case 163:
yyDollar = yyS[yypt-3 : yypt+1] yyDollar = yyS[yypt-3 : yypt+1]
{ {
yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["offset"] = yyDollar[3].int yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set
} }
case 164: case 164:
yyDollar = yyS[yypt-3 : yypt+1] yyDollar = yyS[yypt-3 : yypt+1]
{ {
yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set yyVAL.descriptors["offset"] = yyDollar[3].int
} }
case 165: case 165:
yyDollar = yyS[yypt-3 : yypt+1] yyDollar = yyS[yypt-3 : yypt+1]
{ {
yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["n_offset"] = yyDollar[3].int yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set
} }
case 166: case 166:
yyDollar = yyS[yypt-4 : yypt+1] yyDollar = yyS[yypt-3 : yypt+1]
{ {
yyVAL.bucket_set = yyDollar[2].bucket_set yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["n_offset"] = yyDollar[3].int
} }
case 167: case 167:
yyDollar = yyS[yypt-3 : yypt+1] yyDollar = yyS[yypt-4 : yypt+1]
{ {
yyVAL.bucket_set = yyDollar[2].bucket_set yyVAL.bucket_set = yyDollar[2].bucket_set
} }
case 168: case 168:
yyDollar = yyS[yypt-3 : yypt+1] yyDollar = yyS[yypt-3 : yypt+1]
{ {
yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) yyVAL.bucket_set = yyDollar[2].bucket_set
} }
case 169: case 169:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
}
case 170:
yyDollar = yyS[yypt-1 : yypt+1] yyDollar = yyS[yypt-1 : yypt+1]
{ {
yyVAL.bucket_set = []float64{yyDollar[1].float} yyVAL.bucket_set = []float64{yyDollar[1].float}
} }
case 216: case 217:
yyDollar = yyS[yypt-1 : yypt+1] yyDollar = yyS[yypt-1 : yypt+1]
{ {
yyVAL.node = &NumberLiteral{ yyVAL.node = &NumberLiteral{
@ -1786,22 +1797,22 @@ yydefault:
PosRange: yyDollar[1].item.PositionRange(), PosRange: yyDollar[1].item.PositionRange(),
} }
} }
case 217: case 218:
yyDollar = yyS[yypt-1 : yypt+1] yyDollar = yyS[yypt-1 : yypt+1]
{ {
yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val)
} }
case 218: case 219:
yyDollar = yyS[yypt-2 : yypt+1] yyDollar = yyS[yypt-2 : yypt+1]
{ {
yyVAL.float = yyDollar[2].float yyVAL.float = yyDollar[2].float
} }
case 219: case 220:
yyDollar = yyS[yypt-2 : yypt+1] yyDollar = yyS[yypt-2 : yypt+1]
{ {
yyVAL.float = -yyDollar[2].float yyVAL.float = -yyDollar[2].float
} }
case 222: case 223:
yyDollar = yyS[yypt-1 : yypt+1] yyDollar = yyS[yypt-1 : yypt+1]
{ {
var err error var err error
@ -1810,17 +1821,17 @@ yydefault:
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err)
} }
} }
case 223: case 224:
yyDollar = yyS[yypt-2 : yypt+1] yyDollar = yyS[yypt-2 : yypt+1]
{ {
yyVAL.int = -int64(yyDollar[2].uint) yyVAL.int = -int64(yyDollar[2].uint)
} }
case 224: case 225:
yyDollar = yyS[yypt-1 : yypt+1] yyDollar = yyS[yypt-1 : yypt+1]
{ {
yyVAL.int = int64(yyDollar[1].uint) yyVAL.int = int64(yyDollar[1].uint)
} }
case 225: case 226:
yyDollar = yyS[yypt-1 : yypt+1] yyDollar = yyS[yypt-1 : yypt+1]
{ {
var err error var err error
@ -1829,7 +1840,7 @@ yydefault:
yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err) yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err)
} }
} }
case 226: case 227:
yyDollar = yyS[yypt-1 : yypt+1] yyDollar = yyS[yypt-1 : yypt+1]
{ {
yyVAL.node = &StringLiteral{ yyVAL.node = &StringLiteral{
@ -1837,7 +1848,7 @@ yydefault:
PosRange: yyDollar[1].item.PositionRange(), PosRange: yyDollar[1].item.PositionRange(),
} }
} }
case 227: case 228:
yyDollar = yyS[yypt-1 : yypt+1] yyDollar = yyS[yypt-1 : yypt+1]
{ {
yyVAL.item = Item{ yyVAL.item = Item{
@ -1846,12 +1857,12 @@ yydefault:
Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val),
} }
} }
case 228: case 229:
yyDollar = yyS[yypt-0 : yypt+1] yyDollar = yyS[yypt-0 : yypt+1]
{ {
yyVAL.duration = 0 yyVAL.duration = 0
} }
case 230: case 231:
yyDollar = yyS[yypt-0 : yypt+1] yyDollar = yyS[yypt-0 : yypt+1]
{ {
yyVAL.strings = nil yyVAL.strings = nil

View file

@ -135,15 +135,16 @@ var key = map[string]ItemType{
} }
var histogramDesc = map[string]ItemType{ var histogramDesc = map[string]ItemType{
"sum": SUM_DESC, "sum": SUM_DESC,
"count": COUNT_DESC, "count": COUNT_DESC,
"schema": SCHEMA_DESC, "schema": SCHEMA_DESC,
"offset": OFFSET_DESC, "offset": OFFSET_DESC,
"n_offset": NEGATIVE_OFFSET_DESC, "n_offset": NEGATIVE_OFFSET_DESC,
"buckets": BUCKETS_DESC, "buckets": BUCKETS_DESC,
"n_buckets": NEGATIVE_BUCKETS_DESC, "n_buckets": NEGATIVE_BUCKETS_DESC,
"z_bucket": ZERO_BUCKET_DESC, "z_bucket": ZERO_BUCKET_DESC,
"z_bucket_w": ZERO_BUCKET_WIDTH_DESC, "z_bucket_w": ZERO_BUCKET_WIDTH_DESC,
"custom_values": CUSTOM_VALUES_DESC,
} }
// ItemTypeStr is the default string representations for common Items. It does not // ItemTypeStr is the default string representations for common Items. It does not
@ -313,6 +314,11 @@ func (l *Lexer) accept(valid string) bool {
return false return false
} }
// is peeks and returns true if the next rune is contained in the provided string.
func (l *Lexer) is(valid string) bool {
return strings.ContainsRune(valid, l.peek())
}
// acceptRun consumes a run of runes from the valid set. // acceptRun consumes a run of runes from the valid set.
func (l *Lexer) acceptRun(valid string) { func (l *Lexer) acceptRun(valid string) {
for strings.ContainsRune(valid, l.next()) { for strings.ContainsRune(valid, l.next()) {
@ -901,19 +907,78 @@ func acceptRemainingDuration(l *Lexer) bool {
// scanNumber scans numbers of different formats. The scanned Item is // scanNumber scans numbers of different formats. The scanned Item is
// not necessarily a valid number. This case is caught by the parser. // not necessarily a valid number. This case is caught by the parser.
func (l *Lexer) scanNumber() bool { func (l *Lexer) scanNumber() bool {
digits := "0123456789" // Modify the digit pattern if the number is hexadecimal.
digitPattern := "0123456789"
// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
if !l.seriesDesc && l.accept("0") && l.accept("xX") { if !l.seriesDesc &&
digits = "0123456789abcdefABCDEF" l.accept("0") && l.accept("xX") {
l.accept("_") // e.g., 0X_1FFFP-16 == 0.1249847412109375
digitPattern = "0123456789abcdefABCDEF"
} }
l.acceptRun(digits) const (
if l.accept(".") { // Define dot, exponent, and underscore patterns.
l.acceptRun(digits) dotPattern = "."
} exponentPattern = "eE"
if l.accept("eE") { underscorePattern = "_"
l.accept("+-") // Anti-patterns are rune sets that cannot follow their respective rune.
l.acceptRun("0123456789") dotAntiPattern = "_."
exponentAntiPattern = "._eE" // and EOL.
underscoreAntiPattern = "._eE" // and EOL.
)
// All numbers follow the prefix: [.][d][d._eE]*
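// As illustrative examples drawn from the lexer tests: this accepts forms
// like 00_1_23_4.56_7_8, 0x1_2_34, 1.e2, and 1e1_2_34, and rejects forms
// like 1..2, 1_e2, 1ee2, 1e+, and 12_.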
l.accept(dotPattern)
l.accept(digitPattern)
// [d._eE]* hereon.
dotConsumed := false
exponentConsumed := false
for l.is(digitPattern + dotPattern + underscorePattern + exponentPattern) {
// "." cannot repeat.
if l.is(dotPattern) {
if dotConsumed {
l.accept(dotPattern)
return false
}
}
// "eE" cannot repeat.
if l.is(exponentPattern) {
if exponentConsumed {
l.accept(exponentPattern)
return false
}
}
// Handle dots.
if l.accept(dotPattern) {
dotConsumed = true
if l.accept(dotAntiPattern) {
return false
}
// Fractional hexadecimal literals are not allowed.
if len(digitPattern) > 10 /* 0x[\da-fA-F].[\d]+p[\d] */ {
return false
}
continue
}
// Handle exponents.
if l.accept(exponentPattern) {
exponentConsumed = true
l.accept("+-")
if l.accept(exponentAntiPattern) || l.peek() == eof {
return false
}
continue
}
// Handle underscores.
if l.accept(underscorePattern) {
if l.accept(underscoreAntiPattern) || l.peek() == eof {
return false
}
continue
}
// Handle digits at the end since we already consumed before this loop.
l.acceptRun(digitPattern)
} }
// Next thing must not be alphanumeric unless it's the times token
// for series repetitions.
if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) { if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {


@ -132,6 +132,84 @@ var tests = []struct {
}, { }, {
input: "0x123", input: "0x123",
expected: []Item{{NUMBER, 0, "0x123"}}, expected: []Item{{NUMBER, 0, "0x123"}},
}, {
input: "1..2",
fail: true,
}, {
input: "1.2.",
fail: true,
}, {
input: "00_1_23_4.56_7_8",
expected: []Item{{NUMBER, 0, "00_1_23_4.56_7_8"}},
}, {
input: "00_1_23__4.56_7_8",
fail: true,
}, {
input: "00_1_23_4._56_7_8",
fail: true,
}, {
input: "00_1_23_4_.56_7_8",
fail: true,
}, {
input: "0x1_2_34",
expected: []Item{{NUMBER, 0, "0x1_2_34"}},
}, {
input: "0x1_2__34",
fail: true,
}, {
input: "0x1_2__34.5_6p1", // "0x1.1p1"-based formats are not supported yet.
fail: true,
}, {
input: "0x1_2__34.5_6",
fail: true,
}, {
input: "0x1_2__34.56",
fail: true,
}, {
input: "1_e2",
fail: true,
}, {
input: "1.e2",
expected: []Item{{NUMBER, 0, "1.e2"}},
}, {
input: "1e.2",
fail: true,
}, {
input: "1e+.2",
fail: true,
}, {
input: "1ee2",
fail: true,
}, {
input: "1e+e2",
fail: true,
}, {
input: "1e",
fail: true,
}, {
input: "1e+",
fail: true,
}, {
input: "1e1_2_34",
expected: []Item{{NUMBER, 0, "1e1_2_34"}},
}, {
input: "1e_1_2_34",
fail: true,
}, {
input: "1e1_2__34",
fail: true,
}, {
input: "1e+_1_2_34",
fail: true,
}, {
input: "1e-_1_2_34",
fail: true,
}, {
input: "12_",
fail: true,
}, {
input: "_1_2",
expected: []Item{{IDENTIFIER, 0, "_1_2"}},
}, },
}, },
}, },


@ -481,19 +481,19 @@ func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string
} }
func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) { func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram { return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
return a.Add(b) return a.Add(b)
}) })
} }
func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) { func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram { return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
return a.Sub(b) return a.Sub(b)
}) })
} }
func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64, func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64,
combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) *histogram.FloatHistogram, combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) (*histogram.FloatHistogram, error),
) ([]SequenceValue, error) { ) ([]SequenceValue, error) {
ret := make([]SequenceValue, times+1) ret := make([]SequenceValue, times+1)
// Add an additional value (the base) for time 0, which we ignore in tests.
@ -504,7 +504,11 @@ func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uin
return nil, fmt.Errorf("error combining histograms: cannot merge from schema %d to %d", inc.Schema, cur.Schema) return nil, fmt.Errorf("error combining histograms: cannot merge from schema %d to %d", inc.Schema, cur.Schema)
} }
cur = combine(cur.Copy(), inc) var err error
cur, err = combine(cur.Copy(), inc)
if err != nil {
return ret, err
}
ret[i] = SequenceValue{Histogram: cur} ret[i] = SequenceValue{Histogram: cur}
} }
@ -562,6 +566,15 @@ func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket_w number: %v", val) p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket_w number: %v", val)
} }
} }
val, ok = (*desc)["custom_values"]
if ok {
customValues, ok := val.([]float64)
if ok {
output.CustomValues = customValues
} else {
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing custom_values: %v", val)
}
}
buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset") buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset")
output.PositiveBuckets = buckets output.PositiveBuckets = buckets


@ -513,12 +513,12 @@ var testExpr = []struct {
{ {
input: "2.5.", input: "2.5.",
fail: true, fail: true,
errMsg: "unexpected character: '.'", errMsg: `1:1: parse error: bad number or duration syntax: "2.5."`,
}, },
{ {
input: "100..4", input: "100..4",
fail: true, fail: true,
errMsg: `unexpected number ".4"`, errMsg: `1:1: parse error: bad number or duration syntax: "100.."`,
}, },
{ {
input: "0deadbeef", input: "0deadbeef",


@ -148,6 +148,13 @@ func TestExprString(t *testing.T) {
in: `{"_0"="1"}`, in: `{"_0"="1"}`,
out: `{_0="1"}`, out: `{_0="1"}`,
}, },
{
in: `{""="0"}`,
},
{
in: "{``=\"0\"}",
out: `{""="0"}`,
},
} }
for _, test := range inputs { for _, test := range inputs {


@ -63,6 +63,10 @@ load 1m
Each `load` command is additive - it does not replace any data loaded in a previous `load` command.
Use `clear` to remove all loaded data.
### Native histograms with custom buckets (NHCB)
When loading a batch of classic histogram float series, you can optionally append the suffix `_with_nhcb` to the `load` command. This converts the classic series to native histograms with custom buckets (NHCB) and loads both the original float series and the new histogram series.
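For example, a minimal sketch (the metric name and values are illustrative): the classic `_bucket`, `_sum`, and `_count` series below are loaded as-is, and an equivalent NHCB series named `example_latency_seconds` is synthesized from them.

```
load_with_nhcb 1m
    example_latency_seconds_bucket{le="0.1"} 0+1x5
    example_latency_seconds_bucket{le="+Inf"} 0+2x5
    example_latency_seconds_sum 0+0.15x5
    example_latency_seconds_count 0+2x5
```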
## `clear` command

`clear` removes all data previously loaded with `load` commands.


@ -19,6 +19,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"io/fs" "io/fs"
"math"
"sort"
"strconv" "strconv"
"strings" "strings"
"testing" "testing"
@ -43,9 +45,9 @@ import (
var ( var (
patSpace = regexp.MustCompile("[\t ]+") patSpace = regexp.MustCompile("[\t ]+")
patLoad = regexp.MustCompile(`^load\s+(.+?)$`) patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`)
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
patEvalRange = regexp.MustCompile(`^eval(?:_(fail))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
) )
const ( const (
@ -181,15 +183,18 @@ func raise(line int, format string, v ...interface{}) error {
func parseLoad(lines []string, i int) (int, *loadCmd, error) { func parseLoad(lines []string, i int) (int, *loadCmd, error) {
if !patLoad.MatchString(lines[i]) { if !patLoad.MatchString(lines[i]) {
return i, nil, raise(i, "invalid load command. (load <step:duration>)") return i, nil, raise(i, "invalid load command. (load[_with_nhcb] <step:duration>)")
} }
parts := patLoad.FindStringSubmatch(lines[i]) parts := patLoad.FindStringSubmatch(lines[i])
var (
gap, err := model.ParseDuration(parts[1]) withNHCB = parts[1] == "with_nhcb"
step = parts[2]
)
gap, err := model.ParseDuration(step)
if err != nil { if err != nil {
return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err) return i, nil, raise(i, "invalid step definition %q: %s", step, err)
} }
cmd := newLoadCmd(time.Duration(gap)) cmd := newLoadCmd(time.Duration(gap), withNHCB)
for i+1 < len(lines) { for i+1 < len(lines) {
i++ i++
defLine := lines[i] defLine := lines[i]
@ -222,7 +227,7 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
rangeParts := patEvalRange.FindStringSubmatch(lines[i]) rangeParts := patEvalRange.FindStringSubmatch(lines[i])
if instantParts == nil && rangeParts == nil { if instantParts == nil && rangeParts == nil {
return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_fail|_ordered] instant [at <offset:duration>] <query>' or 'eval[_fail] range from <from> to <to> step <step> <query>'") return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_fail|_warn|_ordered] instant [at <offset:duration>] <query>' or 'eval[_fail|_warn] range from <from> to <to> step <step> <query>'")
} }
isInstant := instantParts != nil isInstant := instantParts != nil
@ -301,6 +306,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
cmd.ordered = true cmd.ordered = true
case "fail": case "fail":
cmd.fail = true cmd.fail = true
case "warn":
cmd.warn = true
} }
for j := 1; i+1 < len(lines); j++ { for j := 1; i+1 < len(lines); j++ {
@ -371,7 +378,7 @@ func (t *test) parse(input string) error {
switch c := strings.ToLower(patSpace.Split(l, 2)[0]); { switch c := strings.ToLower(patSpace.Split(l, 2)[0]); {
case c == "clear": case c == "clear":
cmd = &clearCmd{} cmd = &clearCmd{}
case c == "load": case strings.HasPrefix(c, "load"):
i, cmd, err = parseLoad(lines, i) i, cmd, err = parseLoad(lines, i)
case strings.HasPrefix(c, "eval"): case strings.HasPrefix(c, "eval"):
i, cmd, err = t.parseEval(lines, i) i, cmd, err = t.parseEval(lines, i)
@ -403,14 +410,16 @@ type loadCmd struct {
metrics map[uint64]labels.Labels metrics map[uint64]labels.Labels
defs map[uint64][]promql.Sample defs map[uint64][]promql.Sample
exemplars map[uint64][]exemplar.Exemplar exemplars map[uint64][]exemplar.Exemplar
withNHCB bool
} }
func newLoadCmd(gap time.Duration) *loadCmd { func newLoadCmd(gap time.Duration, withNHCB bool) *loadCmd {
return &loadCmd{ return &loadCmd{
gap: gap, gap: gap,
metrics: map[uint64]labels.Labels{}, metrics: map[uint64]labels.Labels{},
defs: map[uint64][]promql.Sample{}, defs: map[uint64][]promql.Sample{},
exemplars: map[uint64][]exemplar.Exemplar{}, exemplars: map[uint64][]exemplar.Exemplar{},
withNHCB: withNHCB,
} }
} }
@ -449,6 +458,167 @@ func (cmd *loadCmd) append(a storage.Appender) error {
} }
} }
} }
if cmd.withNHCB {
return cmd.appendCustomHistogram(a)
}
return nil
}
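// getHistogramMetricBase strips the given suffix from the metric name, drops
// the le label, and returns the resulting base labels together with their
// hash, so that the _bucket, _count, and _sum series of one classic histogram
// collate under the same key.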
func getHistogramMetricBase(m labels.Labels, suffix string) (labels.Labels, uint64) {
mName := m.Get(labels.MetricName)
baseM := labels.NewBuilder(m).
Set(labels.MetricName, strings.TrimSuffix(mName, suffix)).
Del(labels.BucketLabel).
Labels()
hash := baseM.Hash()
return baseM, hash
}
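// tempHistogramWrapper accumulates, per base metric, the bucket upper bounds
// of a classic histogram along with its partially assembled histograms keyed
// by timestamp while the series are being collated.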
type tempHistogramWrapper struct {
metric labels.Labels
upperBounds []float64
histogramByTs map[int64]tempHistogram
}
func newTempHistogramWrapper() tempHistogramWrapper {
return tempHistogramWrapper{
upperBounds: []float64{},
histogramByTs: map[int64]tempHistogram{},
}
}
type tempHistogram struct {
bucketCounts map[float64]float64
count float64
sum float64
}
func newTempHistogram() tempHistogram {
return tempHistogram{
bucketCounts: map[float64]float64{},
}
}
func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogramWrapper func(*tempHistogramWrapper), updateHistogram func(*tempHistogram, float64)) {
m2, m2hash := getHistogramMetricBase(m, suffix)
histogramWrapper, exists := histogramMap[m2hash]
if !exists {
histogramWrapper = newTempHistogramWrapper()
}
histogramWrapper.metric = m2
if updateHistogramWrapper != nil {
updateHistogramWrapper(&histogramWrapper)
}
for _, s := range smpls {
if s.H != nil {
continue
}
histogram, exists := histogramWrapper.histogramByTs[s.T]
if !exists {
histogram = newTempHistogram()
}
updateHistogram(&histogram, s.F)
histogramWrapper.histogramByTs[s.T] = histogram
}
histogramMap[m2hash] = histogramWrapper
}
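// processUpperBoundsAndCreateBaseHistogram sorts and deduplicates the
// collected upper bounds, then builds a zeroed FloatHistogram template with
// custom buckets. A trailing +Inf bound is implied by the custom-buckets
// schema, so it keeps its bucket but is excluded from CustomValues.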
func processUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64) ([]float64, *histogram.FloatHistogram) {
sort.Float64s(upperBounds0)
upperBounds := make([]float64, 0, len(upperBounds0))
prevLE := math.Inf(-1)
for _, le := range upperBounds0 {
if le != prevLE { // deduplicate
upperBounds = append(upperBounds, le)
prevLE = le
}
}
var customBounds []float64
if upperBounds[len(upperBounds)-1] == math.Inf(1) {
customBounds = upperBounds[:len(upperBounds)-1]
} else {
customBounds = upperBounds
}
return upperBounds, &histogram.FloatHistogram{
Count: 0,
Sum: 0,
Schema: histogram.CustomBucketsSchema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: uint32(len(upperBounds))},
},
PositiveBuckets: make([]float64, len(upperBounds)),
CustomValues: customBounds,
}
}
// If classic histograms are defined, convert them into native histograms with custom
// bounds and append the defined time series to the storage.
func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error {
histogramMap := map[uint64]tempHistogramWrapper{}
// Go through all the time series to collate classic histogram data
// and organise them by timestamp.
for hash, smpls := range cmd.defs {
m := cmd.metrics[hash]
mName := m.Get(labels.MetricName)
switch {
case strings.HasSuffix(mName, "_bucket") && m.Has(labels.BucketLabel):
le, err := strconv.ParseFloat(m.Get(labels.BucketLabel), 64)
if err != nil || math.IsNaN(le) {
continue
}
processClassicHistogramSeries(m, "_bucket", histogramMap, smpls, func(histogramWrapper *tempHistogramWrapper) {
histogramWrapper.upperBounds = append(histogramWrapper.upperBounds, le)
}, func(histogram *tempHistogram, f float64) {
histogram.bucketCounts[le] = f
})
case strings.HasSuffix(mName, "_count"):
processClassicHistogramSeries(m, "_count", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) {
histogram.count = f
})
case strings.HasSuffix(mName, "_sum"):
processClassicHistogramSeries(m, "_sum", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) {
histogram.sum = f
})
}
}
// Convert the collated classic histogram data into native histograms
// with custom bounds and append them to the storage.
for _, histogramWrapper := range histogramMap {
upperBounds, fhBase := processUpperBoundsAndCreateBaseHistogram(histogramWrapper.upperBounds)
samples := make([]promql.Sample, 0, len(histogramWrapper.histogramByTs))
for t, histogram := range histogramWrapper.histogramByTs {
fh := fhBase.Copy()
var prevCount, total float64
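// Classic histogram buckets are cumulative, so subtract the previous
// cumulative count at each upper bound to obtain the per-bucket counts
// that the native histogram stores.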
for i, le := range upperBounds {
currCount, exists := histogram.bucketCounts[le]
if !exists {
currCount = 0
}
count := currCount - prevCount
fh.PositiveBuckets[i] = count
total += count
prevCount = currCount
}
fh.Sum = histogram.sum
if histogram.count != 0 {
total = histogram.count
}
fh.Count = total
s := promql.Sample{T: t, H: fh.Compact(0)}
if err := s.H.Validate(); err != nil {
return err
}
samples = append(samples, s)
}
sort.Slice(samples, func(i, j int) bool { return samples[i].T < samples[j].T })
for _, s := range samples {
if err := appendSample(a, s, histogramWrapper.metric); err != nil {
return err
}
}
}
return nil return nil
} }
@ -475,7 +645,7 @@ type evalCmd struct {
line int line int
isRange bool // if false, instant query isRange bool // if false, instant query
fail, ordered bool fail, warn, ordered bool
expectedFailMessage string expectedFailMessage string
expectedFailRegexp *regexp.Regexp expectedFailRegexp *regexp.Regexp
@ -832,6 +1002,13 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err) return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
} }
res := q.Exec(t.context) res := q.Exec(t.context)
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
if res.Err != nil { if res.Err != nil {
if cmd.fail { if cmd.fail {
return cmd.checkExpectedFailure(res.Err) return cmd.checkExpectedFailure(res.Err)
@ -858,76 +1035,89 @@ func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error {
} }
queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...) queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
for _, iq := range queries { for _, iq := range queries {
q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime) if err := t.runInstantQuery(iq, cmd, engine); err != nil {
if err != nil {
return fmt.Errorf("error creating instant query for %q (line %d): %w", cmd.expr, cmd.line, err)
}
defer q.Close()
res := q.Exec(t.context)
if res.Err != nil {
if cmd.fail {
if err := cmd.checkExpectedFailure(res.Err); err != nil {
return err
}
continue
}
return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
}
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
err = cmd.compareResult(res.Value)
if err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
}
// Check query returns same result in range mode,
// by checking against the middle step.
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
if err != nil {
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
}
rangeRes := q.Exec(t.context)
if rangeRes.Err != nil {
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
}
defer q.Close()
if cmd.ordered {
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
continue
}
mat := rangeRes.Value.(promql.Matrix)
if err := assertMatrixSorted(mat); err != nil {
return err return err
} }
}
return nil
}
vec := make(promql.Vector, 0, len(mat)) func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promql.QueryEngine) error {
for _, series := range mat { q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
// We expect either Floats or Histograms. if err != nil {
for _, point := range series.Floats { return fmt.Errorf("error creating instant query for %q (line %d): %w", cmd.expr, cmd.line, err)
if point.T == timeMilliseconds(iq.evalTime) { }
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, F: point.F}) defer q.Close()
break res := q.Exec(t.context)
} countWarnings, _ := res.Warnings.CountWarningsAndInfo()
} if !cmd.warn && countWarnings > 0 {
for _, point := range series.Histograms { return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
if point.T == timeMilliseconds(iq.evalTime) { }
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, H: point.H}) if cmd.warn && countWarnings == 0 {
break return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line)
} }
if res.Err != nil {
if cmd.fail {
if err := cmd.checkExpectedFailure(res.Err); err != nil {
return err
} }
return nil
} }
if _, ok := res.Value.(promql.Scalar); ok { return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
err = cmd.compareResult(promql.Scalar{V: vec[0].F}) }
} else { if res.Err == nil && cmd.fail {
err = cmd.compareResult(vec) return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
} }
if err != nil { err = cmd.compareResult(res.Value)
return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err) if err != nil {
} return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
} }
// Check query returns same result in range mode,
// by checking against the middle step.
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
if err != nil {
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
}
rangeRes := q.Exec(t.context)
if rangeRes.Err != nil {
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
}
defer q.Close()
if cmd.ordered {
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
return nil
}
mat := rangeRes.Value.(promql.Matrix)
if err := assertMatrixSorted(mat); err != nil {
return err
}
vec := make(promql.Vector, 0, len(mat))
for _, series := range mat {
// We expect either Floats or Histograms.
for _, point := range series.Floats {
if point.T == timeMilliseconds(iq.evalTime) {
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, F: point.F})
break
}
}
for _, point := range series.Histograms {
if point.T == timeMilliseconds(iq.evalTime) {
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, H: point.H})
break
}
}
}
if _, ok := res.Value.(promql.Scalar); ok {
err = cmd.compareResult(promql.Scalar{V: vec[0].F})
} else {
err = cmd.compareResult(vec)
}
if err != nil {
return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err)
}
return nil return nil
} }
@ -1018,7 +1208,7 @@ func (ll *LazyLoader) parse(input string) error {
if len(l) == 0 { if len(l) == 0 {
continue continue
} }
if strings.ToLower(patSpace.Split(l, 2)[0]) == "load" { if strings.HasPrefix(strings.ToLower(patSpace.Split(l, 2)[0]), "load") {
_, cmd, err := parseLoad(lines, i) _, cmd, err := parseLoad(lines, i)
if err != nil { if err != nil {
return err return err


@ -399,7 +399,7 @@ eval instant at 1m quantile without(point)((scalar(foo)), data)
{test="three samples"} 1.6 {test="three samples"} 1.6
{test="uneven samples"} 2.8 {test="uneven samples"} 2.8
eval instant at 1m quantile without(point)(NaN, data) eval_warn instant at 1m quantile without(point)(NaN, data)
{test="two samples"} NaN {test="two samples"} NaN
{test="three samples"} NaN {test="three samples"} NaN
{test="uneven samples"} NaN {test="uneven samples"} NaN
@ -503,6 +503,18 @@ eval instant at 1m avg(data{test="-big"})
eval instant at 1m avg(data{test="bigzero"}) eval instant at 1m avg(data{test="bigzero"})
{} 0 {} 0
# Test summing extreme values.
clear
load 10s
data{test="ten",point="a"} 2
data{test="ten",point="b"} 8
data{test="ten",point="c"} 1e+100
data{test="ten",point="d"} -1e100
eval instant at 1m sum(data{test="ten"})
{} 10
clear clear
# Test that aggregations are deterministic. # Test that aggregations are deterministic.


@ -838,17 +838,17 @@ eval instant at 1m quantile_over_time(1, data[1m])
{test="three samples"} 2 {test="three samples"} 2
{test="uneven samples"} 4 {test="uneven samples"} 4
eval instant at 1m quantile_over_time(-1, data[1m]) eval_warn instant at 1m quantile_over_time(-1, data[1m])
{test="two samples"} -Inf {test="two samples"} -Inf
{test="three samples"} -Inf {test="three samples"} -Inf
{test="uneven samples"} -Inf {test="uneven samples"} -Inf
eval instant at 1m quantile_over_time(2, data[1m]) eval_warn instant at 1m quantile_over_time(2, data[1m])
{test="two samples"} +Inf {test="two samples"} +Inf
{test="three samples"} +Inf {test="three samples"} +Inf
{test="uneven samples"} +Inf {test="uneven samples"} +Inf
eval instant at 1m (quantile_over_time(2, (data[1m]))) eval_warn instant at 1m (quantile_over_time(2, (data[1m])))
{test="two samples"} +Inf {test="two samples"} +Inf
{test="three samples"} +Inf {test="three samples"} +Inf
{test="uneven samples"} +Inf {test="uneven samples"} +Inf
@ -1213,3 +1213,11 @@ eval instant at 5m log10(exp_root_log - 20)
{l="y"} -Inf {l="y"} -Inf
clear clear
# Test that timestamp() handles the scenario where there are more steps than samples.
load 1m
metric 0+1x1000
# We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s.
eval range from 0 to 61s step 1s timestamp(metric)
{} 0x59 60 60


@ -5,7 +5,7 @@
# server has to cope with it.

# Test histogram.
load 5m load_with_nhcb 5m
testhistogram_bucket{le="0.1", start="positive"} 0+5x10 testhistogram_bucket{le="0.1", start="positive"} 0+5x10
testhistogram_bucket{le=".2", start="positive"} 0+7x10 testhistogram_bucket{le=".2", start="positive"} 0+7x10
testhistogram_bucket{le="1e0", start="positive"} 0+11x10 testhistogram_bucket{le="1e0", start="positive"} 0+11x10
@ -18,15 +18,33 @@ load 5m
# Another test histogram, where q(1/6), q(1/2), and q(5/6) are each in
# the middle of a bucket and should therefore be 1, 3, and 5,
# respectively.
load 5m load_with_nhcb 5m
testhistogram2_bucket{le="0"} 0+0x10 testhistogram2_bucket{le="0"} 0+0x10
testhistogram2_bucket{le="2"} 0+1x10 testhistogram2_bucket{le="2"} 0+1x10
testhistogram2_bucket{le="4"} 0+2x10 testhistogram2_bucket{le="4"} 0+2x10
testhistogram2_bucket{le="6"} 0+3x10 testhistogram2_bucket{le="6"} 0+3x10
testhistogram2_bucket{le="+Inf"} 0+3x10 testhistogram2_bucket{le="+Inf"} 0+3x10
# Another test histogram, this time without any observations in the +Inf bucket.
# This enables a meaningful calculation of standard deviation and variance.
load_with_nhcb 5m
testhistogram3_bucket{le="0", start="positive"} 0+0x10
testhistogram3_bucket{le="0.1", start="positive"} 0+5x10
testhistogram3_bucket{le=".2", start="positive"} 0+7x10
testhistogram3_bucket{le="1e0", start="positive"} 0+11x10
testhistogram3_bucket{le="+Inf", start="positive"} 0+11x10
testhistogram3_sum{start="positive"} 0+33x10
testhistogram3_count{start="positive"} 0+11x10
testhistogram3_bucket{le="-.25", start="negative"} 0+0x10
testhistogram3_bucket{le="-.2", start="negative"} 0+1x10
testhistogram3_bucket{le="-0.1", start="negative"} 0+2x10
testhistogram3_bucket{le="0.3", start="negative"} 0+2x10
testhistogram3_bucket{le="+Inf", start="negative"} 0+2x10
testhistogram3_sum{start="negative"} 0+8x10
testhistogram3_count{start="negative"} 0+2x10
# Now a more realistic histogram per job and instance to test aggregation.
load 5m load_with_nhcb 5m
request_duration_seconds_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10 request_duration_seconds_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
request_duration_seconds_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10 request_duration_seconds_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
request_duration_seconds_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10 request_duration_seconds_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
@ -41,7 +59,7 @@ load 5m
request_duration_seconds_bucket{job="job2", instance="ins2", le="+Inf"} 0+9x10 request_duration_seconds_bucket{job="job2", instance="ins2", le="+Inf"} 0+9x10
# Different le representations in one histogram.
load 5m load_with_nhcb 5m
mixed_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10 mixed_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
mixed_bucket{job="job1", instance="ins1", le="0.2"} 0+1x10 mixed_bucket{job="job1", instance="ins1", le="0.2"} 0+1x10
mixed_bucket{job="job1", instance="ins1", le="2e-1"} 0+1x10 mixed_bucket{job="job1", instance="ins1", le="2e-1"} 0+1x10
@ -50,27 +68,81 @@ load 5m
mixed_bucket{job="job1", instance="ins2", le="+inf"} 0+0x10 mixed_bucket{job="job1", instance="ins2", le="+inf"} 0+0x10
mixed_bucket{job="job1", instance="ins2", le="+Inf"} 0+0x10 mixed_bucket{job="job1", instance="ins2", le="+Inf"} 0+0x10
# Test histogram_count.
eval instant at 50m histogram_count(testhistogram3)
{start="positive"} 110
{start="negative"} 20
# Test histogram_sum.
eval instant at 50m histogram_sum(testhistogram3)
{start="positive"} 330
{start="negative"} 80
# Test histogram_avg.
eval instant at 50m histogram_avg(testhistogram3)
{start="positive"} 3
{start="negative"} 4
# Test histogram_stddev.
eval instant at 50m histogram_stddev(testhistogram3)
{start="positive"} 2.8189265757336734
{start="negative"} 4.182715937754936
# Test histogram_stdvar.
eval instant at 50m histogram_stdvar(testhistogram3)
{start="positive"} 7.946347039377573
{start="negative"} 17.495112615949154
# Test histogram_fraction.
eval instant at 50m histogram_fraction(0, 0.2, testhistogram3)
{start="positive"} 0.6363636363636364
{start="negative"} 0
eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m]))
{start="positive"} 0.6363636363636364
{start="negative"} 0
# Test histogram_quantile.
eval instant at 50m histogram_quantile(0, testhistogram3_bucket)
{start="positive"} 0
{start="negative"} -0.25
eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket)
{start="positive"} 0.055
{start="negative"} -0.225
eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket)
{start="positive"} 0.125
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket)
{start="positive"} 0.45
{start="negative"} -0.15
eval instant at 50m histogram_quantile(1, testhistogram3_bucket)
{start="positive"} 1
{start="negative"} -0.1
# Quantile too low.
eval instant at 50m histogram_quantile(-0.1, testhistogram_bucket) eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
{start="positive"} -Inf {start="positive"} -Inf
{start="negative"} -Inf {start="negative"} -Inf
# Quantile too high.
eval instant at 50m histogram_quantile(1.01, testhistogram_bucket) eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket)
{start="positive"} +Inf {start="positive"} +Inf
{start="negative"} +Inf {start="negative"} +Inf
# Quantile invalid.
eval instant at 50m histogram_quantile(NaN, testhistogram_bucket) eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket)
{start="positive"} NaN {start="positive"} NaN
{start="negative"} NaN {start="negative"} NaN
# Quantile value in lowest bucket, which is positive. # Quantile value in lowest bucket.
eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="positive"}) eval instant at 50m histogram_quantile(0, testhistogram_bucket)
{start="positive"} 0 {start="positive"} 0
# Quantile value in lowest bucket, which is negative.
eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="negative"})
{start="negative"} -0.2 {start="negative"} -0.2
# Quantile value in highest bucket.
@ -83,7 +155,6 @@ eval instant at 50m histogram_quantile(0.2, testhistogram_bucket)
{start="positive"} 0.048 {start="positive"} 0.048
{start="negative"} -0.2 {start="negative"} -0.2
eval instant at 50m histogram_quantile(0.5, testhistogram_bucket) eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
{start="positive"} 0.15 {start="positive"} 0.15
{start="negative"} -0.15 {start="negative"} -0.15
@ -182,6 +253,9 @@ eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket
{instance="ins1", job="job2"} 0.1 {instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.11666666666666667 {instance="ins2", job="job2"} 0.11666666666666667
eval instant at 50m sum(request_duration_seconds)
{} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}
# A histogram with nonmonotonic bucket counts. This may happen when recording
# rule evaluation or federation races scrape ingestion, causing some bucket
# counts to be derived from fewer samples.
@ -209,6 +283,10 @@ eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m]))
{instance="ins1", job="job1"} 0.15 {instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} NaN {instance="ins2", job="job1"} NaN
eval instant at 50m histogram_quantile(0.5, rate(mixed[5m]))
{instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN
eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m])) eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m]))
{instance="ins1", job="job1"} 0.2 {instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN {instance="ins2", job="job1"} NaN
@ -217,7 +295,7 @@ eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m]))
{instance="ins1", job="job1"} 0.2 {instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN {instance="ins2", job="job1"} NaN
load 5m load_with_nhcb 5m
empty_bucket{le="0.1", job="job1", instance="ins1"} 0x10 empty_bucket{le="0.1", job="job1", instance="ins1"} 0x10
empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10 empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10
empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10 empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10
@ -227,9 +305,9 @@ eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m]))
# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set
# https://github.com/prometheus/prometheus/issues/9910
load 5m load_with_nhcb 5m
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10 request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10 request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10 request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration.*"}) eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket$"})


@ -364,7 +364,7 @@ eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7)
load 10m load 10m
histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_quantile(1.001, histogram_quantile_1) eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_1)
{} Inf {} Inf
eval instant at 10m histogram_quantile(1, histogram_quantile_1) eval instant at 10m histogram_quantile(1, histogram_quantile_1)
@ -388,14 +388,14 @@ eval instant at 10m histogram_quantile(0.1, histogram_quantile_1)
eval instant at 10m histogram_quantile(0, histogram_quantile_1) eval instant at 10m histogram_quantile(0, histogram_quantile_1)
{} 0 {} 0
eval instant at 10m histogram_quantile(-1, histogram_quantile_1) eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1)
{} -Inf {} -Inf
# Apply quantile function to histogram with all negative buckets with zero bucket.
load 10m load 10m
histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1 histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_quantile(1.001, histogram_quantile_2) eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_2)
{} Inf {} Inf
eval instant at 10m histogram_quantile(1, histogram_quantile_2) eval instant at 10m histogram_quantile(1, histogram_quantile_2)
@ -416,14 +416,14 @@ eval instant at 10m histogram_quantile(0.1, histogram_quantile_2)
eval instant at 10m histogram_quantile(0, histogram_quantile_2) eval instant at 10m histogram_quantile(0, histogram_quantile_2)
{} -16 {} -16
eval instant at 10m histogram_quantile(-1, histogram_quantile_2) eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2)
{} -Inf {} -Inf
# Apply quantile function to histogram with both positive and negative buckets with zero bucket.
load 10m load 10m
histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_quantile(1.001, histogram_quantile_3) eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_3)
{} Inf {} Inf
eval instant at 10m histogram_quantile(1, histogram_quantile_3) eval instant at 10m histogram_quantile(1, histogram_quantile_3)
@ -459,7 +459,7 @@ eval instant at 10m histogram_quantile(0.01, histogram_quantile_3)
eval instant at 10m histogram_quantile(0, histogram_quantile_3) eval instant at 10m histogram_quantile(0, histogram_quantile_3)
{} -16 {} -16
eval instant at 10m histogram_quantile(-1, histogram_quantile_3) eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3)
{} -Inf {} -Inf
# Apply fraction function to empty histogram.
@ -731,3 +731,17 @@ eval instant at 10m histogram_count(increase(reset_in_bucket[15m]))
eval instant at 10m histogram_sum(increase(reset_in_bucket[15m])) eval instant at 10m histogram_sum(increase(reset_in_bucket[15m]))
{} 10.5 {} 10.5
clear
# Test native histograms with custom buckets.
load 5m
custom_buckets_histogram {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}x10
eval instant at 5m histogram_fraction(5, 10, custom_buckets_histogram)
{} 0.5
eval instant at 5m histogram_quantile(0.5, custom_buckets_histogram)
{} 7.5
eval instant at 5m sum(custom_buckets_histogram)
{} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}


@ -0,0 +1,73 @@
# sum_over_time with all values
load 30s
bar 0 1 10 100 1000
eval range from 0 to 2m step 1m sum_over_time(bar[30s])
{} 0 11 1100
clear
# sum_over_time with trailing values
load 30s
bar 0 1 10 100 1000 0 0 0 0
eval range from 0 to 2m step 1m sum_over_time(bar[30s])
{} 0 11 1100
clear
# sum_over_time with all values long
load 30s
bar 0 1 10 100 1000 10000 100000 1000000 10000000
eval range from 0 to 4m step 1m sum_over_time(bar[30s])
{} 0 11 1100 110000 11000000
clear
# sum_over_time with all values random
load 30s
bar 5 17 42 2 7 905 51
eval range from 0 to 3m step 1m sum_over_time(bar[30s])
{} 5 59 9 956
clear
# metric query
load 30s
metric 1+1x4
eval range from 0 to 2m step 1m metric
metric 1 3 5
clear
# metric query with trailing values
load 30s
metric 1+1x8
eval range from 0 to 2m step 1m metric
metric 1 3 5
clear
# short-circuit
load 30s
foo{job="1"} 1+1x4
bar{job="2"} 1+1x4
eval range from 0 to 2m step 1m foo > 2 or bar
foo{job="1"} _ 3 5
bar{job="2"} 1 3 5
clear
# Drop metric name
load 30s
requests{job="1", __address__="bar"} 100
eval range from 0 to 2m step 1m requests * 2
{job="1", __address__="bar"} 200 200 200
clear


@ -206,12 +206,15 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
for it.Next() { for it.Next() {
bucket = it.At() bucket = it.At()
if bucket.Count == 0 {
continue
}
count += bucket.Count count += bucket.Count
if count >= rank { if count >= rank {
break break
} }
} }
if bucket.Lower < 0 && bucket.Upper > 0 { if !h.UsesCustomBuckets() && bucket.Lower < 0 && bucket.Upper > 0 {
switch { switch {
case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0: case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0:
// The result is in the zero bucket and the histogram has only // The result is in the zero bucket and the histogram has only
@ -222,6 +225,17 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
// negative buckets. So we consider 0 to be the upper bound. // negative buckets. So we consider 0 to be the upper bound.
bucket.Upper = 0 bucket.Upper = 0
} }
} else if h.UsesCustomBuckets() {
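// Custom-buckets histograms keep all their counts in positive buckets
// and have no zero bucket, so only the open-ended first and last
// buckets need special bounds handling here.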
if bucket.Lower == math.Inf(-1) {
// first bucket, with lower bound -Inf
if bucket.Upper <= 0 {
return bucket.Upper
}
bucket.Lower = 0
} else if bucket.Upper == math.Inf(1) {
// last bucket, with upper bound +Inf
return bucket.Lower
}
} }
// Due to numerical inaccuracies, we could end up with a higher count // Due to numerical inaccuracies, we could end up with a higher count
// than h.Count. Thus, make sure count is never higher than h.Count. // than h.Count. Thus, make sure count is never higher than h.Count.


@ -190,10 +190,18 @@ func (m *Manager) Stop() {
// Update the rule manager's state as the config requires. If
// loading the new rules failed, the old rule set is restored.
// This method is a no-op if the manager is already stopped.
func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc) error { func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc) error {
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
// We cannot update a stopped manager.
select {
case <-m.done:
return nil
default:
}
groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, files...) groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, files...)
if errs != nil { if errs != nil {


@ -1481,7 +1481,8 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
expHist := hists[0].ToFloat(nil) expHist := hists[0].ToFloat(nil)
for _, h := range hists[1:] { for _, h := range hists[1:] {
expHist = expHist.Add(h.ToFloat(nil)) expHist, err = expHist.Add(h.ToFloat(nil))
require.NoError(t, err)
} }
it := s.Iterator(nil) it := s.Iterator(nil)
@ -1936,18 +1937,12 @@ func TestDependencyMapUpdatesOnGroupUpdate(t *testing.T) {
} }
func TestAsyncRuleEvaluation(t *testing.T) { func TestAsyncRuleEvaluation(t *testing.T) {
storage := teststorage.New(t)
t.Cleanup(func() { storage.Close() })
var (
inflightQueries atomic.Int32
maxInflight atomic.Int32
)
t.Run("synchronous evaluation with independent rules", func(t *testing.T) { t.Run("synchronous evaluation with independent rules", func(t *testing.T) {
// Reset. t.Parallel()
inflightQueries.Store(0) storage := teststorage.New(t)
maxInflight.Store(0) t.Cleanup(func() { storage.Close() })
inflightQueries := atomic.Int32{}
maxInflight := atomic.Int32{}
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel) t.Cleanup(cancel)
@ -1975,9 +1970,11 @@ func TestAsyncRuleEvaluation(t *testing.T) {
}) })
t.Run("asynchronous evaluation with independent and dependent rules", func(t *testing.T) { t.Run("asynchronous evaluation with independent and dependent rules", func(t *testing.T) {
// Reset. t.Parallel()
inflightQueries.Store(0) storage := teststorage.New(t)
maxInflight.Store(0) t.Cleanup(func() { storage.Close() })
inflightQueries := atomic.Int32{}
maxInflight := atomic.Int32{}
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel) t.Cleanup(cancel)
@ -2011,9 +2008,11 @@ func TestAsyncRuleEvaluation(t *testing.T) {
}) })
t.Run("asynchronous evaluation of all independent rules, insufficient concurrency", func(t *testing.T) { t.Run("asynchronous evaluation of all independent rules, insufficient concurrency", func(t *testing.T) {
// Reset. t.Parallel()
inflightQueries.Store(0) storage := teststorage.New(t)
maxInflight.Store(0) t.Cleanup(func() { storage.Close() })
inflightQueries := atomic.Int32{}
maxInflight := atomic.Int32{}
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel) t.Cleanup(cancel)
@ -2047,9 +2046,11 @@ func TestAsyncRuleEvaluation(t *testing.T) {
}) })
t.Run("asynchronous evaluation of all independent rules, sufficient concurrency", func(t *testing.T) { t.Run("asynchronous evaluation of all independent rules, sufficient concurrency", func(t *testing.T) {
// Reset. t.Parallel()
inflightQueries.Store(0) storage := teststorage.New(t)
maxInflight.Store(0) t.Cleanup(func() { storage.Close() })
inflightQueries := atomic.Int32{}
maxInflight := atomic.Int32{}
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel) t.Cleanup(cancel)
@ -2124,7 +2125,24 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount)) require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
} }
const artificialDelay = 15 * time.Millisecond func TestUpdateWhenStopped(t *testing.T) {
files := []string{"fixtures/rules.yaml"}
ruleManager := NewManager(&ManagerOptions{
Context: context.Background(),
Logger: log.NewNopLogger(),
})
ruleManager.start()
err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil)
require.NoError(t, err)
require.NotEmpty(t, ruleManager.groups)
ruleManager.Stop()
// Updates following a stop are a no-op.
err = ruleManager.Update(10*time.Second, []string{}, labels.EmptyLabels(), "", nil)
require.NoError(t, err)
}
const artificialDelay = 250 * time.Millisecond
func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.Int32, maxConcurrent int64) *ManagerOptions { func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.Int32, maxConcurrent int64) *ManagerOptions {
var inflightMu sync.Mutex var inflightMu sync.Mutex


@ -663,7 +663,7 @@ func appender(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int3
} }
} }
if maxSchema < nativeHistogramMaxSchema { if maxSchema < histogram.ExponentialSchemaMax {
app = &maxSchemaAppender{ app = &maxSchemaAppender{
Appender: app, Appender: app,
maxSchema: maxSchema, maxSchema: maxSchema,
@ -1978,10 +1978,10 @@ func pickSchema(bucketFactor float64) int32 {
} }
floor := math.Floor(-math.Log2(math.Log2(bucketFactor))) floor := math.Floor(-math.Log2(math.Log2(bucketFactor)))
switch { switch {
case floor >= float64(nativeHistogramMaxSchema): case floor >= float64(histogram.ExponentialSchemaMax):
return nativeHistogramMaxSchema return histogram.ExponentialSchemaMax
case floor <= float64(nativeHistogramMinSchema): case floor <= float64(histogram.ExponentialSchemaMin):
return nativeHistogramMinSchema return histogram.ExponentialSchemaMin
default: default:
return int32(floor) return int32(floor)
} }


@ -511,7 +511,7 @@ func TestScrapePoolAppender(t *testing.T) {
appl, ok := loop.(*scrapeLoop) appl, ok := loop.(*scrapeLoop)
require.True(t, ok, "Expected scrapeLoop but got %T", loop) require.True(t, ok, "Expected scrapeLoop but got %T", loop)
wrapped := appender(appl.appender(context.Background()), 0, 0, nativeHistogramMaxSchema) wrapped := appender(appl.appender(context.Background()), 0, 0, histogram.ExponentialSchemaMax)
tl, ok := wrapped.(*timeLimitAppender) tl, ok := wrapped.(*timeLimitAppender)
require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped) require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped)
@ -527,7 +527,7 @@ func TestScrapePoolAppender(t *testing.T) {
appl, ok = loop.(*scrapeLoop) appl, ok = loop.(*scrapeLoop)
require.True(t, ok, "Expected scrapeLoop but got %T", loop) require.True(t, ok, "Expected scrapeLoop but got %T", loop)
wrapped = appender(appl.appender(context.Background()), sampleLimit, 0, nativeHistogramMaxSchema) wrapped = appender(appl.appender(context.Background()), sampleLimit, 0, histogram.ExponentialSchemaMax)
sl, ok := wrapped.(*limitAppender) sl, ok := wrapped.(*limitAppender)
require.True(t, ok, "Expected limitAppender but got %T", wrapped) require.True(t, ok, "Expected limitAppender but got %T", wrapped)
@ -538,7 +538,7 @@ func TestScrapePoolAppender(t *testing.T) {
_, ok = tl.Appender.(nopAppender) _, ok = tl.Appender.(nopAppender)
require.True(t, ok, "Expected base appender but got %T", tl.Appender) require.True(t, ok, "Expected base appender but got %T", tl.Appender)
wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, nativeHistogramMaxSchema) wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, histogram.ExponentialSchemaMax)
bl, ok := wrapped.(*bucketLimitAppender) bl, ok := wrapped.(*bucketLimitAppender)
require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped) require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped)
@ -670,7 +670,7 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app
true, true,
false, false,
true, true,
0, 0, nativeHistogramMaxSchema, 0, 0, histogram.ExponentialSchemaMax,
nil, nil,
interval, interval,
time.Hour, time.Hour,
@ -812,7 +812,7 @@ func TestScrapeLoopRun(t *testing.T) {
true, true,
false, false,
true, true,
0, 0, nativeHistogramMaxSchema, 0, 0, histogram.ExponentialSchemaMax,
nil, nil,
time.Second, time.Second,
time.Hour, time.Hour,
@ -956,7 +956,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
true, true,
false, false,
true, true,
0, 0, nativeHistogramMaxSchema, 0, 0, histogram.ExponentialSchemaMax,
nil, nil,
0, 0,
0, 0,


@ -365,16 +365,26 @@ type bucketLimitAppender struct {
func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if h != nil { if h != nil {
// Return with an early error if the histogram has too many buckets and the
// schema is not exponential, in which case we can't reduce the resolution.
if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(h.Schema) {
return 0, errBucketLimit
}
for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit {
if h.Schema == -4 { if h.Schema <= histogram.ExponentialSchemaMin {
return 0, errBucketLimit return 0, errBucketLimit
} }
h = h.ReduceResolution(h.Schema - 1) h = h.ReduceResolution(h.Schema - 1)
} }
} }
if fh != nil { if fh != nil {
// Return with an early error if the histogram has too many buckets and the
// schema is not exponential, in which case we can't reduce the resolution.
if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(fh.Schema) {
return 0, errBucketLimit
}
for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit {
if fh.Schema == -4 { if fh.Schema <= histogram.ExponentialSchemaMin {
return 0, errBucketLimit return 0, errBucketLimit
} }
fh = fh.ReduceResolution(fh.Schema - 1) fh = fh.ReduceResolution(fh.Schema - 1)
@ -387,11 +397,6 @@ func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labe
return ref, nil return ref, nil
} }
const (
nativeHistogramMaxSchema int32 = 8
nativeHistogramMinSchema int32 = -4
)
type maxSchemaAppender struct { type maxSchemaAppender struct {
storage.Appender storage.Appender
@ -400,12 +405,12 @@ type maxSchemaAppender struct {
func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if h != nil { if h != nil {
if h.Schema > app.maxSchema { if histogram.IsExponentialSchema(h.Schema) && h.Schema > app.maxSchema {
h = h.ReduceResolution(app.maxSchema) h = h.ReduceResolution(app.maxSchema)
} }
} }
if fh != nil { if fh != nil {
if fh.Schema > app.maxSchema { if histogram.IsExponentialSchema(fh.Schema) && fh.Schema > app.maxSchema {
fh = fh.ReduceResolution(app.maxSchema) fh = fh.ReduceResolution(app.maxSchema)
} }
} }


@ -474,6 +474,17 @@ func TestBucketLimitAppender(t *testing.T) {
PositiveBuckets: []int64{1, 0}, // 1, 1 PositiveBuckets: []int64{1, 0}, // 1, 1
} }
customBuckets := histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 9,
Sum: 33,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{3, 0, 0},
CustomValues: []float64{1, 2, 3},
}
cases := []struct { cases := []struct {
h histogram.Histogram h histogram.Histogram
limit int limit int
@ -507,6 +518,18 @@ func TestBucketLimitAppender(t *testing.T) {
expectBucketCount: 1, expectBucketCount: 1,
expectSchema: -2, expectSchema: -2,
}, },
{
h: customBuckets,
limit: 2,
expectError: true,
},
{
h: customBuckets,
limit: 3,
expectError: false,
expectBucketCount: 3,
expectSchema: histogram.CustomBucketsSchema,
},
} }
resApp := &collectResultAppender{} resApp := &collectResultAppender{}
@ -562,6 +585,17 @@ func TestMaxSchemaAppender(t *testing.T) {
NegativeBuckets: []int64{3, 0, 0}, NegativeBuckets: []int64{3, 0, 0},
} }
customBuckets := histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 9,
Sum: 33,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{3, 0, 0},
CustomValues: []float64{1, 2, 3},
}
cases := []struct { cases := []struct {
h histogram.Histogram h histogram.Histogram
maxSchema int32 maxSchema int32
@ -577,6 +611,11 @@ func TestMaxSchemaAppender(t *testing.T) {
maxSchema: 0, maxSchema: 0,
expectSchema: 0, expectSchema: 0,
}, },
{
h: customBuckets,
maxSchema: -1,
expectSchema: histogram.CustomBucketsSchema,
},
} }
resApp := &collectResultAppender{} resApp := &collectResultAppender{}


@ -36,4 +36,4 @@ jobs:
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
with: with:
args: --verbose args: --verbose
version: v1.59.0 version: v1.59.1


@ -45,9 +45,15 @@ type mergeGenericQuerier struct {
//
// In case of overlaps between the data given by primaries' and secondaries' Selects, the merge function will be used.
func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier { func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier {
if len(primaries)+len(secondaries) == 0 { switch {
return NoopQuerier() case len(primaries)+len(secondaries) == 0:
return noopQuerier{}
case len(primaries) == 1 && len(secondaries) == 0:
return primaries[0]
case len(primaries) == 0 && len(secondaries) == 1:
return secondaries[0]
} }
queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
for _, q := range primaries { for _, q := range primaries {
if _, ok := q.(noopQuerier); !ok && q != nil { if _, ok := q.(noopQuerier); !ok && q != nil {
@ -77,6 +83,15 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer
// In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
// TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670 // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier { func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier {
switch {
case len(primaries) == 0 && len(secondaries) == 0:
return noopChunkQuerier{}
case len(primaries) == 1 && len(secondaries) == 0:
return primaries[0]
case len(primaries) == 0 && len(secondaries) == 1:
return secondaries[0]
}
queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
for _, q := range primaries { for _, q := range primaries {
if _, ok := q.(noopChunkQuerier); !ok && q != nil { if _, ok := q.(noopChunkQuerier); !ok && q != nil {
@ -102,13 +117,6 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica
// Select returns a set of series that matches the given label matchers. // Select returns a set of series that matches the given label matchers.
func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
if len(q.queriers) == 0 {
return noopGenericSeriesSet{}
}
if len(q.queriers) == 1 {
return q.queriers[0].Select(ctx, sortSeries, hints, matchers...)
}
seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) seriesSets := make([]genericSeriesSet, 0, len(q.queriers))
if !q.concurrentSelect { if !q.concurrentSelect {
for _, querier := range q.queriers { for _, querier := range q.queriers {
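With the new switch, a lone querier is returned unwrapped instead of being buried inside a mergeGenericQuerier. A hedged usage sketch (primary is an assumed placeholder Querier):

q := NewMergeQuerier([]Querier{primary}, nil, ChainedSeriesMerge)
// q is primary itself: no merge wrapper is allocated, and Select,
// LabelNames, and LabelValues calls pass straight through.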
@ -180,9 +180,9 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
}, },
} { } {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
var p Querier var p []Querier
if tc.primaryQuerierSeries != nil { if tc.primaryQuerierSeries != nil {
p = &mockQuerier{toReturn: tc.primaryQuerierSeries} p = append(p, &mockQuerier{toReturn: tc.primaryQuerierSeries})
} }
var qs []Querier var qs []Querier
for _, in := range tc.querierSeries { for _, in := range tc.querierSeries {
@ -190,7 +190,7 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
} }
qs = append(qs, tc.extraQueriers...) qs = append(qs, tc.extraQueriers...)
mergedQuerier := NewMergeQuerier([]Querier{p}, qs, ChainedSeriesMerge).Select(context.Background(), false, nil) mergedQuerier := NewMergeQuerier(p, qs, ChainedSeriesMerge).Select(context.Background(), false, nil)
// Get all merged series upfront to make sure there are no incorrectly retained shared // Get all merged series upfront to make sure there are no incorrectly retained shared
// buffers causing bugs. // buffers causing bugs.
@ -355,9 +355,9 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
}, },
} { } {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
var p ChunkQuerier var p []ChunkQuerier
if tc.primaryChkQuerierSeries != nil { if tc.primaryChkQuerierSeries != nil {
p = &mockChunkQuerier{toReturn: tc.primaryChkQuerierSeries} p = append(p, &mockChunkQuerier{toReturn: tc.primaryChkQuerierSeries})
} }
var qs []ChunkQuerier var qs []ChunkQuerier
@ -366,7 +366,7 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
} }
qs = append(qs, tc.extraQueriers...) qs = append(qs, tc.extraQueriers...)
merged := NewMergeChunkQuerier([]ChunkQuerier{p}, qs, NewCompactingChunkSeriesMerger(nil)).Select(context.Background(), false, nil) merged := NewMergeChunkQuerier(p, qs, NewCompactingChunkSeriesMerger(nil)).Select(context.Background(), false, nil)
for merged.Next() { for merged.Next() {
require.True(t, tc.expected.Next(), "Expected Next() to be true") require.True(t, tc.expected.Next(), "Expected Next() to be true")
actualSeries := merged.At() actualSeries := merged.At()
@ -1443,6 +1443,8 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
expectedErrs [4]error expectedErrs [4]error
}{ }{
{ {
// NewMergeQuerier will not create a mergeGenericQuerier
// with just one querier inside, but we can test it anyway.
name: "one successful primary querier", name: "one successful primary querier",
queriers: []genericQuerier{&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}}, queriers: []genericQuerier{&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}},
expectedSelectsSeries: []labels.Labels{ expectedSelectsSeries: []labels.Labels{
@ -1551,12 +1553,8 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
for _, qr := range q.queriers { for _, qr := range q.queriers {
m := unwrapMockGenericQuerier(t, qr) m := unwrapMockGenericQuerier(t, qr)
// mergeGenericQuerier forces all Selects to be sorted.
exp := []bool{true} require.Equal(t, []bool{true}, m.sortedSeriesRequested)
if len(q.queriers) == 1 {
exp[0] = false
}
require.Equal(t, exp, m.sortedSeriesRequested)
} }
}) })
t.Run("LabelNames", func(t *testing.T) { t.Run("LabelNames", func(t *testing.T) {
@ -231,6 +231,7 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
httpResp.Body.Close() httpResp.Body.Close()
}() }()
//nolint:usestdlibvars
if httpResp.StatusCode/100 != 2 { if httpResp.StatusCode/100 != 2 {
scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen)) scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen))
line := "" line := ""
@ -239,6 +240,7 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
} }
err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line) err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line)
} }
//nolint:usestdlibvars
if httpResp.StatusCode/100 == 5 || if httpResp.StatusCode/100 == 5 ||
(c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) { (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) {
return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))} return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
@ -323,6 +325,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err) return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err)
} }
//nolint:usestdlibvars
if httpResp.StatusCode/100 != 2 { if httpResp.StatusCode/100 != 2 {
return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed))) return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed)))
} }
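The //nolint:usestdlibvars directives silence the linter's preference for named status constants: the integer division is intentional, since dividing by 100 selects a whole status class that no single constant expresses. The same check in isolation (isRetryable is a hypothetical helper):

func isRetryable(statusCode int) bool {
	// Any 5xx is a server error and worth retrying; 429 is matched
	// separately via http.StatusTooManyRequests when rate-limit
	// retries are enabled.
	return statusCode/100 == 5
}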
@ -95,7 +95,7 @@ func EncodeReadResponse(resp *prompb.ReadResponse, w http.ResponseWriter) error
// ToQuery builds a Query proto. // ToQuery builds a Query proto.
func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHints) (*prompb.Query, error) { func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHints) (*prompb.Query, error) {
ms, err := toLabelMatchers(matchers) ms, err := ToLabelMatchers(matchers)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -166,7 +166,7 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
} }
resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{ resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{
Labels: labelsToLabelsProto(series.Labels(), nil), Labels: LabelsToLabelsProto(series.Labels(), nil),
Samples: samples, Samples: samples,
Histograms: histograms, Histograms: histograms,
}) })
@ -182,7 +182,7 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet
if err := validateLabelsAndMetricName(ts.Labels); err != nil { if err := validateLabelsAndMetricName(ts.Labels); err != nil {
return errSeriesSet{err: err} return errSeriesSet{err: err}
} }
lbls := labelProtosToLabels(&b, ts.Labels) lbls := LabelProtosToLabels(&b, ts.Labels)
series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms}) series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms})
} }
@ -235,7 +235,7 @@ func StreamChunkedReadResponses(
for ss.Next() { for ss.Next() {
series := ss.At() series := ss.At()
iter = series.Iterator(iter) iter = series.Iterator(iter)
lbls = MergeLabels(labelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels) lbls = MergeLabels(LabelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels)
maxDataLength := maxBytesInFrame maxDataLength := maxBytesInFrame
for _, lbl := range lbls { for _, lbl := range lbls {
@ -566,7 +566,8 @@ func validateLabelsAndMetricName(ls []prompb.Label) error {
return nil return nil
} }
func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) { // ToLabelMatchers converts Prometheus label matchers to protobuf label matchers.
func ToLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) {
pbMatchers := make([]*prompb.LabelMatcher, 0, len(matchers)) pbMatchers := make([]*prompb.LabelMatcher, 0, len(matchers))
for _, m := range matchers { for _, m := range matchers {
var mType prompb.LabelMatcher_Type var mType prompb.LabelMatcher_Type
@ -591,7 +592,7 @@ func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error)
return pbMatchers, nil return pbMatchers, nil
} }
// FromLabelMatchers parses protobuf label matchers to Prometheus label matchers. // FromLabelMatchers converts protobuf label matchers to Prometheus label matchers.
func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) { func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) {
result := make([]*labels.Matcher, 0, len(matchers)) result := make([]*labels.Matcher, 0, len(matchers))
for _, matcher := range matchers { for _, matcher := range matchers {
@ -621,7 +622,7 @@ func exemplarProtoToExemplar(b *labels.ScratchBuilder, ep prompb.Exemplar) exemp
timestamp := ep.Timestamp timestamp := ep.Timestamp
return exemplar.Exemplar{ return exemplar.Exemplar{
Labels: labelProtosToLabels(b, ep.Labels), Labels: LabelProtosToLabels(b, ep.Labels),
Value: ep.Value, Value: ep.Value,
Ts: timestamp, Ts: timestamp,
HasTs: timestamp != 0, HasTs: timestamp != 0,
@ -761,7 +762,9 @@ func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
return metric return metric
} }
func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) labels.Labels { // LabelProtosToLabels transforms prompb labels into labels. The labels builder
// will be used to build the returned labels.
func LabelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) labels.Labels {
b.Reset() b.Reset()
for _, l := range labelPairs { for _, l := range labelPairs {
b.Add(l.Name, l.Value) b.Add(l.Name, l.Value)
@ -770,9 +773,9 @@ func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) la
return b.Labels() return b.Labels()
} }
// labelsToLabelsProto transforms labels into prompb labels. The buffer slice // LabelsToLabelsProto transforms labels into prompb labels. The buffer slice
// will be used to avoid allocations if it is big enough to store the labels. // will be used to avoid allocations if it is big enough to store the labels.
func labelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label { func LabelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label {
result := buf[:0] result := buf[:0]
lbls.Range(func(l labels.Label) { lbls.Range(func(l labels.Label) {
result = append(result, prompb.Label{ result = append(result, prompb.Label{
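Because the conversion helpers are now exported, code outside the remote package can round-trip label sets; a small sketch using the renamed functions:

b := labels.NewScratchBuilder(0)
pb := LabelsToLabelsProto(labels.FromStrings("job", "api"), nil)
lbls := LabelProtosToLabels(&b, pb)
// lbls is equivalent to the labels.FromStrings result above.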
@ -729,8 +729,8 @@ func TestFloatHistogramToProtoConvert(t *testing.T) {
} }
func TestStreamResponse(t *testing.T) { func TestStreamResponse(t *testing.T) {
lbs1 := labelsToLabelsProto(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil) lbs1 := LabelsToLabelsProto(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil)
lbs2 := labelsToLabelsProto(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil) lbs2 := LabelsToLabelsProto(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil)
chunk := prompb.Chunk{ chunk := prompb.Chunk{
Type: prompb.Chunk_XOR, Type: prompb.Chunk_XOR,
Data: make([]byte, 100), Data: make([]byte, 100),
@ -802,7 +802,7 @@ func (c *mockChunkSeriesSet) Next() bool {
func (c *mockChunkSeriesSet) At() storage.ChunkSeries { func (c *mockChunkSeriesSet) At() storage.ChunkSeries {
return &storage.ChunkSeriesEntry{ return &storage.ChunkSeriesEntry{
Lset: labelProtosToLabels(&c.builder, c.chunkedSeries[c.index].Labels), Lset: LabelProtosToLabels(&c.builder, c.chunkedSeries[c.index].Labels),
ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator { ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
return &mockChunkIterator{ return &mockChunkIterator{
chunks: c.chunkedSeries[c.index].Chunks, chunks: c.chunkedSeries[c.index].Chunks,
@ -16,6 +16,7 @@ package remote
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"math" "math"
"strconv" "strconv"
"sync" "sync"
@ -1224,12 +1225,16 @@ func (s *shards) stop() {
// Force an unclean shutdown. // Force an unclean shutdown.
s.hardShutdown() s.hardShutdown()
<-s.done <-s.done
if dropped := s.samplesDroppedOnHardShutdown.Load(); dropped > 0 {
level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown", "count", dropped) // Log error for any dropped samples, exemplars, or histograms.
} logDroppedError := func(t string, counter atomic.Uint32) {
if dropped := s.exemplarsDroppedOnHardShutdown.Load(); dropped > 0 { if dropped := counter.Load(); dropped > 0 {
level.Error(s.qm.logger).Log("msg", "Failed to flush all exemplars on shutdown", "count", dropped) level.Error(s.qm.logger).Log("msg", fmt.Sprintf("Failed to flush all %s on shutdown", t), "count", dropped)
}
} }
logDroppedError("samples", s.samplesDroppedOnHardShutdown)
logDroppedError("exemplars", s.exemplarsDroppedOnHardShutdown)
logDroppedError("histograms", s.histogramsDroppedOnHardShutdown)
} }
// enqueue data (sample or exemplar). If the shard is full, shutting down, or // enqueue data (sample or exemplar). If the shard is full, shutting down, or
@ -1507,7 +1512,7 @@ func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.Tim
// Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff) // Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
// retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll // retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
// stop reading from the queue. This makes it safe to reference pendingSamples by index. // stop reading from the queue. This makes it safe to reference pendingSamples by index.
pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels) pendingData[nPending].Labels = LabelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
switch d.sType { switch d.sType {
case tSample: case tSample:
pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{ pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{
@ -1517,7 +1522,7 @@ func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.Tim
nPendingSamples++ nPendingSamples++
case tExemplar: case tExemplar:
pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, prompb.Exemplar{ pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, prompb.Exemplar{
Labels: labelsToLabelsProto(d.exemplarLabels, nil), Labels: LabelsToLabelsProto(d.exemplarLabels, nil),
Value: d.value, Value: d.value,
Timestamp: d.timestamp, Timestamp: d.timestamp,
}) })
@ -1537,7 +1542,7 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
begin := time.Now() begin := time.Now()
err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, pBuf, buf) err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, pBuf, buf)
if err != nil { if err != nil {
level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err) level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "histogramCount", histogramCount, "err", err)
s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount)) s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount))
s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount)) s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount))
s.qm.metrics.failedHistogramsTotal.Add(float64(histogramCount)) s.qm.metrics.failedHistogramsTotal.Add(float64(histogramCount))
@ -1778,9 +1783,11 @@ func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeri
if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp < lowest { if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp < lowest {
lowest = ts.Histograms[0].Timestamp lowest = ts.Histograms[0].Timestamp
} }
if i != keepIdx {
// Move the current element to the write position and increment the write pointer // We have to swap the kept timeseries with the one that should be dropped.
timeSeries[keepIdx] = timeSeries[i] // Copying elements within timeSeries could cause data corruption when the slice is reused for the next batch (shards.populateTimeSeries).
timeSeries[keepIdx], timeSeries[i] = timeSeries[i], timeSeries[keepIdx]
}
keepIdx++ keepIdx++
} }
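Swapping instead of copying keeps the displaced element alive in the tail of the slice, so reusing the backing array for the next batch cannot leave two entries aliasing the same data. The same pattern in isolation (partitionKeep is a hypothetical generic helper):

func partitionKeep[T any](xs []T, keep func(T) bool) []T {
	keepIdx := 0
	for i := range xs {
		if !keep(xs[i]) {
			continue
		}
		if i != keepIdx {
			// Swap, never overwrite: the dropped element survives in
			// the tail instead of being duplicated.
			xs[keepIdx], xs[i] = xs[i], xs[keepIdx]
		}
		keepIdx++
	}
	return xs[:keepIdx]
}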
@ -17,6 +17,7 @@ import (
"context" "context"
"fmt" "fmt"
"math" "math"
"math/rand"
"os" "os"
"runtime/pprof" "runtime/pprof"
"sort" "sort"
@ -29,6 +30,7 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/golang/snappy" "github.com/golang/snappy"
"github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
client_testutil "github.com/prometheus/client_golang/prometheus/testutil" client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -611,6 +613,30 @@ func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([
return samples, series return samples, series
} }
func createProtoTimeseriesWithOld(numSamples, baseTs int64, extraLabels ...labels.Label) []prompb.TimeSeries {
samples := make([]prompb.TimeSeries, numSamples)
// use a fixed rand source so tests are consistent
r := rand.New(rand.NewSource(99))
for j := int64(0); j < numSamples; j++ {
name := fmt.Sprintf("test_metric_%d", j)
samples[j] = prompb.TimeSeries{
Labels: []prompb.Label{{Name: "__name__", Value: name}},
Samples: []prompb.Sample{
{
Timestamp: baseTs + j,
Value: float64(j),
},
},
}
// 10% of the time use a ts that is too old
if r.Intn(10) == 0 {
samples[j].Samples[0].Timestamp = baseTs - 5
}
}
return samples
}
func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []record.RefSeries) { func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []record.RefSeries) {
exemplars := make([]record.RefExemplar, 0, numExemplars) exemplars := make([]record.RefExemplar, 0, numExemplars)
series := make([]record.RefSeries, 0, numSeries) series := make([]record.RefSeries, 0, numSeries)
@ -679,8 +705,8 @@ func createHistograms(numSamples, numSeries int, floatHistogram bool) ([]record.
return histograms, nil, series return histograms, nil, series
} }
func getSeriesNameFromRef(r record.RefSeries) string { func getSeriesIDFromRef(r record.RefSeries) string {
return r.Labels.Get("__name__") return r.Labels.String()
} }
type TestWriteClient struct { type TestWriteClient struct {
@ -698,6 +724,9 @@ type TestWriteClient struct {
wg sync.WaitGroup wg sync.WaitGroup
mtx sync.Mutex mtx sync.Mutex
buf []byte buf []byte
storeWait time.Duration
returnError error
} }
func NewTestWriteClient() *TestWriteClient { func NewTestWriteClient() *TestWriteClient {
@ -706,6 +735,8 @@ func NewTestWriteClient() *TestWriteClient {
receivedSamples: map[string][]prompb.Sample{}, receivedSamples: map[string][]prompb.Sample{},
expectedSamples: map[string][]prompb.Sample{}, expectedSamples: map[string][]prompb.Sample{},
receivedMetadata: map[string][]prompb.MetricMetadata{}, receivedMetadata: map[string][]prompb.MetricMetadata{},
storeWait: 0,
returnError: nil,
} }
} }
@ -720,12 +751,15 @@ func (c *TestWriteClient) expectSamples(ss []record.RefSample, series []record.R
c.receivedSamples = map[string][]prompb.Sample{} c.receivedSamples = map[string][]prompb.Sample{}
for _, s := range ss { for _, s := range ss {
seriesName := getSeriesNameFromRef(series[s.Ref]) tsID := getSeriesIDFromRef(series[s.Ref])
c.expectedSamples[seriesName] = append(c.expectedSamples[seriesName], prompb.Sample{ c.expectedSamples[tsID] = append(c.expectedSamples[tsID], prompb.Sample{
Timestamp: s.T, Timestamp: s.T,
Value: s.V, Value: s.V,
}) })
} }
if !c.withWaitGroup {
return
}
c.wg.Add(len(ss)) c.wg.Add(len(ss))
} }
@ -740,13 +774,13 @@ func (c *TestWriteClient) expectExemplars(ss []record.RefExemplar, series []reco
c.receivedExemplars = map[string][]prompb.Exemplar{} c.receivedExemplars = map[string][]prompb.Exemplar{}
for _, s := range ss { for _, s := range ss {
seriesName := getSeriesNameFromRef(series[s.Ref]) tsID := getSeriesIDFromRef(series[s.Ref])
e := prompb.Exemplar{ e := prompb.Exemplar{
Labels: labelsToLabelsProto(s.Labels, nil), Labels: LabelsToLabelsProto(s.Labels, nil),
Timestamp: s.T, Timestamp: s.T,
Value: s.V, Value: s.V,
} }
c.expectedExemplars[seriesName] = append(c.expectedExemplars[seriesName], e) c.expectedExemplars[tsID] = append(c.expectedExemplars[tsID], e)
} }
c.wg.Add(len(ss)) c.wg.Add(len(ss))
} }
@ -762,8 +796,8 @@ func (c *TestWriteClient) expectHistograms(hh []record.RefHistogramSample, serie
c.receivedHistograms = map[string][]prompb.Histogram{} c.receivedHistograms = map[string][]prompb.Histogram{}
for _, h := range hh { for _, h := range hh {
seriesName := getSeriesNameFromRef(series[h.Ref]) tsID := getSeriesIDFromRef(series[h.Ref])
c.expectedHistograms[seriesName] = append(c.expectedHistograms[seriesName], HistogramToHistogramProto(h.T, h.H)) c.expectedHistograms[tsID] = append(c.expectedHistograms[tsID], HistogramToHistogramProto(h.T, h.H))
} }
c.wg.Add(len(hh)) c.wg.Add(len(hh))
} }
@ -779,8 +813,8 @@ func (c *TestWriteClient) expectFloatHistograms(fhs []record.RefFloatHistogramSa
c.receivedFloatHistograms = map[string][]prompb.Histogram{} c.receivedFloatHistograms = map[string][]prompb.Histogram{}
for _, fh := range fhs { for _, fh := range fhs {
seriesName := getSeriesNameFromRef(series[fh.Ref]) tsID := getSeriesIDFromRef(series[fh.Ref])
c.expectedFloatHistograms[seriesName] = append(c.expectedFloatHistograms[seriesName], FloatHistogramToHistogramProto(fh.T, fh.FH)) c.expectedFloatHistograms[tsID] = append(c.expectedFloatHistograms[tsID], FloatHistogramToHistogramProto(fh.T, fh.FH))
} }
c.wg.Add(len(fhs)) c.wg.Add(len(fhs))
} }
@ -806,9 +840,27 @@ func (c *TestWriteClient) waitForExpectedData(tb testing.TB) {
} }
} }
func (c *TestWriteClient) SetStoreWait(w time.Duration) {
c.mtx.Lock()
defer c.mtx.Unlock()
c.storeWait = w
}
func (c *TestWriteClient) SetReturnError(err error) {
c.mtx.Lock()
defer c.mtx.Unlock()
c.returnError = err
}
func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error { func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
c.mtx.Lock() c.mtx.Lock()
defer c.mtx.Unlock() defer c.mtx.Unlock()
if c.storeWait > 0 {
time.Sleep(c.storeWait)
}
if c.returnError != nil {
return c.returnError
}
// nil buffers are ok for snappy, ignore cast error. // nil buffers are ok for snappy, ignore cast error.
if c.buf != nil { if c.buf != nil {
c.buf = c.buf[:cap(c.buf)] c.buf = c.buf[:cap(c.buf)]
@ -826,24 +878,24 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
builder := labels.NewScratchBuilder(0) builder := labels.NewScratchBuilder(0)
count := 0 count := 0
for _, ts := range reqProto.Timeseries { for _, ts := range reqProto.Timeseries {
labels := labelProtosToLabels(&builder, ts.Labels) labels := LabelProtosToLabels(&builder, ts.Labels)
seriesName := labels.Get("__name__") tsID := labels.String()
for _, sample := range ts.Samples { for _, sample := range ts.Samples {
count++ count++
c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample) c.receivedSamples[tsID] = append(c.receivedSamples[tsID], sample)
} }
for _, ex := range ts.Exemplars { for _, ex := range ts.Exemplars {
count++ count++
c.receivedExemplars[seriesName] = append(c.receivedExemplars[seriesName], ex) c.receivedExemplars[tsID] = append(c.receivedExemplars[tsID], ex)
} }
for _, histogram := range ts.Histograms { for _, histogram := range ts.Histograms {
count++ count++
if histogram.IsFloatHistogram() { if histogram.IsFloatHistogram() {
c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], histogram) c.receivedFloatHistograms[tsID] = append(c.receivedFloatHistograms[tsID], histogram)
} else { } else {
c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], histogram) c.receivedHistograms[tsID] = append(c.receivedHistograms[tsID], histogram)
} }
} }
} }
@ -1441,6 +1493,99 @@ func TestIsSampleOld(t *testing.T) {
require.False(t, isSampleOld(currentTime, 60*time.Second, timestamp.FromTime(currentTime.Add(-59*time.Second)))) require.False(t, isSampleOld(currentTime, 60*time.Second, timestamp.FromTime(currentTime.Add(-59*time.Second))))
} }
// Simulates a scenario in which the remote write endpoint is down and a subset of samples is dropped due to the age limit while backing off.
func TestSendSamplesWithBackoffWithSampleAgeLimit(t *testing.T) {
maxSamplesPerSend := 10
sampleAgeLimit := time.Second
cfg := config.DefaultQueueConfig
cfg.MaxShards = 1
cfg.SampleAgeLimit = model.Duration(sampleAgeLimit)
// Set the batch send deadline to 5 minutes to effectively disable it.
cfg.BatchSendDeadline = model.Duration(time.Minute * 5)
cfg.Capacity = 10 * maxSamplesPerSend // more than the amount of data we append in the test
cfg.MaxBackoff = model.Duration(time.Millisecond * 100)
cfg.MinBackoff = model.Duration(time.Millisecond * 100)
cfg.MaxSamplesPerSend = maxSamplesPerSend
metadataCfg := config.DefaultMetadataConfig
metadataCfg.Send = true
metadataCfg.SendInterval = model.Duration(time.Second * 60)
metadataCfg.MaxSamplesPerSend = maxSamplesPerSend
c := NewTestWriteClient()
c.withWaitGroup = false
m := newTestQueueManager(t, cfg, metadataCfg, time.Second, c)
m.Start()
batchID := 0
expectedSamples := map[string][]prompb.Sample{}
appendData := func(numberOfSeries int, timeAdd time.Duration, shouldBeDropped bool) {
t.Log(">>>> Appending series ", numberOfSeries, " as batch ID ", batchID, " with timeAdd ", timeAdd, " and should be dropped ", shouldBeDropped)
samples, series := createTimeseriesWithRandomLabelCount(strconv.Itoa(batchID), numberOfSeries, timeAdd, 9)
m.StoreSeries(series, batchID)
sent := m.Append(samples)
require.True(t, sent, "samples not sent")
if !shouldBeDropped {
for _, s := range samples {
tsID := getSeriesIDFromRef(series[s.Ref])
expectedSamples[tsID] = append(c.expectedSamples[tsID], prompb.Sample{
Timestamp: s.T,
Value: s.V,
})
}
}
batchID++
}
timeShift := -time.Millisecond * 5
c.SetReturnError(RecoverableError{context.DeadlineExceeded, defaultBackoff})
appendData(maxSamplesPerSend/2, timeShift, true)
time.Sleep(sampleAgeLimit)
appendData(maxSamplesPerSend/2, timeShift, true)
time.Sleep(sampleAgeLimit / 10)
appendData(maxSamplesPerSend/2, timeShift, true)
time.Sleep(2 * sampleAgeLimit)
appendData(2*maxSamplesPerSend, timeShift, false)
time.Sleep(sampleAgeLimit / 2)
c.SetReturnError(nil)
appendData(5, timeShift, false)
m.Stop()
if diff := cmp.Diff(expectedSamples, c.receivedSamples); diff != "" {
t.Errorf("mismatch (-want +got):\n%s", diff)
}
}
func createTimeseriesWithRandomLabelCount(id string, seriesCount int, timeAdd time.Duration, maxLabels int) ([]record.RefSample, []record.RefSeries) {
samples := []record.RefSample{}
series := []record.RefSeries{}
// use a fixed rand source so tests are consistent
r := rand.New(rand.NewSource(99))
for i := 0; i < seriesCount; i++ {
s := record.RefSample{
Ref: chunks.HeadSeriesRef(i),
T: time.Now().Add(timeAdd).UnixMilli(),
V: r.Float64(),
}
samples = append(samples, s)
labelsCount := r.Intn(maxLabels)
lb := labels.NewScratchBuilder(1 + labelsCount)
lb.Add("__name__", "batch_"+id+"_id_"+strconv.Itoa(i))
for j := 1; j < labelsCount+1; j++ {
// same for both name and value
label := "batch_" + id + "_label_" + strconv.Itoa(j)
lb.Add(label, label)
}
series = append(series, record.RefSeries{
Ref: chunks.HeadSeriesRef(i),
Labels: lb.Labels(),
})
}
return samples, series
}
func createTimeseriesWithOldSamples(numSamples, numSeries int, extraLabels ...labels.Label) ([]record.RefSample, []record.RefSample, []record.RefSeries) { func createTimeseriesWithOldSamples(numSamples, numSeries int, extraLabels ...labels.Label) ([]record.RefSample, []record.RefSample, []record.RefSeries) {
newSamples := make([]record.RefSample, 0, numSamples) newSamples := make([]record.RefSample, 0, numSamples)
samples := make([]record.RefSample, 0, numSamples) samples := make([]record.RefSample, 0, numSamples)
@ -1668,3 +1813,14 @@ func TestBuildTimeSeries(t *testing.T) {
}) })
} }
} }
func BenchmarkBuildTimeSeries(b *testing.B) {
// Send one sample per series, which is the typical remote_write case
const numSamples = 10000
filter := func(ts prompb.TimeSeries) bool { return filterTsLimit(99, ts) }
for i := 0; i < b.N; i++ {
samples := createProtoTimeseriesWithOld(numSamples, 100, extraLabels...)
_, _, result, _, _, _ := buildTimeSeries(samples, filter)
require.NotNil(b, result)
}
}
@ -172,12 +172,12 @@ func TestSeriesSetFilter(t *testing.T) {
toRemove: []string{"foo"}, toRemove: []string{"foo"},
in: &prompb.QueryResult{ in: &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{ Timeseries: []*prompb.TimeSeries{
{Labels: labelsToLabelsProto(labels.FromStrings("foo", "bar", "a", "b"), nil)}, {Labels: LabelsToLabelsProto(labels.FromStrings("foo", "bar", "a", "b"), nil)},
}, },
}, },
expected: &prompb.QueryResult{ expected: &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{ Timeseries: []*prompb.TimeSeries{
{Labels: labelsToLabelsProto(labels.FromStrings("a", "b"), nil)}, {Labels: LabelsToLabelsProto(labels.FromStrings("a", "b"), nil)},
}, },
}, },
}, },
@ -211,7 +211,7 @@ func (c *mockedRemoteClient) Read(_ context.Context, query *prompb.Query) (*prom
q := &prompb.QueryResult{} q := &prompb.QueryResult{}
for _, s := range c.store { for _, s := range c.store {
l := labelProtosToLabels(&c.b, s.Labels) l := LabelProtosToLabels(&c.b, s.Labels)
var notMatch bool var notMatch bool
for _, m := range matchers { for _, m := range matchers {
@ -18,6 +18,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
"time"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
@ -25,7 +26,9 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
@ -38,6 +41,8 @@ type writeHandler struct {
samplesWithInvalidLabelsTotal prometheus.Counter samplesWithInvalidLabelsTotal prometheus.Counter
} }
const maxAheadTime = 10 * time.Minute
// NewWriteHandler creates a http.Handler that accepts remote write requests and // NewWriteHandler creates a http.Handler that accepts remote write requests and
// writes them to the provided appendable. // writes them to the provided appendable.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable) http.Handler { func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable) http.Handler {
@ -104,19 +109,24 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
outOfOrderExemplarErrs := 0 outOfOrderExemplarErrs := 0
samplesWithInvalidLabels := 0 samplesWithInvalidLabels := 0
app := h.appendable.Appender(ctx) timeLimitApp := &timeLimitAppender{
Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
}
defer func() { defer func() {
if err != nil { if err != nil {
_ = app.Rollback() _ = timeLimitApp.Rollback()
return return
} }
err = app.Commit() err = timeLimitApp.Commit()
}() }()
b := labels.NewScratchBuilder(0) b := labels.NewScratchBuilder(0)
var exemplarErr error var exemplarErr error
for _, ts := range req.Timeseries { for _, ts := range req.Timeseries {
labels := labelProtosToLabels(&b, ts.Labels) labels := LabelProtosToLabels(&b, ts.Labels)
if !labels.IsValid() { if !labels.IsValid() {
level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String()) level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String())
samplesWithInvalidLabels++ samplesWithInvalidLabels++
@ -124,7 +134,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
} }
var ref storage.SeriesRef var ref storage.SeriesRef
for _, s := range ts.Samples { for _, s := range ts.Samples {
ref, err = app.Append(ref, labels, s.Timestamp, s.Value) ref, err = timeLimitApp.Append(ref, labels, s.Timestamp, s.Value)
if err != nil { if err != nil {
unwrappedErr := errors.Unwrap(err) unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil { if unwrappedErr == nil {
@ -140,7 +150,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
for _, ep := range ts.Exemplars { for _, ep := range ts.Exemplars {
e := exemplarProtoToExemplar(&b, ep) e := exemplarProtoToExemplar(&b, ep)
_, exemplarErr = app.AppendExemplar(0, labels, e) _, exemplarErr = timeLimitApp.AppendExemplar(0, labels, e)
exemplarErr = h.checkAppendExemplarError(exemplarErr, e, &outOfOrderExemplarErrs) exemplarErr = h.checkAppendExemplarError(exemplarErr, e, &outOfOrderExemplarErrs)
if exemplarErr != nil { if exemplarErr != nil {
// Since exemplar storage is still experimental, we don't fail the request on ingestion errors. // Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
@ -151,11 +161,12 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
for _, hp := range ts.Histograms { for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() { if hp.IsFloatHistogram() {
fhs := FloatHistogramProtoToFloatHistogram(hp) fhs := FloatHistogramProtoToFloatHistogram(hp)
_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, fhs) _, err = timeLimitApp.AppendHistogram(0, labels, hp.Timestamp, nil, fhs)
} else { } else {
hs := HistogramProtoToHistogram(hp) hs := HistogramProtoToHistogram(hp)
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hs, nil) _, err = timeLimitApp.AppendHistogram(0, labels, hp.Timestamp, hs, nil)
} }
if err != nil { if err != nil {
unwrappedErr := errors.Unwrap(err) unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil { if unwrappedErr == nil {
@ -233,3 +244,45 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
} }
type timeLimitAppender struct {
storage.Appender
maxTime int64
}
func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
if t > app.maxTime {
return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
}
ref, err := app.Appender.Append(ref, lset, t, v)
if err != nil {
return 0, err
}
return ref, nil
}
func (app *timeLimitAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if t > app.maxTime {
return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
}
ref, err := app.Appender.AppendHistogram(ref, l, t, h, fh)
if err != nil {
return 0, err
}
return ref, nil
}
func (app *timeLimitAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
if e.Ts > app.maxTime {
return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
}
ref, err := app.Appender.AppendExemplar(ref, l, e)
if err != nil {
return 0, err
}
return ref, nil
}
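A short sketch of the wrapper in use (inner and lset are assumed placeholders):

app := &timeLimitAppender{
	Appender: inner,
	maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
}
_, err := app.Append(0, lset, math.MaxInt64, 1.0)
// errors.Is(err, storage.ErrOutOfBounds) is true: the sample is
// rejected before it ever reaches the underlying appender.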
@ -18,6 +18,7 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"math"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"strconv" "strconv"
@ -60,14 +61,14 @@ func TestRemoteWriteHandler(t *testing.T) {
j := 0 j := 0
k := 0 k := 0
for _, ts := range writeRequestFixture.Timeseries { for _, ts := range writeRequestFixture.Timeseries {
labels := labelProtosToLabels(&b, ts.Labels) labels := LabelProtosToLabels(&b, ts.Labels)
for _, s := range ts.Samples { for _, s := range ts.Samples {
requireEqual(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i]) requireEqual(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
i++ i++
} }
for _, e := range ts.Exemplars { for _, e := range ts.Exemplars {
exemplarLabels := labelProtosToLabels(&b, e.Labels) exemplarLabels := LabelProtosToLabels(&b, e.Labels)
requireEqual(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j]) requireEqual(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++ j++
} }
@ -87,73 +88,127 @@ func TestRemoteWriteHandler(t *testing.T) {
} }
func TestOutOfOrderSample(t *testing.T) { func TestOutOfOrderSample(t *testing.T) {
buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{ tests := []struct {
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}}, Name string
Samples: []prompb.Sample{{Value: 1, Timestamp: 0}}, Timestamp int64
}}, nil, nil, nil, nil) }{
require.NoError(t, err) {
Name: "historic",
req, err := http.NewRequest("", "", bytes.NewReader(buf)) Timestamp: 0,
require.NoError(t, err) },
{
appendable := &mockAppendable{ Name: "future",
latestSample: 100, Timestamp: math.MaxInt64,
},
} }
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
recorder := httptest.NewRecorder() for _, tc := range tests {
handler.ServeHTTP(recorder, req) t.Run(tc.Name, func(t *testing.T) {
buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}},
Samples: []prompb.Sample{{Value: 1, Timestamp: tc.Timestamp}},
}}, nil, nil, nil, nil)
require.NoError(t, err)
resp := recorder.Result() req, err := http.NewRequest("", "", bytes.NewReader(buf))
require.Equal(t, http.StatusBadRequest, resp.StatusCode) require.NoError(t, err)
appendable := &mockAppendable{
latestSample: 100,
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
})
}
} }
// This test case currently aims to verify that the WriteHandler endpoint // This test case currently aims to verify that the WriteHandler endpoint
// doesn't fail on ingestion errors since the exemplar storage is // doesn't fail on ingestion errors since the exemplar storage is
// still experimental. // still experimental.
func TestOutOfOrderExemplar(t *testing.T) { func TestOutOfOrderExemplar(t *testing.T) {
buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{ tests := []struct {
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}}, Name string
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "foo", Value: "bar"}}, Value: 1, Timestamp: 0}}, Timestamp int64
}}, nil, nil, nil, nil) }{
require.NoError(t, err) {
Name: "historic",
req, err := http.NewRequest("", "", bytes.NewReader(buf)) Timestamp: 0,
require.NoError(t, err) },
{
appendable := &mockAppendable{ Name: "future",
latestExemplar: 100, Timestamp: math.MaxInt64,
},
} }
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
recorder := httptest.NewRecorder() for _, tc := range tests {
handler.ServeHTTP(recorder, req) t.Run(tc.Name, func(t *testing.T) {
buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "foo", Value: "bar"}}, Value: 1, Timestamp: tc.Timestamp}},
}}, nil, nil, nil, nil)
require.NoError(t, err)
resp := recorder.Result() req, err := http.NewRequest("", "", bytes.NewReader(buf))
// TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental. require.NoError(t, err)
require.Equal(t, http.StatusNoContent, resp.StatusCode)
appendable := &mockAppendable{
latestExemplar: 100,
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
// TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental.
require.Equal(t, http.StatusNoContent, resp.StatusCode)
})
}
} }
func TestOutOfOrderHistogram(t *testing.T) { func TestOutOfOrderHistogram(t *testing.T) {
buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{ tests := []struct {
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}}, Name string
Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))}, Timestamp int64
}}, nil, nil, nil, nil) }{
require.NoError(t, err) {
Name: "historic",
req, err := http.NewRequest("", "", bytes.NewReader(buf)) Timestamp: 0,
require.NoError(t, err) },
{
appendable := &mockAppendable{ Name: "future",
latestHistogram: 100, Timestamp: math.MaxInt64,
},
} }
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
recorder := httptest.NewRecorder() for _, tc := range tests {
handler.ServeHTTP(recorder, req) t.Run(tc.Name, func(t *testing.T) {
buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}},
Histograms: []prompb.Histogram{HistogramToHistogramProto(tc.Timestamp, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))},
}}, nil, nil, nil, nil)
require.NoError(t, err)
resp := recorder.Result() req, err := http.NewRequest("", "", bytes.NewReader(buf))
require.Equal(t, http.StatusBadRequest, resp.StatusCode) require.NoError(t, err)
appendable := &mockAppendable{
latestHistogram: 100,
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
})
}
} }
func BenchmarkRemoteWritehandler(b *testing.B) { func BenchmarkRemoteWritehandler(b *testing.B) {
@ -81,19 +81,23 @@ type Options struct {
// NoLockfile disables creation and consideration of a lock file. // NoLockfile disables creation and consideration of a lock file.
NoLockfile bool NoLockfile bool
// OutOfOrderTimeWindow specifies how much out of order is allowed, if any.
OutOfOrderTimeWindow int64
} }
// DefaultOptions used for the WAL storage. They are reasonable for setups using // DefaultOptions used for the WAL storage. They are reasonable for setups using
// millisecond-precision timestamps. // millisecond-precision timestamps.
func DefaultOptions() *Options { func DefaultOptions() *Options {
return &Options{ return &Options{
WALSegmentSize: wlog.DefaultSegmentSize, WALSegmentSize: wlog.DefaultSegmentSize,
WALCompression: wlog.CompressionNone, WALCompression: wlog.CompressionNone,
StripeSize: tsdb.DefaultStripeSize, StripeSize: tsdb.DefaultStripeSize,
TruncateFrequency: DefaultTruncateFrequency, TruncateFrequency: DefaultTruncateFrequency,
MinWALTime: DefaultMinWALTime, MinWALTime: DefaultMinWALTime,
MaxWALTime: DefaultMaxWALTime, MaxWALTime: DefaultMaxWALTime,
NoLockfile: false, NoLockfile: false,
OutOfOrderTimeWindow: 0,
} }
} }
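A sketch of opting in to the new window when constructing the agent database (the five-minute value is illustrative only):

opts := agent.DefaultOptions()
opts.OutOfOrderTimeWindow = (5 * time.Minute).Milliseconds()
// Per series, samples with t <= lastTs - 5m are rejected with
// storage.ErrOutOfOrderSample; newer samples inside the window pass.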
@ -812,6 +816,11 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo
series.Lock() series.Lock()
defer series.Unlock() defer series.Unlock()
if t <= a.minValidTime(series.lastTs) {
a.metrics.totalOutOfOrderSamples.Inc()
return 0, storage.ErrOutOfOrderSample
}
// NOTE: always modify pendingSamples and sampleSeries together. // NOTE: always modify pendingSamples and sampleSeries together.
a.pendingSamples = append(a.pendingSamples, record.RefSample{ a.pendingSamples = append(a.pendingSamples, record.RefSample{
Ref: series.ref, Ref: series.ref,
@ -935,6 +944,11 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int
series.Lock() series.Lock()
defer series.Unlock() defer series.Unlock()
if t <= a.minValidTime(series.lastTs) {
a.metrics.totalOutOfOrderSamples.Inc()
return 0, storage.ErrOutOfOrderSample
}
switch { switch {
case h != nil: case h != nil:
// NOTE: always modify pendingHistograms and histogramSeries together // NOTE: always modify pendingHistograms and histogramSeries together
@ -1103,3 +1117,13 @@ func (a *appender) logSeries() error {
return nil return nil
} }
// minValidTime returns the minimum timestamp that a sample can have
// and clamps the result to prevent integer underflow.
func (a *appender) minValidTime(lastTs int64) int64 {
if lastTs < math.MinInt64+a.opts.OutOfOrderTimeWindow {
return math.MinInt64
}
return lastTs - a.opts.OutOfOrderTimeWindow
}
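A worked case for the clamp (values hypothetical):

lastTs := int64(math.MinInt64 + 10)
window := int64(100)
naive := lastTs - window // underflows and wraps to a huge positive value
_ = naive
// minValidTime returns math.MinInt64 instead, so the check
// t <= minValidTime(lastTs) does not spuriously reject every sample.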
@ -16,6 +16,7 @@ package agent
import ( import (
"context" "context"
"fmt" "fmt"
"math"
"path/filepath" "path/filepath"
"strconv" "strconv"
"testing" "testing"
@ -761,7 +762,9 @@ func TestDBAllowOOOSamples(t *testing.T) {
) )
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
s := createTestAgentDB(t, reg, DefaultOptions()) opts := DefaultOptions()
opts.OutOfOrderTimeWindow = math.MaxInt64
s := createTestAgentDB(t, reg, opts)
app := s.Appender(context.TODO()) app := s.Appender(context.TODO())
// Let's add some samples in the [offset, offset+numDatapoints) range. // Let's add some samples in the [offset, offset+numDatapoints) range.
@ -879,6 +882,56 @@ func TestDBAllowOOOSamples(t *testing.T) {
require.NoError(t, db.Close()) require.NoError(t, db.Close())
} }
func TestDBOutOfOrderTimeWindow(t *testing.T) {
tc := []struct {
outOfOrderTimeWindow, firstTs, secondTs int64
expectedError error
}{
{0, 100, 101, nil},
{0, 100, 100, storage.ErrOutOfOrderSample},
{0, 100, 99, storage.ErrOutOfOrderSample},
{100, 100, 1, nil},
{100, 100, 0, storage.ErrOutOfOrderSample},
}
for _, c := range tc {
t.Run(fmt.Sprintf("outOfOrderTimeWindow=%d, firstTs=%d, secondTs=%d, expectedError=%s", c.outOfOrderTimeWindow, c.firstTs, c.secondTs, c.expectedError), func(t *testing.T) {
reg := prometheus.NewRegistry()
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = c.outOfOrderTimeWindow
s := createTestAgentDB(t, reg, opts)
app := s.Appender(context.TODO())
lbls := labelsForTest(t.Name()+"_histogram", 1)
lset := labels.New(lbls[0]...)
_, err := app.AppendHistogram(0, lset, c.firstTs, tsdbutil.GenerateTestHistograms(1)[0], nil)
require.NoError(t, err)
err = app.Commit()
require.NoError(t, err)
_, err = app.AppendHistogram(0, lset, c.secondTs, tsdbutil.GenerateTestHistograms(1)[0], nil)
require.ErrorIs(t, err, c.expectedError)
lbls = labelsForTest(t.Name(), 1)
lset = labels.New(lbls[0]...)
_, err = app.Append(0, lset, c.firstTs, 0)
require.NoError(t, err)
err = app.Commit()
require.NoError(t, err)
_, err = app.Append(0, lset, c.secondTs, 0)
require.ErrorIs(t, err, c.expectedError)
expectedAppendedSamples := float64(2)
if c.expectedError != nil {
expectedAppendedSamples = 1
}
m := gatherFamily(t, reg, "prometheus_agent_samples_appended_total")
require.Equal(t, expectedAppendedSamples, m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples")
require.Equal(t, expectedAppendedSamples, m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
require.NoError(t, s.Close())
})
}
}
func BenchmarkCreateSeries(b *testing.B) { func BenchmarkCreateSeries(b *testing.B) {
s := createTestAgentDB(b, nil, DefaultOptions()) s := createTestAgentDB(b, nil, DefaultOptions())
defer s.Close() defer s.Close()
@ -646,10 +646,10 @@ Outer:
} }
// CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones). // CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones).
// If there was a rewrite, then it returns the ULID of the new block written, else nil. // If there was a rewrite, then it returns the ULIDs of the new blocks written, else nil.
// If the resultant block is empty (tombstones covered the whole block), then it deletes the new block and return nil UID. // If a resultant block is empty (tombstones covered the whole block), then it returns an empty slice.
// It returns a boolean indicating if the parent block can be deleted safely or not. // It returns a boolean indicating if the parent block can be deleted safely or not.
func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, error) { func (pb *Block) CleanTombstones(dest string, c Compactor) ([]ulid.ULID, bool, error) {
numStones := 0 numStones := 0
if err := pb.tombstones.Iter(func(id storage.SeriesRef, ivs tombstones.Intervals) error { if err := pb.tombstones.Iter(func(id storage.SeriesRef, ivs tombstones.Intervals) error {
@ -664,12 +664,12 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, er
} }
meta := pb.Meta() meta := pb.Meta()
uid, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta) uids, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta)
if err != nil { if err != nil {
return nil, false, err return nil, false, err
} }
return &uid, true, nil return uids, true, nil
} }
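Call sites now receive zero or more new block ULIDs; a hedged sketch of the new shape (dest and compactor are assumed placeholders):

uids, rewritten, err := pb.CleanTombstones(dest, compactor)
if err != nil {
	return err
}
// rewritten reports whether the parent block can be deleted; uids is
// empty when the tombstones covered the whole block.
if rewritten && len(uids) > 0 {
	fmt.Println("new blocks:", uids)
}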
// Snapshot creates snapshot of the block into dir. // Snapshot creates snapshot of the block into dir.
@ -346,9 +346,10 @@ func TestBlockSize(t *testing.T) {
c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, nil) c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, nil)
require.NoError(t, err) require.NoError(t, err)
blockDirAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil) blockDirsAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil)
require.NoError(t, err) require.NoError(t, err)
blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirAfterCompact.String()), nil) require.Len(t, blockDirsAfterCompact, 1)
blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirsAfterCompact[0].String()), nil)
require.NoError(t, err) require.NoError(t, err)
defer func() { defer func() {
require.NoError(t, blockAfterCompact.Close()) require.NoError(t, blockAfterCompact.Close())
@ -605,9 +606,10 @@ func createBlockFromHead(tb testing.TB, dir string, head *Head) string {
// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
// Because of this block intervals are always +1 than the total samples it includes. // Because of this block intervals are always +1 than the total samples it includes.
ulid, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil) ulids, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil)
require.NoError(tb, err) require.NoError(tb, err)
return filepath.Join(dir, ulid.String()) require.Len(tb, ulids, 1)
return filepath.Join(dir, ulids[0].String())
} }
func createBlockFromOOOHead(tb testing.TB, dir string, head *OOOCompactionHead) string { func createBlockFromOOOHead(tb testing.TB, dir string, head *OOOCompactionHead) string {
@ -618,9 +620,10 @@ func createBlockFromOOOHead(tb testing.TB, dir string, head *OOOCompactionHead)
// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
// Because of this block intervals are always +1 than the total samples it includes. // Because of this block intervals are always +1 than the total samples it includes.
ulid, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil) ulids, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil)
require.NoError(tb, err) require.NoError(tb, err)
return filepath.Join(dir, ulid.String()) require.Len(tb, ulids, 1)
return filepath.Join(dir, ulids[0].String())
} }
func createHead(tb testing.TB, w *wlog.WL, series []storage.Series, chunkDir string) *Head { func createHead(tb testing.TB, w *wlog.WL, series []storage.Series, chunkDir string) *Head {
@ -105,12 +105,17 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) {
if err != nil { if err != nil {
return ulid.ULID{}, fmt.Errorf("create leveled compactor: %w", err) return ulid.ULID{}, fmt.Errorf("create leveled compactor: %w", err)
} }
id, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil) ids, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil)
if err != nil { if err != nil {
return ulid.ULID{}, fmt.Errorf("compactor write: %w", err) return ulid.ULID{}, fmt.Errorf("compactor write: %w", err)
} }
return id, nil // No block was produced. The caller is responsible for checking for
// an empty ulid.ULID based on its use case.
if len(ids) == 0 {
return ulid.ULID{}, nil
}
return ids[0], nil
} }
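Since ulid.ULID is a comparable array type, callers can detect the no-op case directly; a sketch:

id, err := w.Flush(ctx)
if err != nil {
	return err
}
if id == (ulid.ULID{}) {
	return nil // nothing fell inside [mint, maxt); no block written
}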
func (w *BlockWriter) Close() error { func (w *BlockWriter) Close() error {
@ -76,6 +76,7 @@ func (c *FloatHistogramChunk) NumSamples() int {
func (c *FloatHistogramChunk) Layout() ( func (c *FloatHistogramChunk) Layout() (
schema int32, zeroThreshold float64, schema int32, zeroThreshold float64,
negativeSpans, positiveSpans []histogram.Span, negativeSpans, positiveSpans []histogram.Span,
customValues []float64,
err error, err error,
) { ) {
if c.NumSamples() == 0 { if c.NumSamples() == 0 {
@ -133,17 +134,18 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) {
a := &FloatHistogramAppender{ a := &FloatHistogramAppender{
b: &c.b, b: &c.b,
schema: it.schema, schema: it.schema,
zThreshold: it.zThreshold, zThreshold: it.zThreshold,
pSpans: it.pSpans, pSpans: it.pSpans,
nSpans: it.nSpans, nSpans: it.nSpans,
customValues: it.customValues,
t: it.t, t: it.t,
tDelta: it.tDelta, tDelta: it.tDelta,
cnt: it.cnt, cnt: it.cnt,
zCnt: it.zCnt, zCnt: it.zCnt,
pBuckets: pBuckets, pBuckets: pBuckets,
nBuckets: nBuckets, nBuckets: nBuckets,
sum: it.sum, sum: it.sum,
} }
if it.numTotal == 0 { if it.numTotal == 0 {
a.sum.leading = 0xff a.sum.leading = 0xff
@ -191,6 +193,7 @@ type FloatHistogramAppender struct {
schema int32 schema int32
zThreshold float64 zThreshold float64
pSpans, nSpans []histogram.Span pSpans, nSpans []histogram.Span
customValues []float64
t, tDelta int64 t, tDelta int64
sum, cnt, zCnt xorValue sum, cnt, zCnt xorValue
@ -222,6 +225,7 @@ func (a *FloatHistogramAppender) Append(int64, float64) {
// //
// The chunk is not appendable in the following cases: // The chunk is not appendable in the following cases:
// - The schema has changed. // - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed. // - The threshold for the zero bucket has changed.
// - Any buckets have disappeared. // - Any buckets have disappeared.
// - There was a counter reset in the count of observations or in any bucket, including the zero bucket. // - There was a counter reset in the count of observations or in any bucket, including the zero bucket.
@ -263,6 +267,11 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
return return
} }
if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
counterReset = true
return
}
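To make the bounds check above concrete, a small illustration (values hypothetical; FloatBucketsMatch is the histogram-package helper used in this diff, which compares the two bound slices element-wise):

	old := []float64{1, 2, 5}
	same := []float64{1, 2, 5}
	changed := []float64{1, 2, 5, 10}
	histogram.FloatBucketsMatch(old, same)    // true: identical bounds, still appendable
	histogram.FloatBucketsMatch(old, changed) // false: layout changed, so counterReset is set and a new chunk starts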
if h.ZeroCount < a.zCnt.value { if h.ZeroCount < a.zCnt.value {
// There has been a counter reset since ZeroThreshold didn't change. // There has been a counter reset since ZeroThreshold didn't change.
counterReset = true counterReset = true
@ -303,6 +312,7 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
// //
// The chunk is not appendable in the following cases: // The chunk is not appendable in the following cases:
// - The schema has changed. // - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed. // - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale. // - The last sample in the chunk was stale while the current sample is not stale.
func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) ( func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) (
@ -329,6 +339,10 @@ func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) (
return return
} }
if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
return
}
positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans) positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans) negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
okToAppend = true okToAppend = true
@ -422,7 +436,7 @@ func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.Floa
if num == 0 { if num == 0 {
// The first append gets the privilege to dictate the layout // The first append gets the privilege to dictate the layout
// but it's also responsible for encoding it into the chunk! // but it's also responsible for encoding it into the chunk!
writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans) writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans, h.CustomValues)
a.schema = h.Schema a.schema = h.Schema
a.zThreshold = h.ZeroThreshold a.zThreshold = h.ZeroThreshold
@ -438,6 +452,12 @@ func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.Floa
} else { } else {
a.nSpans = nil a.nSpans = nil
} }
if len(h.CustomValues) > 0 {
a.customValues = make([]float64, len(h.CustomValues))
copy(a.customValues, h.CustomValues)
} else {
a.customValues = nil
}
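A short note on the copy above: the appender must keep its own copy of CustomValues so that later mutations of the caller's histogram cannot change the layout the appender compares against. A sketch of the aliasing hazard the copy avoids (values hypothetical):

	h := &histogram.FloatHistogram{
		Schema:       histogram.CustomBucketsSchema,
		CustomValues: []float64{1, 2, 5},
	}
	// ... the first append dictates the chunk layout ...
	h.CustomValues[2] = 10 // with a retained slice this would silently alter the
	                       // appender's recorded layout; the copy prevents that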
numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans) numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
if numPBuckets > 0 { if numPBuckets > 0 {
@ -693,6 +713,7 @@ type floatHistogramIterator struct {
schema int32 schema int32
zThreshold float64 zThreshold float64
pSpans, nSpans []histogram.Span pSpans, nSpans []histogram.Span
customValues []float64
// For the fields that are tracked as deltas and ultimately dod's. // For the fields that are tracked as deltas and ultimately dod's.
t int64 t int64
@ -753,6 +774,7 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
NegativeSpans: it.nSpans, NegativeSpans: it.nSpans,
PositiveBuckets: it.pBuckets, PositiveBuckets: it.pBuckets,
NegativeBuckets: it.nBuckets, NegativeBuckets: it.nBuckets,
CustomValues: it.customValues,
} }
} }
@ -775,6 +797,9 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
fh.NegativeBuckets = resize(fh.NegativeBuckets, len(it.nBuckets)) fh.NegativeBuckets = resize(fh.NegativeBuckets, len(it.nBuckets))
copy(fh.NegativeBuckets, it.nBuckets) copy(fh.NegativeBuckets, it.nBuckets)
fh.CustomValues = resize(fh.CustomValues, len(it.customValues))
copy(fh.CustomValues, it.customValues)
return it.t, fh return it.t, fh
} }
@ -819,7 +844,7 @@ func (it *floatHistogramIterator) Next() ValueType {
// The first read is responsible for reading the chunk layout // The first read is responsible for reading the chunk layout
// and for initializing fields that depend on it. We give // and for initializing fields that depend on it. We give
// counter reset info at chunk level, hence we discard it here. // counter reset info at chunk level, hence we discard it here.
schema, zeroThreshold, posSpans, negSpans, err := readHistogramChunkLayout(&it.br) schema, zeroThreshold, posSpans, negSpans, customValues, err := readHistogramChunkLayout(&it.br)
if err != nil { if err != nil {
it.err = err it.err = err
return ValNone return ValNone
@ -827,6 +852,7 @@ func (it *floatHistogramIterator) Next() ValueType {
it.schema = schema it.schema = schema
it.zThreshold = zeroThreshold it.zThreshold = zeroThreshold
it.pSpans, it.nSpans = posSpans, negSpans it.pSpans, it.nSpans = posSpans, negSpans
it.customValues = customValues
numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans) numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans)
// Allocate bucket slices as needed, recycling existing slices // Allocate bucket slices as needed, recycling existing slices
// in case this iterator was reset and already has slices of a // in case this iterator was reset and already has slices of a
@ -280,7 +280,38 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) {
} }
func TestFloatHistogramChunkAppendable(t *testing.T) { func TestFloatHistogramChunkAppendable(t *testing.T) {
setup := func() (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) { eh := &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
}
cbh := &histogram.FloatHistogram{
Count: 24,
Sum: 18.4,
Schema: histogram.CustomBucketsSchema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
}
setup := func(h *histogram.FloatHistogram) (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) {
c := Chunk(NewFloatHistogramChunk()) c := Chunk(NewFloatHistogramChunk())
// Create fresh appender and add the first histogram. // Create fresh appender and add the first histogram.
@ -289,32 +320,17 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
require.Equal(t, 0, c.NumSamples()) require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890) ts := int64(1234567890)
h1 := &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
}
chk, _, app, err := app.AppendFloatHistogram(nil, ts, h1.Copy(), false) chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.Copy(), false)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, chk) require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples()) require.Equal(t, 1, c.NumSamples())
require.Equal(t, UnknownCounterReset, c.(*FloatHistogramChunk).GetCounterResetHeader()) require.Equal(t, UnknownCounterReset, c.(*FloatHistogramChunk).GetCounterResetHeader())
return c, app.(*FloatHistogramAppender), ts, h1 return c, app.(*FloatHistogramAppender), ts, h
} }
{ // Schema change. { // Schema change.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.Schema++ h2.Schema++
_, _, ok, _ := hApp.appendable(h2) _, _, ok, _ := hApp.appendable(h2)
@ -324,7 +340,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
} }
{ // Zero threshold change. { // Zero threshold change.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.ZeroThreshold += 0.1 h2.ZeroThreshold += 0.1
_, _, ok, _ := hApp.appendable(h2) _, _, ok, _ := hApp.appendable(h2)
@ -334,7 +350,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
} }
{ // New histogram that has more buckets. { // New histogram that has more buckets.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3}, {Offset: 0, Length: 3},
@ -357,7 +373,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
} }
{ // New histogram that has a bucket missing. { // New histogram that has a bucket missing.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
@ -379,7 +395,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
} }
{ // New histogram that has a counter reset while buckets are same. { // New histogram that has a counter reset while buckets are same.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.Sum = 23 h2.Sum = 23
h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1} h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1}
@ -394,7 +410,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
} }
{ // New histogram that has a counter reset while new buckets were added. { // New histogram that has a counter reset while new buckets were added.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3}, {Offset: 0, Length: 3},
@ -415,7 +431,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
} }
{ {
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
// New histogram that has a counter reset while new buckets were // New histogram that has a counter reset while new buckets were
// added before the first bucket and reset on first bucket. (to // added before the first bucket and reset on first bucket. (to
// catch the edge case where the new bucket should be forwarded // catch the edge case where the new bucket should be forwarded
@ -442,7 +458,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
} }
{ // New histogram that has an explicit counter reset. { // New histogram that has an explicit counter reset.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.CounterResetHint = histogram.CounterReset h2.CounterResetHint = histogram.CounterReset
@ -450,7 +466,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
} }
{ // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk. { // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk.
_, hApp, ts, h1 := setup() _, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() // Identity is appendable. h2 := h1.Copy() // Identity is appendable.
nextChunk := NewFloatHistogramChunk() nextChunk := NewFloatHistogramChunk()
@ -466,7 +482,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
} }
{ // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk. { // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk.
_, hApp, ts, h1 := setup() _, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.Count-- // Make this not appendable due to counter reset. h2.Count-- // Make this not appendable due to counter reset.
@ -483,7 +499,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
} }
{ // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk. { // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk.
_, hApp, ts, h1 := setup() _, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3}, {Offset: 0, Length: 3},
@ -507,6 +523,72 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
assertSampleCount(t, nextChunk, 1, ValFloatHistogram) assertSampleCount(t, nextChunk, 1, ValFloatHistogram)
require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader()) require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
} }
{ // Custom buckets, no change.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
_, _, ok, _ := hApp.appendable(h2)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Custom buckets, increase in bucket counts but no change in layout.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.Count++
h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 2}
_, _, ok, _ := hApp.appendable(h2)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Custom buckets, decrease in bucket counts but no change in layout.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.Count--
h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 0}
_, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
{ // Custom buckets, change only in custom bounds.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
_, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
{ // Custom buckets, with more buckets.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Count += 6
h2.Sum = 30
// Existing histogram should get values converted from the above to:
// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} // (total 30)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
} }
func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) { func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) {
@ -526,7 +608,7 @@ func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *Fl
func assertNoNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) { func assertNoNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) {
oldChunkBytes := oldChunk.Bytes() oldChunkBytes := oldChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false) newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false)
require.NotEqual(t, oldChunkBytes, oldChunk.Bytes()) // Sanity check that previous chunk is untouched. require.Greater(t, len(oldChunk.Bytes()), len(oldChunkBytes)) // Check that the current chunk grew.
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, newChunk) require.Nil(t, newChunk)
require.False(t, recoded) require.False(t, recoded)
@ -715,6 +797,32 @@ func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2}, NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2},
}, },
}, },
"empty span in old and new custom buckets histogram": {
h1: &histogram.FloatHistogram{
Schema: histogram.CustomBucketsSchema,
Count: 7,
Sum: 1234.5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 2, 1, 1, 1, 1, 1},
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
},
h2: &histogram.FloatHistogram{
Schema: histogram.CustomBucketsSchema,
Count: 10,
Sum: 2345.6,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
},
},
} }
for name, tc := range tests { for name, tc := range tests {
@ -741,7 +849,40 @@ func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
} }
func TestFloatHistogramChunkAppendableGauge(t *testing.T) { func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
setup := func() (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) { eh := &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
}
cbh := &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 24,
Sum: 18.4,
Schema: histogram.CustomBucketsSchema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
}
setup := func(h *histogram.FloatHistogram) (Chunk, *FloatHistogramAppender, int64, *histogram.FloatHistogram) {
c := Chunk(NewFloatHistogramChunk()) c := Chunk(NewFloatHistogramChunk())
// Create fresh appender and add the first histogram. // Create fresh appender and add the first histogram.
@ -750,33 +891,17 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
require.Equal(t, 0, c.NumSamples()) require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890) ts := int64(1234567890)
h1 := &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []float64{6, 3, 3, 2, 4, 5, 1},
}
chk, _, app, err := app.AppendFloatHistogram(nil, ts, h1.Copy(), false) chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.Copy(), false)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, chk) require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples()) require.Equal(t, 1, c.NumSamples())
require.Equal(t, GaugeType, c.(*FloatHistogramChunk).GetCounterResetHeader()) require.Equal(t, GaugeType, c.(*FloatHistogramChunk).GetCounterResetHeader())
return c, app.(*FloatHistogramAppender), ts, h1 return c, app.(*FloatHistogramAppender), ts, h
} }
{ // Schema change. { // Schema change.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.Schema++ h2.Schema++
_, _, _, _, _, _, ok := hApp.appendableGauge(h2) _, _, _, _, _, _, ok := hApp.appendableGauge(h2)
@ -786,7 +911,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
} }
{ // Zero threshold change. { // Zero threshold change.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.ZeroThreshold += 0.1 h2.ZeroThreshold += 0.1
_, _, _, _, _, _, ok := hApp.appendableGauge(h2) _, _, _, _, _, _, ok := hApp.appendableGauge(h2)
@ -796,7 +921,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
} }
{ // New histogram that has more buckets. { // New histogram that has more buckets.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3}, {Offset: 0, Length: 3},
@ -820,7 +945,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
} }
{ // New histogram that has buckets missing. { // New histogram that has buckets missing.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2}, {Offset: 0, Length: 2},
@ -844,7 +969,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
} }
{ // New histogram that has a bucket missing and new buckets. { // New histogram that has a bucket missing and new buckets.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2}, {Offset: 0, Length: 2},
@ -866,7 +991,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
} }
{ // New histogram that has a counter reset while buckets are same. { // New histogram that has a counter reset while buckets are same.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.Sum = 23 h2.Sum = 23
h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1} h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1}
@ -882,7 +1007,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
} }
{ // New histogram that has a counter reset while new buckets were added. { // New histogram that has a counter reset while new buckets were added.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3}, {Offset: 0, Length: 3},
@ -906,7 +1031,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
{ {
// New histogram that has a counter reset while new buckets were // New histogram that has a counter reset while new buckets were
// added before the first bucket and reset on first bucket. // added before the first bucket and reset on first bucket.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: -3, Length: 2}, {Offset: -3, Length: 2},
@ -928,6 +1053,73 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
} }
{ // Custom buckets, no change.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // Custom buckets, increase in bucket counts but no change in layout.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.Count++
h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 2}
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // Custom buckets, decrease in bucket counts but no change in layout.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.Count--
h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 0}
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // Custom buckets, change only in custom bounds.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // Custom buckets, with more buckets.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Count += 6
h2.Sum = 30
// Existing histogram should get values converted from the above to:
// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} // (total 30)
posInterjections, negInterjections, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, pBackwardI)
require.Empty(t, nBackwardI)
require.True(t, ok) // Only new buckets came in.
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
} }
func TestFloatHistogramAppendOnlyErrors(t *testing.T) { func TestFloatHistogramAppendOnlyErrors(t *testing.T) {
@ -975,4 +1167,26 @@ func TestFloatHistogramAppendOnlyErrors(t *testing.T) {
require.False(t, isRecoded) require.False(t, isRecoded)
require.EqualError(t, err, "float histogram counter reset") require.EqualError(t, err, "float histogram counter reset")
}) })
t.Run("counter reset error with custom buckets", func(t *testing.T) {
c := Chunk(NewFloatHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
h := tsdbutil.GenerateTestCustomBucketsFloatHistogram(0)
var isRecoded bool
c, isRecoded, app, err = app.AppendFloatHistogram(nil, 1, h, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.NoError(t, err)
// Add erroring histogram.
h2 := h.Copy()
h2.CustomValues = []float64{0, 1, 2, 3, 4, 5, 6, 7}
c, isRecoded, _, err = app.AppendFloatHistogram(nil, 2, h2, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.EqualError(t, err, "float histogram counter reset")
})
} }
@ -69,6 +69,7 @@ func (c *HistogramChunk) NumSamples() int {
func (c *HistogramChunk) Layout() ( func (c *HistogramChunk) Layout() (
schema int32, zeroThreshold float64, schema int32, zeroThreshold float64,
negativeSpans, positiveSpans []histogram.Span, negativeSpans, positiveSpans []histogram.Span,
customValues []float64,
err error, err error,
) { ) {
if c.NumSamples() == 0 { if c.NumSamples() == 0 {
@ -131,6 +132,7 @@ func (c *HistogramChunk) Appender() (Appender, error) {
zThreshold: it.zThreshold, zThreshold: it.zThreshold,
pSpans: it.pSpans, pSpans: it.pSpans,
nSpans: it.nSpans, nSpans: it.nSpans,
customValues: it.customValues,
t: it.t, t: it.t,
cnt: it.cnt, cnt: it.cnt,
zCnt: it.zCnt, zCnt: it.zCnt,
@ -198,6 +200,7 @@ type HistogramAppender struct {
schema int32 schema int32
zThreshold float64 zThreshold float64
pSpans, nSpans []histogram.Span pSpans, nSpans []histogram.Span
customValues []float64
// Although we intend to start new chunks on counter resets, we still // Although we intend to start new chunks on counter resets, we still
// have to handle negative deltas for gauge histograms. Therefore, even // have to handle negative deltas for gauge histograms. Therefore, even
@ -241,6 +244,7 @@ func (a *HistogramAppender) Append(int64, float64) {
// The chunk is not appendable in the following cases: // The chunk is not appendable in the following cases:
// //
// - The schema has changed. // - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed. // - The threshold for the zero bucket has changed.
// - Any buckets have disappeared. // - Any buckets have disappeared.
// - There was a counter reset in the count of observations or in any bucket, // - There was a counter reset in the count of observations or in any bucket,
@ -283,6 +287,11 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) (
return return
} }
if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
counterReset = true
return
}
if h.ZeroCount < a.zCnt { if h.ZeroCount < a.zCnt {
// There has been a counter reset since ZeroThreshold didn't change. // There has been a counter reset since ZeroThreshold didn't change.
counterReset = true counterReset = true
@ -323,6 +332,7 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) (
// //
// The chunk is not appendable in the following cases: // The chunk is not appendable in the following cases:
// - The schema has changed. // - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed. // - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale. // - The last sample in the chunk was stale while the current sample is not stale.
func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) ( func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) (
@ -349,6 +359,10 @@ func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) (
return return
} }
if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
return
}
positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans) positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans) negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
okToAppend = true okToAppend = true
@ -442,7 +456,7 @@ func (a *HistogramAppender) appendHistogram(t int64, h *histogram.Histogram) {
if num == 0 { if num == 0 {
// The first append gets the privilege to dictate the layout // The first append gets the privilege to dictate the layout
// but it's also responsible for encoding it into the chunk! // but it's also responsible for encoding it into the chunk!
writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans) writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans, h.CustomValues)
a.schema = h.Schema a.schema = h.Schema
a.zThreshold = h.ZeroThreshold a.zThreshold = h.ZeroThreshold
@ -458,6 +472,12 @@ func (a *HistogramAppender) appendHistogram(t int64, h *histogram.Histogram) {
} else { } else {
a.nSpans = nil a.nSpans = nil
} }
if len(h.CustomValues) > 0 {
a.customValues = make([]float64, len(h.CustomValues))
copy(a.customValues, h.CustomValues)
} else {
a.customValues = nil
}
numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans) numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
if numPBuckets > 0 { if numPBuckets > 0 {
@ -741,6 +761,7 @@ type histogramIterator struct {
schema int32 schema int32
zThreshold float64 zThreshold float64
pSpans, nSpans []histogram.Span pSpans, nSpans []histogram.Span
customValues []float64
// For the fields that are tracked as deltas and ultimately dod's. // For the fields that are tracked as deltas and ultimately dod's.
t int64 t int64
@ -797,6 +818,7 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
NegativeSpans: it.nSpans, NegativeSpans: it.nSpans,
PositiveBuckets: it.pBuckets, PositiveBuckets: it.pBuckets,
NegativeBuckets: it.nBuckets, NegativeBuckets: it.nBuckets,
CustomValues: it.customValues,
} }
} }
@ -819,6 +841,9 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
h.NegativeBuckets = resize(h.NegativeBuckets, len(it.nBuckets)) h.NegativeBuckets = resize(h.NegativeBuckets, len(it.nBuckets))
copy(h.NegativeBuckets, it.nBuckets) copy(h.NegativeBuckets, it.nBuckets)
h.CustomValues = resize(h.CustomValues, len(it.customValues))
copy(h.CustomValues, it.customValues)
return it.t, h return it.t, h
} }
@ -839,6 +864,7 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
NegativeSpans: it.nSpans, NegativeSpans: it.nSpans,
PositiveBuckets: it.pFloatBuckets, PositiveBuckets: it.pFloatBuckets,
NegativeBuckets: it.nFloatBuckets, NegativeBuckets: it.nFloatBuckets,
CustomValues: it.customValues,
} }
} }
@ -869,6 +895,9 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
fh.NegativeBuckets[i] = currentNegative fh.NegativeBuckets[i] = currentNegative
} }
fh.CustomValues = resize(fh.CustomValues, len(it.customValues))
copy(fh.CustomValues, it.customValues)
return it.t, fh return it.t, fh
} }
@ -927,7 +956,7 @@ func (it *histogramIterator) Next() ValueType {
// The first read is responsible for reading the chunk layout // The first read is responsible for reading the chunk layout
// and for initializing fields that depend on it. We give // and for initializing fields that depend on it. We give
// counter reset info at chunk level, hence we discard it here. // counter reset info at chunk level, hence we discard it here.
schema, zeroThreshold, posSpans, negSpans, err := readHistogramChunkLayout(&it.br) schema, zeroThreshold, posSpans, negSpans, customValues, err := readHistogramChunkLayout(&it.br)
if err != nil { if err != nil {
it.err = err it.err = err
return ValNone return ValNone
@ -935,6 +964,7 @@ func (it *histogramIterator) Next() ValueType {
it.schema = schema it.schema = schema
it.zThreshold = zeroThreshold it.zThreshold = zeroThreshold
it.pSpans, it.nSpans = posSpans, negSpans it.pSpans, it.nSpans = posSpans, negSpans
it.customValues = customValues
numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans) numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans)
// The code below recycles existing slices in case this iterator // The code below recycles existing slices in case this iterator
// was reset and already has slices of a sufficient capacity. // was reset and already has slices of a sufficient capacity.
@ -21,17 +21,21 @@ import (
func writeHistogramChunkLayout( func writeHistogramChunkLayout(
b *bstream, schema int32, zeroThreshold float64, b *bstream, schema int32, zeroThreshold float64,
positiveSpans, negativeSpans []histogram.Span, positiveSpans, negativeSpans []histogram.Span, customValues []float64,
) { ) {
putZeroThreshold(b, zeroThreshold) putZeroThreshold(b, zeroThreshold)
putVarbitInt(b, int64(schema)) putVarbitInt(b, int64(schema))
putHistogramChunkLayoutSpans(b, positiveSpans) putHistogramChunkLayoutSpans(b, positiveSpans)
putHistogramChunkLayoutSpans(b, negativeSpans) putHistogramChunkLayoutSpans(b, negativeSpans)
if histogram.IsCustomBucketsSchema(schema) {
putHistogramChunkLayoutCustomBounds(b, customValues)
}
} }
func readHistogramChunkLayout(b *bstreamReader) ( func readHistogramChunkLayout(b *bstreamReader) (
schema int32, zeroThreshold float64, schema int32, zeroThreshold float64,
positiveSpans, negativeSpans []histogram.Span, positiveSpans, negativeSpans []histogram.Span,
customValues []float64,
err error, err error,
) { ) {
zeroThreshold, err = readZeroThreshold(b) zeroThreshold, err = readZeroThreshold(b)
@ -55,6 +59,13 @@ func readHistogramChunkLayout(b *bstreamReader) (
return return
} }
if histogram.IsCustomBucketsSchema(schema) {
customValues, err = readHistogramChunkLayoutCustomBounds(b)
if err != nil {
return
}
}
return return
} }
@ -91,6 +102,30 @@ func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) {
return spans, nil return spans, nil
} }
func putHistogramChunkLayoutCustomBounds(b *bstream, customValues []float64) {
putVarbitUint(b, uint64(len(customValues)))
for _, bound := range customValues {
putCustomBound(b, bound)
}
}
func readHistogramChunkLayoutCustomBounds(b *bstreamReader) ([]float64, error) {
var customValues []float64
num, err := readVarbitUint(b)
if err != nil {
return nil, err
}
for i := 0; i < int(num); i++ {
bound, err := readCustomBound(b)
if err != nil {
return nil, err
}
customValues = append(customValues, bound)
}
return customValues, nil
}
// putZeroThreshold writes the zero threshold to the bstream. It stores typical // putZeroThreshold writes the zero threshold to the bstream. It stores typical
// values in just one byte, but needs 9 bytes for other values. In detail: // values in just one byte, but needs 9 bytes for other values. In detail:
// - If the threshold is 0, store a single zero byte. // - If the threshold is 0, store a single zero byte.
@ -139,6 +174,59 @@ func readZeroThreshold(br *bstreamReader) (float64, error) {
} }
} }
// isWholeWhenMultiplied checks to see if the number when multiplied by 1000 can
// be converted into an integer without losing precision.
func isWholeWhenMultiplied(in float64) bool {
i := uint(math.Round(in * 1000))
out := float64(i) / 1000
return in == out
}
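For instance, checking the helper above with a couple of values:

	isWholeWhenMultiplied(0.25)   // true:  0.25*1000 = 250, and 250/1000 == 0.25
	isWholeWhenMultiplied(0.0005) // false: 0.0005*1000 = 0.5 rounds to 1, and 0.001 != 0.0005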
// putCustomBound writes a custom bound to the bstream. It stores values from
// 0 to 33554.430 (inclusive) that are multiples of 0.001 in unsigned varbit
// encoding of up to 4 bytes, but needs 1 bit + 8 bytes for other values like
// negative numbers, numbers greater than 33554.430, or numbers that are not
// a multiple of 0.001, on the assumption that they are less common. In detail:
// - Multiply the bound by 1000, without rounding.
// - If the multiplied bound is >= 0, <= 33554430 and a whole number,
// add 1 and store it in unsigned varbit encoding. All these numbers are
// greater than 0, so the leading bit of the varbit is always 1!
// - Otherwise, store a 0 bit, followed by the 8 bytes of the original
// bound as a float64.
//
// When reading the values, we first decode a value as an unsigned varbit;
// if it is 0, we read the next 8 bytes as a float64; otherwise we convert
// the value to a float64 by subtracting 1 and dividing by 1000.
func putCustomBound(b *bstream, f float64) {
tf := f * 1000
// The limit 33554430 is the maximum that fits in a 4-byte varbit (33554431)
// minus the 1 we add before storing; other values are stored in 8 bytes anyway.
if tf < 0 || tf > 33554430 || !isWholeWhenMultiplied(f) {
b.writeBit(zero)
b.writeBits(math.Float64bits(f), 64)
return
}
putVarbitUint(b, uint64(math.Round(tf))+1)
}
// readCustomBound reads the custom bound written with putCustomBound.
func readCustomBound(br *bstreamReader) (float64, error) {
b, err := readVarbitUint(br)
if err != nil {
return 0, err
}
switch b {
case 0:
v, err := br.readBits(64)
if err != nil {
return 0, err
}
return math.Float64frombits(v), nil
default:
return float64(b-1) / 1000, nil
}
}
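A minimal round-trip sketch of the two encodings above, in the style of TestWriteReadHistogramChunkLayout further down (the test name and value choices are illustrative):

	func TestCustomBoundRoundTrip(t *testing.T) {
		// 0, 0.001 and 0.25 take the compact varbit path (0.25 is stored as
		// 250+1 = 251); -1, 0.0005 and 123456.789 fall back to the
		// zero-bit-plus-float64 encoding.
		bounds := []float64{0, 0.001, 0.25, -1, 0.0005, 123456.789}
		bs := bstream{}
		for _, f := range bounds {
			putCustomBound(&bs, f)
		}
		bsr := newBReader(bs.bytes())
		for _, want := range bounds {
			got, err := readCustomBound(&bsr)
			require.NoError(t, err)
			require.Equal(t, want, got)
		}
	}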
type bucketIterator struct { type bucketIterator struct {
spans []histogram.Span spans []histogram.Span
span int // Span position of last yielded bucket. span int // Span position of last yielded bucket.
@ -373,6 +373,7 @@ func TestWriteReadHistogramChunkLayout(t *testing.T) {
schema int32 schema int32
zeroThreshold float64 zeroThreshold float64
positiveSpans, negativeSpans []histogram.Span positiveSpans, negativeSpans []histogram.Span
customValues []float64
}{ }{
{ {
schema: 3, schema: 3,
@ -422,23 +423,48 @@ func TestWriteReadHistogramChunkLayout(t *testing.T) {
positiveSpans: nil, positiveSpans: nil,
negativeSpans: nil, negativeSpans: nil,
}, },
{
schema: histogram.CustomBucketsSchema,
positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
negativeSpans: nil,
customValues: []float64{-5, -2.5, 0, 0.1, 0.25, 0.5, 1, 2, 5, 10, 25, 50, 100, 255, 500, 1000, 50000, 1e7},
},
{
schema: histogram.CustomBucketsSchema,
positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
negativeSpans: nil,
customValues: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 25.0, 50.0, 100.0},
},
{
schema: histogram.CustomBucketsSchema,
positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
negativeSpans: nil,
customValues: []float64{0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192},
},
{
schema: histogram.CustomBucketsSchema,
positiveSpans: []histogram.Span{{Offset: -4, Length: 3}, {Offset: 2, Length: 42}},
negativeSpans: nil,
customValues: []float64{1.001, 1.023, 2.01, 4.007, 4.095, 8.001, 8.19, 16.24},
},
} }
bs := bstream{} bs := bstream{}
for _, l := range layouts { for _, l := range layouts {
writeHistogramChunkLayout(&bs, l.schema, l.zeroThreshold, l.positiveSpans, l.negativeSpans) writeHistogramChunkLayout(&bs, l.schema, l.zeroThreshold, l.positiveSpans, l.negativeSpans, l.customValues)
} }
bsr := newBReader(bs.bytes()) bsr := newBReader(bs.bytes())
for _, want := range layouts { for _, want := range layouts {
gotSchema, gotZeroThreshold, gotPositiveSpans, gotNegativeSpans, err := readHistogramChunkLayout(&bsr) gotSchema, gotZeroThreshold, gotPositiveSpans, gotNegativeSpans, gotCustomBounds, err := readHistogramChunkLayout(&bsr)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, want.schema, gotSchema) require.Equal(t, want.schema, gotSchema)
require.Equal(t, want.zeroThreshold, gotZeroThreshold) require.Equal(t, want.zeroThreshold, gotZeroThreshold)
require.Equal(t, want.positiveSpans, gotPositiveSpans) require.Equal(t, want.positiveSpans, gotPositiveSpans)
require.Equal(t, want.negativeSpans, gotNegativeSpans) require.Equal(t, want.negativeSpans, gotNegativeSpans)
require.Equal(t, want.customValues, gotCustomBounds)
} }
} }
@ -294,7 +294,38 @@ func TestHistogramChunkBucketChanges(t *testing.T) {
} }
func TestHistogramChunkAppendable(t *testing.T) { func TestHistogramChunkAppendable(t *testing.T) {
setup := func() (Chunk, *HistogramAppender, int64, *histogram.Histogram) { eh := &histogram.Histogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
}
cbh := &histogram.Histogram{
Count: 24,
Sum: 18.4,
Schema: histogram.CustomBucketsSchema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
}
setup := func(h *histogram.Histogram) (Chunk, *HistogramAppender, int64, *histogram.Histogram) {
c := Chunk(NewHistogramChunk()) c := Chunk(NewHistogramChunk())
// Create fresh appender and add the first histogram. // Create fresh appender and add the first histogram.
@ -303,32 +334,17 @@ func TestHistogramChunkAppendable(t *testing.T) {
require.Equal(t, 0, c.NumSamples()) require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890) ts := int64(1234567890)
h1 := &histogram.Histogram{
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // counts: 6, 3, 3, 2, 4, 5, 1 (total 24)
}
chk, _, app, err := app.AppendHistogram(nil, ts, h1.Copy(), false) chk, _, app, err := app.AppendHistogram(nil, ts, h.Copy(), false)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, chk) require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples()) require.Equal(t, 1, c.NumSamples())
require.Equal(t, UnknownCounterReset, c.(*HistogramChunk).GetCounterResetHeader()) require.Equal(t, UnknownCounterReset, c.(*HistogramChunk).GetCounterResetHeader())
return c, app.(*HistogramAppender), ts, h1 return c, app.(*HistogramAppender), ts, h
} }
{ // Schema change. { // Schema change.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.Schema++ h2.Schema++
_, _, ok, _ := hApp.appendable(h2) _, _, ok, _ := hApp.appendable(h2)
@ -338,7 +354,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
} }
{ // Zero threshold change. { // Zero threshold change.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.ZeroThreshold += 0.1 h2.ZeroThreshold += 0.1
_, _, ok, _ := hApp.appendable(h2) _, _, ok, _ := hApp.appendable(h2)
@ -348,7 +364,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
} }
{ // New histogram that has more buckets. { // New histogram that has more buckets.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3}, {Offset: 0, Length: 3},
@ -374,7 +390,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
} }
{ // New histogram that has a bucket missing. { // New histogram that has a bucket missing.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2}, {Offset: 0, Length: 2},
@ -395,7 +411,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
} }
{ // New histogram that has a counter reset while buckets are same. { // New histogram that has a counter reset while buckets are same.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.Sum = 23 h2.Sum = 23
h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23) h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23)
@ -410,7 +426,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
} }
{ // New histogram that has a counter reset while new buckets were added. { // New histogram that has a counter reset while new buckets were added.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3}, {Offset: 0, Length: 3},
@ -438,7 +454,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
// added before the first bucket and reset on first bucket. (to // added before the first bucket and reset on first bucket. (to
// catch the edge case where the new bucket should be forwarded // catch the edge case where the new bucket should be forwarded
// ahead until first old bucket at start) // ahead until first old bucket at start)
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: -3, Length: 2}, {Offset: -3, Length: 2},
@ -464,7 +480,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
} }
{ // New histogram that has an explicit counter reset. { // New histogram that has an explicit counter reset.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.CounterResetHint = histogram.CounterReset h2.CounterResetHint = histogram.CounterReset
@ -472,7 +488,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
} }
{ // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk. { // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk.
_, hApp, ts, h1 := setup() _, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() // Identity is appendable. h2 := h1.Copy() // Identity is appendable.
nextChunk := NewHistogramChunk() nextChunk := NewHistogramChunk()
@ -488,7 +504,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
} }
{ // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk. { // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk.
_, hApp, ts, h1 := setup() _, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.Count-- // Make this not appendable due to counter reset. h2.Count-- // Make this not appendable due to counter reset.
@ -505,7 +521,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
} }
{ // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk. { // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk.
_, hApp, ts, h1 := setup() _, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3}, {Offset: 0, Length: 3},
@ -532,6 +548,72 @@ func TestHistogramChunkAppendable(t *testing.T) {
assertSampleCount(t, nextChunk, 1, ValHistogram) assertSampleCount(t, nextChunk, 1, ValHistogram)
require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader()) require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
} }
{ // Custom buckets, no change.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
_, _, ok, _ := hApp.appendable(h2)
require.True(t, ok)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Custom buckets, increase in bucket counts but no change in layout.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.Count++
h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -3}
_, _, ok, _ := hApp.appendable(h2)
require.True(t, ok)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Custom buckets, decrease in bucket counts but no change in layout.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.Count--
h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -5}
_, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
{ // Custom buckets, change only in custom bounds.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
_, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
{ // Custom buckets, with more buckets.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Count += 6
h2.Sum = 30
// Existing histogram should get values converted from the above to:
// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
} }
func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) { func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {
@ -548,6 +630,19 @@ func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *Histogr
assertSampleCount(t, newChunk, 1, ValHistogram) assertSampleCount(t, newChunk, 1, ValHistogram)
} }
func assertNoNewHistogramChunkOnAppend(t *testing.T, currChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {
prevChunkBytes := currChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false)
require.Greater(t, len(currChunk.Bytes()), len(prevChunkBytes)) // Check that the current chunk grew.
require.NoError(t, err)
require.Nil(t, newChunk)
require.False(t, recoded)
require.Equal(t, expectHeader, currChunk.(*HistogramChunk).GetCounterResetHeader())
require.NotNil(t, newAppender)
require.Equal(t, hApp, newAppender)
assertSampleCount(t, currChunk, 2, ValHistogram)
}
func assertRecodedHistogramChunkOnAppend(t *testing.T, prevChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) { func assertRecodedHistogramChunkOnAppend(t *testing.T, prevChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) {
prevChunkBytes := prevChunk.Bytes() prevChunkBytes := prevChunk.Bytes()
newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false) newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false)
@ -738,6 +833,32 @@ func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) {
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
}, },
}, },
"empty span in old and new custom buckets histogram": {
h1: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 7,
Sum: 1234.5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 1, -1, 0, 0, 0, 0},
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
},
h2: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 10,
Sum: 2345.6,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
},
},
} }
for name, tc := range tests { for name, tc := range tests {
@ -905,7 +1026,40 @@ func TestAtFloatHistogram(t *testing.T) {
} }
func TestHistogramChunkAppendableGauge(t *testing.T) { func TestHistogramChunkAppendableGauge(t *testing.T) {
setup := func() (Chunk, *HistogramAppender, int64, *histogram.Histogram) { eh := &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // {6, 3, 3, 2, 4, 5, 1}
}
cbh := &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 24,
Sum: 18.4,
Schema: histogram.CustomBucketsSchema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // {6, 3, 3, 2, 4, 5, 1}
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
}
setup := func(h *histogram.Histogram) (Chunk, *HistogramAppender, int64, *histogram.Histogram) {
c := Chunk(NewHistogramChunk()) c := Chunk(NewHistogramChunk())
// Create fresh appender and add the first histogram. // Create fresh appender and add the first histogram.
@@ -914,66 +1068,38 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
require.Equal(t, 0, c.NumSamples()) require.Equal(t, 0, c.NumSamples())
ts := int64(1234567890) ts := int64(1234567890)
h1 := &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 5,
ZeroCount: 2,
Sum: 18.4,
ZeroThreshold: 1e-125,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 2},
{Offset: 3, Length: 1},
{Offset: 1, Length: 1},
},
PositiveBuckets: []int64{6, -3, 0, -1, 2, 1, -4}, // {6, 3, 3, 2, 4, 5, 1}
}
chk, _, app, err := app.AppendHistogram(nil, ts, h1.Copy(), false) chk, _, app, err := app.AppendHistogram(nil, ts, h.Copy(), false)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, chk) require.Nil(t, chk)
require.Equal(t, 1, c.NumSamples()) require.Equal(t, 1, c.NumSamples())
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader()) require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
return c, app.(*HistogramAppender), ts, h1 return c, app.(*HistogramAppender), ts, h
} }
{ // Schema change. { // Schema change.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.Schema++ h2.Schema++
_, _, _, _, _, _, ok := hApp.appendableGauge(h2) _, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.False(t, ok) require.False(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
require.NoError(t, err)
require.NotNil(t, newc)
require.False(t, recoded)
require.NotEqual(t, c, newc)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
require.Equal(t, GaugeType, newc.(*HistogramChunk).GetCounterResetHeader())
} }
{ // Zero threshold change. { // Zero threshold change.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.ZeroThreshold += 0.1 h2.ZeroThreshold += 0.1
_, _, _, _, _, _, ok := hApp.appendableGauge(h2) _, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.False(t, ok) require.False(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
require.NoError(t, err)
require.NotNil(t, newc)
require.False(t, recoded)
require.NotEqual(t, c, newc)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
require.Equal(t, GaugeType, newc.(*HistogramChunk).GetCounterResetHeader())
} }
{ // New histogram that has more buckets. { // New histogram that has more buckets.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3}, {Offset: 0, Length: 3},
@@ -993,15 +1119,11 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
require.Empty(t, nBackwardI) require.Empty(t, nBackwardI)
require.True(t, ok) require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
require.NoError(t, err)
require.NotNil(t, newc)
require.True(t, recoded)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
} }
{ // New histogram that has buckets missing. { // New histogram that has buckets missing.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2}, {Offset: 0, Length: 2},
@@ -1021,15 +1143,11 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
require.Empty(t, nBackwardI) require.Empty(t, nBackwardI)
require.True(t, ok) require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
require.NoError(t, err)
require.Nil(t, newc)
require.False(t, recoded)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
} }
{ // New histogram that has a bucket missing and new buckets. { // New histogram that has a bucket missing and new buckets.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 2}, {Offset: 0, Length: 2},
@@ -1047,15 +1165,11 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
require.Empty(t, nBackwardI) require.Empty(t, nBackwardI)
require.True(t, ok) require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
require.NoError(t, err)
require.NotNil(t, newc)
require.True(t, recoded)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
} }
{ // New histogram that has a counter reset while buckets are same. { // New histogram that has a counter reset while buckets are same.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.Sum = 23 h2.Sum = 23
h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // {6, 2, 3, 2, 4, 5, 1} h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // {6, 2, 3, 2, 4, 5, 1}
@@ -1067,15 +1181,11 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
require.Empty(t, nBackwardI) require.Empty(t, nBackwardI)
require.True(t, ok) require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
require.NoError(t, err)
require.Nil(t, newc)
require.False(t, recoded)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
} }
{ // New histogram that has a counter reset while new buckets were added. { // New histogram that has a counter reset while new buckets were added.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3}, {Offset: 0, Length: 3},
@@ -1093,17 +1203,13 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
require.Empty(t, nBackwardI) require.Empty(t, nBackwardI)
require.True(t, ok) require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
require.NoError(t, err)
require.NotNil(t, newc)
require.True(t, recoded)
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader())
} }
{ {
// New histogram that has a counter reset while new buckets were // New histogram that has a counter reset while new buckets were
// added before the first bucket and reset on first bucket. // added before the first bucket and reset on first bucket.
c, hApp, ts, h1 := setup() c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy() h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ h2.PositiveSpans = []histogram.Span{
{Offset: -3, Length: 2}, {Offset: -3, Length: 2},
@@ -1123,11 +1229,74 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
require.Empty(t, nBackwardI) require.Empty(t, nBackwardI)
require.True(t, ok) require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
require.NoError(t, err) }
require.NotNil(t, newc)
require.True(t, recoded) { // Custom buckets, no change.
require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader()) c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.True(t, ok)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // Custom buckets, increase in bucket counts but no change in layout.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.Count++
h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -3}
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.True(t, ok)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // Custom buckets, decrease in bucket counts but no change in layout.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.Count--
h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -5}
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.True(t, ok)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // Custom buckets, change only in custom bounds.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
_, _, _, _, _, _, ok := hApp.appendableGauge(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
}
{ // Custom buckets, with more buckets.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 3},
{Offset: 1, Length: 1},
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
h2.Count += 6
h2.Sum = 30
// Existing histogram should get values converted from the above to:
// 6 3 0 3 0 0 2 4 5 0 1 (previous values with some new empty buckets in between)
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
posInterjections, negInterjections, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, pBackwardI)
require.Empty(t, nBackwardI)
require.True(t, ok) // Only new buckets came in.
assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
} }
} }
@@ -1176,4 +1345,26 @@ func TestHistogramAppendOnlyErrors(t *testing.T) {
require.False(t, isRecoded) require.False(t, isRecoded)
require.EqualError(t, err, "histogram counter reset") require.EqualError(t, err, "histogram counter reset")
}) })
t.Run("counter reset error with custom buckets", func(t *testing.T) {
c := Chunk(NewHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
h := tsdbutil.GenerateTestCustomBucketsHistogram(0)
var isRecoded bool
c, isRecoded, app, err = app.AppendHistogram(nil, 1, h, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.NoError(t, err)
// Add erroring histogram.
h2 := h.Copy()
h2.CustomValues = []float64{0, 1, 2, 3, 4, 5, 6, 7}
c, isRecoded, _, err = app.AppendHistogram(nil, 2, h2, true)
require.Nil(t, c)
require.False(t, isRecoded)
require.EqualError(t, err, "histogram counter reset")
})
} }

View file

@@ -58,19 +58,23 @@ type Compactor interface {
// Results returned when compactions are in progress are undefined. // Results returned when compactions are in progress are undefined.
Plan(dir string) ([]string, error) Plan(dir string) ([]string, error)
// Write persists a Block into a directory. // Write persists one or more Blocks into a directory.
// No Block is written when resulting Block has 0 samples, and returns empty ulid.ULID{}. // No Block is written when the resulting Block has 0 samples; an empty slice is returned.
Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) (ulid.ULID, error) // Prometheus always returns one or no block. The interface allows returning more than one
// block for downstream users to experiment with the compactor.
Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error)
// Compact runs compaction against the provided directories. Must // Compact runs compaction against the provided directories. Must
// only be called concurrently with results of Plan(). // only be called concurrently with results of Plan().
// Can optionally pass a list of already open blocks, // Can optionally pass a list of already open blocks,
// to avoid having to reopen them. // to avoid having to reopen them.
// When resulting Block has 0 samples // Prometheus always returns one or no block. The interface allows returning more than one
// block for downstream users to experiment with the compactor.
// When one resulting Block has 0 samples
// * No block is written. // * No block is written.
// * The source dirs are marked Deletable. // * The source dirs are marked Deletable.
// * Returns empty ulid.ULID{}. // * Block is not included in the result.
Compact(dest string, dirs []string, open []*Block) (ulid.ULID, error) Compact(dest string, dirs []string, open []*Block) ([]ulid.ULID, error)
} }
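A minimal sketch (not part of this diff) of how a caller consumes the new slice-returning contract: an empty slice, rather than a zero ulid.ULID, now signals that the resulting block had no samples. The helper name and the nil open-blocks argument are illustrative.

package example

import (
	"fmt"

	"github.com/oklog/ulid"

	"github.com/prometheus/prometheus/tsdb"
)

// handleCompaction runs one compaction and interprets the new result shape.
func handleCompaction(c tsdb.Compactor, dest string, plan []string) ([]ulid.ULID, error) {
	uids, err := c.Compact(dest, plan, nil) // nil: let the compactor open the blocks itself
	if err != nil {
		return nil, fmt.Errorf("compact %v: %w", plan, err)
	}
	if len(uids) == 0 {
		// The resulting block had 0 samples: nothing was written and the
		// source dirs were marked deletable.
		return nil, nil
	}
	return uids, nil
}

Checking len(uids) replaces the old uid.Compare(ulid.ULID{}) sentinel test, as seen in the compactOOO change further down.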
// LeveledCompactor implements the Compactor interface. // LeveledCompactor implements the Compactor interface.
@@ -441,11 +445,11 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
// Compact creates a new block in the compactor's directory from the blocks in the // Compact creates a new block in the compactor's directory from the blocks in the
// provided directories. // provided directories.
func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) { func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) ([]ulid.ULID, error) {
return c.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{}) return c.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{})
} }
func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) (uid ulid.ULID, err error) { func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) ([]ulid.ULID, error) {
var ( var (
blocks []BlockReader blocks []BlockReader
bs []*Block bs []*Block
@@ -457,7 +461,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
for _, d := range dirs { for _, d := range dirs {
meta, _, err := readMetaFile(d) meta, _, err := readMetaFile(d)
if err != nil { if err != nil {
return uid, err return nil, err
} }
var b *Block var b *Block
@@ -475,7 +479,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
var err error var err error
b, err = OpenBlock(c.logger, d, c.chunkPool) b, err = OpenBlock(c.logger, d, c.chunkPool)
if err != nil { if err != nil {
return uid, err return nil, err
} }
defer b.Close() defer b.Close()
} }
@@ -486,10 +490,10 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
uids = append(uids, meta.ULID.String()) uids = append(uids, meta.ULID.String())
} }
uid = ulid.MustNew(ulid.Now(), rand.Reader) uid := ulid.MustNew(ulid.Now(), rand.Reader)
meta := CompactBlockMetas(uid, metas...) meta := CompactBlockMetas(uid, metas...)
err = c.write(dest, meta, blockPopulator, blocks...) err := c.write(dest, meta, blockPopulator, blocks...)
if err == nil { if err == nil {
if meta.Stats.NumSamples == 0 { if meta.Stats.NumSamples == 0 {
for _, b := range bs { for _, b := range bs {
@@ -503,25 +507,25 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
} }
b.numBytesMeta = n b.numBytesMeta = n
} }
uid = ulid.ULID{}
level.Info(c.logger).Log( level.Info(c.logger).Log(
"msg", "compact blocks resulted in empty block", "msg", "compact blocks resulted in empty block",
"count", len(blocks), "count", len(blocks),
"sources", fmt.Sprintf("%v", uids), "sources", fmt.Sprintf("%v", uids),
"duration", time.Since(start), "duration", time.Since(start),
) )
} else { return nil, nil
level.Info(c.logger).Log(
"msg", "compact blocks",
"count", len(blocks),
"mint", meta.MinTime,
"maxt", meta.MaxTime,
"ulid", meta.ULID,
"sources", fmt.Sprintf("%v", uids),
"duration", time.Since(start),
)
} }
return uid, nil
level.Info(c.logger).Log(
"msg", "compact blocks",
"count", len(blocks),
"mint", meta.MinTime,
"maxt", meta.MaxTime,
"ulid", meta.ULID,
"sources", fmt.Sprintf("%v", uids),
"duration", time.Since(start),
)
return []ulid.ULID{uid}, nil
} }
errs := tsdb_errors.NewMulti(err) errs := tsdb_errors.NewMulti(err)
@@ -533,10 +537,10 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
} }
} }
return uid, errs.Err() return nil, errs.Err()
} }
func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) (ulid.ULID, error) { func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error) {
start := time.Now() start := time.Now()
uid := ulid.MustNew(ulid.Now(), rand.Reader) uid := ulid.MustNew(ulid.Now(), rand.Reader)
@@ -560,7 +564,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b
err := c.write(dest, meta, DefaultBlockPopulator{}, b) err := c.write(dest, meta, DefaultBlockPopulator{}, b)
if err != nil { if err != nil {
return uid, err return nil, err
} }
if meta.Stats.NumSamples == 0 { if meta.Stats.NumSamples == 0 {
@@ -570,7 +574,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b
"maxt", meta.MaxTime, "maxt", meta.MaxTime,
"duration", time.Since(start), "duration", time.Since(start),
) )
return ulid.ULID{}, nil return nil, nil
} }
level.Info(c.logger).Log( level.Info(c.logger).Log(
@@ -581,7 +585,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b
"duration", time.Since(start), "duration", time.Since(start),
"ooo", meta.Compaction.FromOutOfOrder(), "ooo", meta.Compaction.FromOutOfOrder(),
) )
return uid, nil return []ulid.ULID{uid}, nil
} }
// instrumentedChunkWriter is used for level 1 compactions to record statistics // instrumentedChunkWriter is used for level 1 compactions to record statistics
@@ -652,7 +656,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl
} }
closers = append(closers, indexw) closers = append(closers, indexw)
if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw); err != nil { if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw, AllSortedPostings); err != nil {
return fmt.Errorf("populate block: %w", err) return fmt.Errorf("populate block: %w", err)
} }
@@ -718,7 +722,20 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl
} }
type BlockPopulator interface { type BlockPopulator interface {
PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) error
}
// IndexReaderPostingsFunc is a function to get a sorted posting iterator from a given index reader.
type IndexReaderPostingsFunc func(ctx context.Context, reader IndexReader) index.Postings
// AllSortedPostings returns a sorted all posting iterator from the input index reader.
func AllSortedPostings(ctx context.Context, reader IndexReader) index.Postings {
k, v := index.AllPostingsKey()
all, err := reader.Postings(ctx, k, v)
if err != nil {
return index.ErrPostings(err)
}
return reader.SortedPostings(all)
} }
type DefaultBlockPopulator struct{} type DefaultBlockPopulator struct{}
@@ -726,7 +743,7 @@ type DefaultBlockPopulator struct{}
// PopulateBlock fills the index and chunk writers with new data gathered as the union // PopulateBlock fills the index and chunk writers with new data gathered as the union
// of the provided blocks. It returns meta information for the new block. // of the provided blocks. It returns meta information for the new block.
// It expects sorted blocks input by mint. // It expects sorted blocks input by mint.
func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) { func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) {
if len(blocks) == 0 { if len(blocks) == 0 {
return errors.New("cannot populate block from no readers") return errors.New("cannot populate block from no readers")
} }
@@ -784,14 +801,9 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa
} }
closers = append(closers, tombsr) closers = append(closers, tombsr)
k, v := index.AllPostingsKey() postings := postingsFunc(ctx, indexr)
all, err := indexr.Postings(ctx, k, v)
if err != nil {
return err
}
all = indexr.SortedPostings(all)
// Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp.
sets = append(sets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) sets = append(sets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, postings, meta.MinTime, meta.MaxTime-1, false))
syms := indexr.Symbols() syms := indexr.Symbols()
if i == 0 { if i == 0 {
symbols = syms symbols = syms

View file

@@ -38,6 +38,7 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/tsdb/wlog"
@@ -493,6 +494,7 @@ func TestCompaction_populateBlock(t *testing.T) {
inputSeriesSamples [][]seriesSamples inputSeriesSamples [][]seriesSamples
compactMinTime int64 compactMinTime int64
compactMaxTime int64 // When not defined the test runner sets a default of math.MaxInt64. compactMaxTime int64 // When not defined the test runner sets a default of math.MaxInt64.
irPostingsFunc IndexReaderPostingsFunc
expSeriesSamples []seriesSamples expSeriesSamples []seriesSamples
expErr error expErr error
}{ }{
@@ -961,6 +963,60 @@ func TestCompaction_populateBlock(t *testing.T) {
}, },
}, },
}, },
{
title: "Populate from single block with index reader postings function selecting different series. Expect empty block.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
},
},
irPostingsFunc: func(ctx context.Context, reader IndexReader) index.Postings {
p, err := reader.Postings(ctx, "a", "c")
if err != nil {
return index.EmptyPostings()
}
return reader.SortedPostings(p)
},
},
{
title: "Populate from single block with index reader postings function selecting one series. Expect partial block.",
inputSeriesSamples: [][]seriesSamples{
{
{
lset: map[string]string{"a": "b"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
{
lset: map[string]string{"a": "d"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
},
},
irPostingsFunc: func(ctx context.Context, reader IndexReader) index.Postings {
p, err := reader.Postings(ctx, "a", "c", "d")
if err != nil {
return index.EmptyPostings()
}
return reader.SortedPostings(p)
},
expSeriesSamples: []seriesSamples{
{
lset: map[string]string{"a": "c"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
{
lset: map[string]string{"a": "d"},
chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
},
},
},
} { } {
t.Run(tc.title, func(t *testing.T) { t.Run(tc.title, func(t *testing.T) {
blocks := make([]BlockReader, 0, len(tc.inputSeriesSamples)) blocks := make([]BlockReader, 0, len(tc.inputSeriesSamples))
@@ -982,7 +1038,11 @@ func TestCompaction_populateBlock(t *testing.T) {
iw := &mockIndexWriter{} iw := &mockIndexWriter{}
blockPopulator := DefaultBlockPopulator{} blockPopulator := DefaultBlockPopulator{}
err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{}) irPostingsFunc := AllSortedPostings
if tc.irPostingsFunc != nil {
irPostingsFunc = tc.irPostingsFunc
}
err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{}, irPostingsFunc)
if tc.expErr != nil { if tc.expErr != nil {
require.Error(t, err) require.Error(t, err)
require.Equal(t, tc.expErr.Error(), err.Error()) require.Equal(t, tc.expErr.Error(), err.Error())
@@ -1484,12 +1544,12 @@ func TestHeadCompactionWithHistograms(t *testing.T) {
maxt := head.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime). maxt := head.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime).
compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil) compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil)
require.NoError(t, err) require.NoError(t, err)
id, err := compactor.Write(head.opts.ChunkDirRoot, head, mint, maxt, nil) ids, err := compactor.Write(head.opts.ChunkDirRoot, head, mint, maxt, nil)
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, ulid.ULID{}, id) require.Len(t, ids, 1)
// Open the block and query it and check the histograms. // Open the block and query it and check the histograms.
block, err := OpenBlock(nil, path.Join(head.opts.ChunkDirRoot, id.String()), nil) block, err := OpenBlock(nil, path.Join(head.opts.ChunkDirRoot, ids[0].String()), nil)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { t.Cleanup(func() {
require.NoError(t, block.Close()) require.NoError(t, block.Close())
@@ -1598,8 +1658,8 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
sparseApp := sparseHead.Appender(context.Background()) sparseApp := sparseHead.Appender(context.Background())
numOldSeriesPerHistogram := 0 numOldSeriesPerHistogram := 0
var oldULID ulid.ULID var oldULIDs []ulid.ULID
var sparseULID ulid.ULID var sparseULIDs []ulid.ULID
var wg sync.WaitGroup var wg sync.WaitGroup
@@ -1626,9 +1686,9 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
maxt := sparseHead.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime). maxt := sparseHead.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime).
compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil) compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil)
require.NoError(t, err) require.NoError(t, err)
sparseULID, err = compactor.Write(sparseHead.opts.ChunkDirRoot, sparseHead, mint, maxt, nil) sparseULIDs, err = compactor.Write(sparseHead.opts.ChunkDirRoot, sparseHead, mint, maxt, nil)
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, ulid.ULID{}, sparseULID) require.Len(t, sparseULIDs, 1)
}() }()
wg.Add(1) wg.Add(1)
@@ -1677,15 +1737,15 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
maxt := oldHead.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime). maxt := oldHead.MaxTime() + 1 // Block intervals are half-open: [b.MinTime, b.MaxTime).
compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil) compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{DefaultBlockDuration}, chunkenc.NewPool(), nil)
require.NoError(t, err) require.NoError(t, err)
oldULID, err = compactor.Write(oldHead.opts.ChunkDirRoot, oldHead, mint, maxt, nil) oldULIDs, err = compactor.Write(oldHead.opts.ChunkDirRoot, oldHead, mint, maxt, nil)
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, ulid.ULID{}, oldULID) require.Len(t, oldULIDs, 1)
}() }()
wg.Wait() wg.Wait()
oldBlockDir := filepath.Join(oldHead.opts.ChunkDirRoot, oldULID.String()) oldBlockDir := filepath.Join(oldHead.opts.ChunkDirRoot, oldULIDs[0].String())
sparseBlockDir := filepath.Join(sparseHead.opts.ChunkDirRoot, sparseULID.String()) sparseBlockDir := filepath.Join(sparseHead.opts.ChunkDirRoot, sparseULIDs[0].String())
oldSize, err := fileutil.DirSize(oldBlockDir) oldSize, err := fileutil.DirSize(oldBlockDir)
require.NoError(t, err) require.NoError(t, err)
@@ -1846,3 +1906,22 @@ func TestCompactBlockMetas(t *testing.T) {
} }
require.Equal(t, expected, output) require.Equal(t, expected, output)
} }
func TestCompactEmptyResultBlockWithTombstone(t *testing.T) {
ctx := context.Background()
tmpdir := t.TempDir()
blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 10))
block, err := OpenBlock(nil, blockDir, nil)
require.NoError(t, err)
// Write tombstone covering the whole block.
err = block.Delete(ctx, 0, 10, labels.MustNewMatcher(labels.MatchEqual, defaultLabelName, "0"))
require.NoError(t, err)
c, err := NewLeveledCompactor(ctx, nil, log.NewNopLogger(), []int64{0}, nil, nil)
require.NoError(t, err)
ulids, err := c.Compact(tmpdir, []string{blockDir}, []*Block{block})
require.NoError(t, err)
require.Nil(t, ulids)
require.NoError(t, block.Close())
}

View file

@@ -192,12 +192,22 @@ type Options struct {
// NewCompactorFunc is a function that returns a TSDB compactor. // NewCompactorFunc is a function that returns a TSDB compactor.
NewCompactorFunc NewCompactorFunc NewCompactorFunc NewCompactorFunc
// BlockQuerierFunc is a function to return storage.Querier from a BlockReader.
BlockQuerierFunc BlockQuerierFunc
// BlockChunkQuerierFunc is a function to return storage.ChunkQuerier from a BlockReader.
BlockChunkQuerierFunc BlockChunkQuerierFunc
} }
type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error)
type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{} type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{}
type BlockQuerierFunc func(b BlockReader, mint, maxt int64) (storage.Querier, error)
type BlockChunkQuerierFunc func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error)
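A minimal sketch of wiring these new options, assuming a hypothetical helper openWithQuerierHooks; the pass-through closures delegate to the built-in constructors and are the natural seam for wrapping queriers with tracing or limits.

package example

import (
	"github.com/go-kit/log"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
)

// openWithQuerierHooks installs pass-through querier constructors; wrapping
// the returned querier here would add custom behaviour per block.
func openWithQuerierHooks(dir string) (*tsdb.DB, error) {
	opts := tsdb.DefaultOptions()
	opts.BlockQuerierFunc = func(b tsdb.BlockReader, mint, maxt int64) (storage.Querier, error) {
		return tsdb.NewBlockQuerier(b, mint, maxt)
	}
	opts.BlockChunkQuerierFunc = func(b tsdb.BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) {
		return tsdb.NewBlockChunkQuerier(b, mint, maxt)
	}
	return tsdb.Open(dir, log.NewNopLogger(), nil, opts, tsdb.NewDBStats())
}

TestBlockQuerierAndBlockChunkQuerier further down exercises the same hooks with filtering behaviour.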
// DB handles reads and writes of time series falling into // DB handles reads and writes of time series falling into
// a hashed partition of a series database. // a hashed partition of a series database.
type DB struct { type DB struct {
@@ -244,6 +254,10 @@ type DB struct {
writeNotified wlog.WriteNotified writeNotified wlog.WriteNotified
registerer prometheus.Registerer registerer prometheus.Registerer
blockQuerierFunc BlockQuerierFunc
blockChunkQuerierFunc BlockChunkQuerierFunc
} }
type dbMetrics struct { type dbMetrics struct {
@@ -559,10 +573,12 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue
db.closers = append(db.closers, head) db.closers = append(db.closers, head)
return &DB{ return &DB{
dir: db.dir, dir: db.dir,
logger: db.logger, logger: db.logger,
blocks: blocks, blocks: blocks,
head: head, head: head,
blockQuerierFunc: NewBlockQuerier,
blockChunkQuerierFunc: NewBlockChunkQuerier,
}, nil }, nil
} }
@@ -870,6 +886,18 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
} }
db.compactCancel = cancel db.compactCancel = cancel
if opts.BlockQuerierFunc == nil {
db.blockQuerierFunc = NewBlockQuerier
} else {
db.blockQuerierFunc = opts.BlockQuerierFunc
}
if opts.BlockChunkQuerierFunc == nil {
db.blockChunkQuerierFunc = NewBlockChunkQuerier
} else {
db.blockChunkQuerierFunc = opts.BlockChunkQuerierFunc
}
var wal, wbl *wlog.WL var wal, wbl *wlog.WL
segmentSize := wlog.DefaultSegmentSize segmentSize := wlog.DefaultSegmentSize
// Wal is enabled. // Wal is enabled.
@@ -1336,13 +1364,11 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID
for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize { for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize {
mint, maxt := t, t+blockSize mint, maxt := t, t+blockSize
// Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes. // Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes.
uid, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, meta) uids, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, meta)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if uid.Compare(ulid.ULID{}) != 0 { ulids = append(ulids, uids...)
ulids = append(ulids, uid)
}
} }
if len(ulids) == 0 { if len(ulids) == 0 {
@@ -1364,19 +1390,19 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID
// compactHead compacts the given RangeHead. // compactHead compacts the given RangeHead.
// The compaction mutex should be held before calling this method. // The compaction mutex should be held before calling this method.
func (db *DB) compactHead(head *RangeHead) error { func (db *DB) compactHead(head *RangeHead) error {
uid, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil) uids, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil)
if err != nil { if err != nil {
return fmt.Errorf("persist head block: %w", err) return fmt.Errorf("persist head block: %w", err)
} }
if err := db.reloadBlocks(); err != nil { if err := db.reloadBlocks(); err != nil {
if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { multiErr := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err))
return tsdb_errors.NewMulti( for _, uid := range uids {
fmt.Errorf("reloadBlocks blocks: %w", err), if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll), multiErr.Add(fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
).Err() }
} }
return fmt.Errorf("reloadBlocks blocks: %w", err) return multiErr.Err()
} }
if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil { if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil {
return fmt.Errorf("head memory truncate: %w", err) return fmt.Errorf("head memory truncate: %w", err)
@@ -1411,16 +1437,19 @@ func (db *DB) compactBlocks() (err error) {
default: default:
} }
uid, err := db.compactor.Compact(db.dir, plan, db.blocks) uids, err := db.compactor.Compact(db.dir, plan, db.blocks)
if err != nil { if err != nil {
return fmt.Errorf("compact %s: %w", plan, err) return fmt.Errorf("compact %s: %w", plan, err)
} }
if err := db.reloadBlocks(); err != nil { if err := db.reloadBlocks(); err != nil {
if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil { errs := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err))
return fmt.Errorf("delete compacted block after failed db reloadBlocks:%s: %w", uid, err) for _, uid := range uids {
if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
errs.Add(fmt.Errorf("delete persisted block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
}
} }
return fmt.Errorf("reloadBlocks blocks: %w", err) return errs.Err()
} }
} }
@@ -1541,12 +1570,15 @@ func (db *DB) reloadBlocks() (err error) {
oldBlocks := db.blocks oldBlocks := db.blocks
db.blocks = toLoad db.blocks = toLoad
blockMetas := make([]BlockMeta, 0, len(toLoad)) // Only check overlapping blocks when overlapping compaction is enabled.
for _, b := range toLoad { if db.opts.EnableOverlappingCompaction {
blockMetas = append(blockMetas, b.Meta()) blockMetas := make([]BlockMeta, 0, len(toLoad))
} for _, b := range toLoad {
if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { blockMetas = append(blockMetas, b.Meta())
level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) }
if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String())
}
} }
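For reference, a minimal sketch of the option gating this scan; EnableOverlappingCompaction is the existing tsdb.Options field, and after this change disabling it also skips the overlap detection on every reloadBlocks.

package example

import "github.com/prometheus/prometheus/tsdb"

// optionsWithoutOverlapChecks disables overlapping (vertical) compaction,
// which now also skips the overlap scan in reloadBlocks.
func optionsWithoutOverlapChecks() *tsdb.Options {
	opts := tsdb.DefaultOptions()
	opts.EnableOverlappingCompaction = false
	return opts
}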
// Append blocks to old, deletable blocks, so we can close them. // Append blocks to old, deletable blocks, so we can close them.
@@ -1960,7 +1992,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
if maxt >= db.head.MinTime() { if maxt >= db.head.MinTime() {
rh := NewRangeHead(db.head, mint, maxt) rh := NewRangeHead(db.head, mint, maxt)
var err error var err error
inOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt) inOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
if err != nil { if err != nil {
return nil, fmt.Errorf("open block querier for head %s: %w", rh, err) return nil, fmt.Errorf("open block querier for head %s: %w", rh, err)
} }
@@ -1977,7 +2009,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
} }
if getNew { if getNew {
rh := NewRangeHead(db.head, newMint, maxt) rh := NewRangeHead(db.head, newMint, maxt)
inOrderHeadQuerier, err = NewBlockQuerier(rh, newMint, maxt) inOrderHeadQuerier, err = db.blockQuerierFunc(rh, newMint, maxt)
if err != nil { if err != nil {
return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err) return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err)
} }
@@ -1991,9 +2023,9 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
var err error var err error
outOfOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt) outOfOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
if err != nil { if err != nil {
// If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead. // If BlockQuerierFunc() failed, make sure to clean up the pending read created by NewOOORangeHead.
rh.isoState.Close() rh.isoState.Close()
return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err) return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err)
@@ -2003,7 +2035,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
} }
for _, b := range blocks { for _, b := range blocks {
q, err := NewBlockQuerier(b, mint, maxt) q, err := db.blockQuerierFunc(b, mint, maxt)
if err != nil { if err != nil {
return nil, fmt.Errorf("open querier for block %s: %w", b, err) return nil, fmt.Errorf("open querier for block %s: %w", b, err)
} }
@@ -2041,7 +2073,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
if maxt >= db.head.MinTime() { if maxt >= db.head.MinTime() {
rh := NewRangeHead(db.head, mint, maxt) rh := NewRangeHead(db.head, mint, maxt)
inOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) inOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt)
if err != nil { if err != nil {
return nil, fmt.Errorf("open querier for head %s: %w", rh, err) return nil, fmt.Errorf("open querier for head %s: %w", rh, err)
} }
@@ -2058,7 +2090,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
} }
if getNew { if getNew {
rh := NewRangeHead(db.head, newMint, maxt) rh := NewRangeHead(db.head, newMint, maxt)
inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, newMint, maxt) inOrderHeadQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt)
if err != nil { if err != nil {
return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err) return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err)
} }
@@ -2071,8 +2103,11 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
outOfOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) outOfOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt)
if err != nil { if err != nil {
// If BlockChunkQuerierFunc() failed, make sure to clean up the pending read created by NewOOORangeHead.
rh.isoState.Close()
return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err) return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err)
} }
@@ -2080,7 +2115,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
} }
for _, b := range blocks { for _, b := range blocks {
q, err := NewBlockChunkQuerier(b, mint, maxt) q, err := db.blockChunkQuerierFunc(b, mint, maxt)
if err != nil { if err != nil {
return nil, fmt.Errorf("open querier for block %s: %w", b, err) return nil, fmt.Errorf("open querier for block %s: %w", b, err)
} }
@@ -2149,7 +2184,7 @@ func (db *DB) CleanTombstones() (err error) {
cleanUpCompleted = true cleanUpCompleted = true
for _, pb := range db.Blocks() { for _, pb := range db.Blocks() {
uid, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor) uids, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor)
if cleanErr != nil { if cleanErr != nil {
return fmt.Errorf("clean tombstones: %s: %w", pb.Dir(), cleanErr) return fmt.Errorf("clean tombstones: %s: %w", pb.Dir(), cleanErr)
} }
@@ -2173,7 +2208,7 @@ func (db *DB) CleanTombstones() (err error) {
} }
// Delete new block if it was created. // Delete new block if it was created.
if uid != nil && *uid != (ulid.ULID{}) { for _, uid := range uids {
dir := filepath.Join(db.Dir(), uid.String()) dir := filepath.Join(db.Dir(), uid.String())
if err := os.RemoveAll(dir); err != nil { if err := os.RemoveAll(dir); err != nil {
level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err)

View file

@@ -1431,9 +1431,9 @@ func (*mockCompactorFailing) Plan(string) ([]string, error) {
return nil, nil return nil, nil
} }
func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) (ulid.ULID, error) { func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) ([]ulid.ULID, error) {
if len(c.blocks) >= c.max { if len(c.blocks) >= c.max {
return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail") return []ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail")
} }
block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil) block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil)
@@ -1452,11 +1452,11 @@ func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *
require.Equal(c.t, expectedBlocks, actualBlockDirs) require.Equal(c.t, expectedBlocks, actualBlockDirs)
return block.Meta().ULID, nil return []ulid.ULID{block.Meta().ULID}, nil
} }
func (*mockCompactorFailing) Compact(string, []string, []*Block) (ulid.ULID, error) { func (*mockCompactorFailing) Compact(string, []string, []*Block) ([]ulid.ULID, error) {
return ulid.ULID{}, nil return []ulid.ULID{}, nil
} }
func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) { func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) {
@@ -6804,9 +6804,9 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
for _, b := range blocks { for _, b := range blocks {
blockDirs = append(blockDirs, b.Dir()) blockDirs = append(blockDirs, b.Dir())
} }
id, err := db.compactor.Compact(db.Dir(), blockDirs, blocks) ids, err := db.compactor.Compact(db.Dir(), blockDirs, blocks)
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, ulid.ULID{}, id) require.Len(t, ids, 1)
require.NoError(t, db.reload()) require.NoError(t, db.reload())
require.Len(t, db.Blocks(), 1) require.Len(t, db.Blocks(), 1)
@@ -7068,19 +7068,19 @@ func requireEqualOOOSamples(t *testing.T, expectedSamples int, db *DB) {
type mockCompactorFn struct { type mockCompactorFn struct {
planFn func() ([]string, error) planFn func() ([]string, error)
compactFn func() (ulid.ULID, error) compactFn func() ([]ulid.ULID, error)
writeFn func() (ulid.ULID, error) writeFn func() ([]ulid.ULID, error)
} }
func (c *mockCompactorFn) Plan(_ string) ([]string, error) { func (c *mockCompactorFn) Plan(_ string) ([]string, error) {
return c.planFn() return c.planFn()
} }
func (c *mockCompactorFn) Compact(_ string, _ []string, _ []*Block) (ulid.ULID, error) { func (c *mockCompactorFn) Compact(_ string, _ []string, _ []*Block) ([]ulid.ULID, error) {
return c.compactFn() return c.compactFn()
} }
func (c *mockCompactorFn) Write(_ string, _ BlockReader, _, _ int64, _ *BlockMeta) (ulid.ULID, error) { func (c *mockCompactorFn) Write(_ string, _ BlockReader, _, _ int64, _ *BlockMeta) ([]ulid.ULID, error) {
return c.writeFn() return c.writeFn()
} }
@@ -7112,11 +7112,11 @@ func TestAbortBlockCompactions(t *testing.T) {
// Our custom Plan() will always return something to compact. // Our custom Plan() will always return something to compact.
return []string{"1", "2", "3"}, nil return []string{"1", "2", "3"}, nil
}, },
compactFn: func() (ulid.ULID, error) { compactFn: func() ([]ulid.ULID, error) {
return ulid.ULID{}, nil return []ulid.ULID{}, nil
}, },
writeFn: func() (ulid.ULID, error) { writeFn: func() ([]ulid.ULID, error) {
return ulid.ULID{}, nil return []ulid.ULID{}, nil
}, },
} }
@@ -7135,11 +7135,11 @@ func TestNewCompactorFunc(t *testing.T) {
planFn: func() ([]string, error) { planFn: func() ([]string, error) {
return []string{block1.String(), block2.String()}, nil return []string{block1.String(), block2.String()}, nil
}, },
compactFn: func() (ulid.ULID, error) { compactFn: func() ([]ulid.ULID, error) {
return block1, nil return []ulid.ULID{block1}, nil
}, },
writeFn: func() (ulid.ULID, error) { writeFn: func() ([]ulid.ULID, error) {
return block2, nil return []ulid.ULID{block2}, nil
}, },
}, nil }, nil
} }
@@ -7150,10 +7150,87 @@ func TestNewCompactorFunc(t *testing.T) {
plans, err := db.compactor.Plan("") plans, err := db.compactor.Plan("")
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, []string{block1.String(), block2.String()}, plans) require.Equal(t, []string{block1.String(), block2.String()}, plans)
ulid, err := db.compactor.Compact("", nil, nil) ulids, err := db.compactor.Compact("", nil, nil)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, block1, ulid) require.Len(t, ulids, 1)
ulid, err = db.compactor.Write("", nil, 0, 1, nil) require.Equal(t, block1, ulids[0])
ulids, err = db.compactor.Write("", nil, 0, 1, nil)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, block2, ulid) require.Len(t, ulids, 1)
require.Equal(t, block2, ulids[0])
}
func TestBlockQuerierAndBlockChunkQuerier(t *testing.T) {
opts := DefaultOptions()
opts.BlockQuerierFunc = func(b BlockReader, mint, maxt int64) (storage.Querier, error) {
// Only blocks with hints can be queried.
if len(b.Meta().Compaction.Hints) > 0 {
return NewBlockQuerier(b, mint, maxt)
}
return storage.NoopQuerier(), nil
}
opts.BlockChunkQuerierFunc = func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) {
// Only level 4 compaction blocks can be queried.
if b.Meta().Compaction.Level == 4 {
return NewBlockChunkQuerier(b, mint, maxt)
}
return storage.NoopChunkedQuerier(), nil
}
db := openTestDB(t, opts, nil)
defer func() {
require.NoError(t, db.Close())
}()
metas := []BlockMeta{
{Compaction: BlockMetaCompaction{Hints: []string{"test-hint"}}},
{Compaction: BlockMetaCompaction{Level: 4}},
}
for i := range metas {
// Include the block ID in the series labels to identify which block got touched.
serieses := []storage.Series{storage.NewListSeries(labels.FromMap(map[string]string{"block": fmt.Sprintf("block-%d", i), labels.MetricName: "test_metric"}), []chunks.Sample{sample{t: 0, f: 1}})}
blockDir := createBlock(t, db.Dir(), serieses)
b, err := OpenBlock(db.logger, blockDir, db.chunkPool)
require.NoError(t, err)
// Overwrite meta.json with the compaction section for testing purposes.
b.meta.Compaction = metas[i].Compaction
_, err = writeMetaFile(db.logger, blockDir, &b.meta)
require.NoError(t, err)
require.NoError(t, b.Close())
}
require.NoError(t, db.reloadBlocks())
require.Len(t, db.Blocks(), 2)
querier, err := db.Querier(0, 500)
require.NoError(t, err)
defer querier.Close()
matcher := labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test_metric")
seriesSet := querier.Select(context.Background(), false, nil, matcher)
count := 0
var lbls labels.Labels
for seriesSet.Next() {
count++
lbls = seriesSet.At().Labels()
}
require.NoError(t, seriesSet.Err())
require.Equal(t, 1, count)
// Make sure only block-0 is queried.
require.Equal(t, "block-0", lbls.Get("block"))
chunkQuerier, err := db.ChunkQuerier(0, 500)
require.NoError(t, err)
defer chunkQuerier.Close()
css := chunkQuerier.Select(context.Background(), false, nil, matcher)
count = 0
// Reset lbls variable.
lbls = labels.EmptyLabels()
for css.Next() {
count++
lbls = css.At().Labels()
}
require.NoError(t, css.Err())
require.Equal(t, 1, count)
// Make sure only block-1 is queried.
require.Equal(t, "block-1", lbls.Get("block"))
} }

View file

@@ -1552,7 +1552,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) {
// Drop old chunks and remember series IDs and hashes if they can be // Drop old chunks and remember series IDs and hashes if they can be
// deleted entirely. // deleted entirely.
deleted, chunksRemoved, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef) deleted, affected, chunksRemoved, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef)
seriesRemoved := len(deleted) seriesRemoved := len(deleted)
h.metrics.seriesRemoved.Add(float64(seriesRemoved)) h.metrics.seriesRemoved.Add(float64(seriesRemoved))
@@ -1561,7 +1561,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) {
h.numSeries.Sub(uint64(seriesRemoved)) h.numSeries.Sub(uint64(seriesRemoved))
// Remove deleted series IDs from the postings lists. // Remove deleted series IDs from the postings lists.
h.postings.Delete(deleted) h.postings.Delete(deleted, affected)
// Remove tombstones referring to the deleted series. // Remove tombstones referring to the deleted series.
h.tombstones.DeleteTombstones(deleted) h.tombstones.DeleteTombstones(deleted)
@@ -1869,9 +1869,10 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st
// but the returned map goes into postings.Delete() which expects a map[storage.SeriesRef]struct // but the returned map goes into postings.Delete() which expects a map[storage.SeriesRef]struct
// and there's no easy way to cast maps. // and there's no easy way to cast maps.
// minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series. // minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series.
func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) { func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ map[labels.Label]struct{}, _ int, _, _ int64, minMmapFile int) {
var ( var (
deleted = map[storage.SeriesRef]struct{}{} deleted = map[storage.SeriesRef]struct{}{}
affected = map[labels.Label]struct{}{}
rmChunks = 0 rmChunks = 0
actualMint int64 = math.MaxInt64 actualMint int64 = math.MaxInt64
minOOOTime int64 = math.MaxInt64 minOOOTime int64 = math.MaxInt64
@@ -1927,6 +1928,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
} }
deleted[storage.SeriesRef(series.ref)] = struct{}{} deleted[storage.SeriesRef(series.ref)] = struct{}{}
series.lset.Range(func(l labels.Label) { affected[l] = struct{}{} })
s.hashes[hashShard].del(hash, series.ref) s.hashes[hashShard].del(hash, series.ref)
delete(s.series[refShard], series.ref) delete(s.series[refShard], series.ref)
deletedForCallback[series.ref] = series.lset deletedForCallback[series.ref] = series.lset
@@ -1938,7 +1940,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
actualMint = mint actualMint = mint
} }
return deleted, rmChunks, actualMint, minOOOTime, minMmapFile return deleted, affected, rmChunks, actualMint, minOOOTime, minMmapFile
} }
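A minimal sketch of the amended MemPostings.Delete contract that this feeds into: alongside the deleted series refs, callers now pass the set of labels those series carried, so only the affected postings lists are scanned. The series ref and label below are illustrative.

package example

import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/index"
)

// dropSeries shows the new two-argument Delete: refs of removed series plus
// the labels they carried, limiting the scan to affected postings lists only.
func dropSeries(p *index.MemPostings) {
	deleted := map[storage.SeriesRef]struct{}{42: {}} // illustrative series ref
	affected := map[labels.Label]struct{}{
		{Name: "job", Value: "api"}: {}, // illustrative label
	}
	p.Delete(deleted, affected)
}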
// The iterForDeletion function iterates through all series, invoking the checkDeletedFunc for each. // The iterForDeletion function iterates through all series, invoking the checkDeletedFunc for each.

View file

@@ -814,6 +814,80 @@ func TestHead_UnknownWALRecord(t *testing.T) {
require.NoError(t, head.Close()) require.NoError(t, head.Close())
} }
// BenchmarkHead_Truncate is quite heavy, so consider running it with
// -benchtime=10x or similar to get more stable and comparable results.
func BenchmarkHead_Truncate(b *testing.B) {
const total = 1e6
prepare := func(b *testing.B, churn int) *Head {
h, _ := newTestHead(b, 1000, wlog.CompressionNone, false)
b.Cleanup(func() {
require.NoError(b, h.Close())
})
h.initTime(0)
internedItoa := map[int]string{}
var mtx sync.RWMutex
itoa := func(i int) string {
mtx.RLock()
s, ok := internedItoa[i]
mtx.RUnlock()
if ok {
return s
}
mtx.Lock()
s = strconv.Itoa(i)
internedItoa[i] = s
mtx.Unlock()
return s
}
allSeries := [total]labels.Labels{}
nameValues := make([]string, 0, 100)
for i := 0; i < total; i++ {
nameValues = nameValues[:0]
// A thousand labels like lbl_x_of_1000, each with total/1000 values.
thousand := "lbl_" + itoa(i%1000) + "_of_1000"
nameValues = append(nameValues, thousand, itoa(i/1000))
// A hundred labels like lbl_x_of_100, each with total/100 values.
hundred := "lbl_" + itoa(i%100) + "_of_100"
nameValues = append(nameValues, hundred, itoa(i/100))
if i%13 == 0 {
ten := "lbl_" + itoa(i%10) + "_of_10"
nameValues = append(nameValues, ten, itoa(i%10))
}
allSeries[i] = labels.FromStrings(append(nameValues, "first", "a", "second", "a", "third", "a")...)
s, _, _ := h.getOrCreate(allSeries[i].Hash(), allSeries[i])
s.mmappedChunks = []*mmappedChunk{
{minTime: 1000 * int64(i/churn), maxTime: 999 + 1000*int64(i/churn)},
}
}
return h
}
for _, churn := range []int{10, 100, 1000} {
b.Run(fmt.Sprintf("churn=%d", churn), func(b *testing.B) {
if b.N > total/churn {
// Just to make sure that the benchmark still makes sense.
panic("benchmark not prepared")
}
h := prepare(b, churn)
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.NoError(b, h.Truncate(1000*int64(i)))
// Make sure the benchmark is meaningful and is actually truncating the expected number of series.
require.Equal(b, total-churn*i, int(h.NumSeries()))
}
})
}
}
func TestHead_Truncate(t *testing.T) { func TestHead_Truncate(t *testing.T) {
h, _ := newTestHead(t, 1000, wlog.CompressionNone, false) h, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
defer func() { defer func() {
