Merge pull request #505 from grafana/zenador/sync-upstream-22-may-2023

Sync upstream 24 May 2023
zenador 2023-05-24 19:58:41 +08:00 committed by GitHub
commit 8d6690e86a
80 changed files with 3969 additions and 1028 deletions

View file

@@ -14,8 +14,10 @@ build:
     all:
         - netgo
         - builtinassets
+        - stringlabels
     windows:
         - builtinassets
+        - stringlabels
 flags: -a
 ldflags: |
     -X github.com/prometheus/common/version.Version={{.Version}}
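The `.promu.yml` change above turns on the `stringlabels` Go build tag for release builds. As a rough sketch (assuming a checkout of this branch and a standard Go toolchain; the project's official builds go through promu, not this command), the same tag can be applied to a manual build:

```
# Build with the smaller Labels representation enabled (local build sketch).
go build -tags stringlabels ./cmd/prometheus
```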

View file

@@ -1,5 +1,29 @@
 # Changelog
 
+## 2.44.0 / 2023-05-13
+
+This version is built with Go tag `stringlabels`, to use the smaller data
+structure for Labels that was optional in the previous release. For more
+details about this code change see #10991.
+
+* [CHANGE] Remote-write: Raise default samples per send to 2,000. #12203
+* [FEATURE] Remote-read: Handle native histograms. #12085, #12192
+* [FEATURE] Promtool: Health and readiness check of prometheus server in CLI. #12096
+* [FEATURE] PromQL: Add `query_samples_total` metric, the total number of samples loaded by all queries. #12251
+* [ENHANCEMENT] Storage: Optimise buffer used to iterate through samples. #12326
+* [ENHANCEMENT] Scrape: Reduce memory allocations on target labels. #12084
+* [ENHANCEMENT] PromQL: Use faster heap method for `topk()` / `bottomk()`. #12190
+* [ENHANCEMENT] Rules API: Allow filtering by rule name. #12270
+* [ENHANCEMENT] Native Histograms: Various fixes and improvements. #11687, #12264, #12272
+* [ENHANCEMENT] UI: Search of scraping pools is now case-insensitive. #12207
+* [ENHANCEMENT] TSDB: Add an affirmative log message for successful WAL repair. #12135
+* [BUGFIX] TSDB: Block compaction failed when shutting down. #12179
+* [BUGFIX] TSDB: Out-of-order chunks could be ignored if the write-behind log was deleted. #12127
+
+## 2.43.1 / 2023-05-03
+
+* [BUGFIX] Labels: `Set()` after `Del()` would be ignored, which broke some relabeling rules. #12322
+
 ## 2.43.0 / 2023-03-21
 
 We are working on some performance improvements in Prometheus, which are only
@@ -13,7 +37,7 @@ the gains on their production architecture. We are providing release artefacts
 improvements for testing. #10991
 
 * [FEATURE] Promtool: Add HTTP client configuration to query commands. #11487
-* [FEATURE] Scrape: Add `include_scrape_configs` to include scrape configs from different files. #12019
+* [FEATURE] Scrape: Add `scrape_config_files` to include scrape configs from different files. #12019
 * [FEATURE] HTTP client: Add `no_proxy` to exclude URLs from proxied requests. #12098
 * [FEATURE] HTTP client: Add `proxy_from_environment` to read proxies from env variables. #12098
 * [ENHANCEMENT] API: Add support for setting lookback delta per query via the API. #12088

View file

@@ -1 +1 @@
-2.43.0
+2.44.0

View file

@@ -337,6 +337,9 @@ func main() {
     serverOnlyFlag(a, "storage.tsdb.head-chunks-write-queue-size", "Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental.").
         Default("0").IntVar(&cfg.tsdb.HeadChunksWriteQueueSize)
 
+    serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk.").
+        Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk)
+
     agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
         Default("data-agent/").StringVar(&cfg.agentStoragePath)
@@ -1050,6 +1053,7 @@ func main() {
             startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)
             localStorage.Set(db, startTimeMargin)
+            db.SetWriteNotified(remoteStorage)
             close(dbOpen)
             <-cancel
             return nil
@@ -1482,11 +1486,11 @@ func (s *readyStorage) Snapshot(dir string, withHead bool) error {
 }
 
 // Stats implements the api_v1.TSDBAdminStats interface.
-func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) {
+func (s *readyStorage) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) {
     if x := s.get(); x != nil {
         switch db := x.(type) {
         case *tsdb.DB:
-            return db.Head().Stats(statsByLabelName), nil
+            return db.Head().Stats(statsByLabelName, limit), nil
         case *agent.DB:
             return nil, agent.ErrUnsupported
         default:
@@ -1543,6 +1547,7 @@ type tsdbOptions struct {
     NoLockfile               bool
     WALCompression           bool
     HeadChunksWriteQueueSize int
+    SamplesPerChunk          int
     StripeSize               int
     MinBlockDuration         model.Duration
     MaxBlockDuration         model.Duration
@@ -1563,6 +1568,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
     AllowOverlappingCompaction: true,
     WALCompression:             opts.WALCompression,
     HeadChunksWriteQueueSize:   opts.HeadChunksWriteQueueSize,
+    SamplesPerChunk:            opts.SamplesPerChunk,
     StripeSize:                 opts.StripeSize,
     MinBlockDuration:           int64(time.Duration(opts.MinBlockDuration) / time.Millisecond),
     MaxBlockDuration:           int64(time.Duration(opts.MaxBlockDuration) / time.Millisecond),
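The first hunk above registers `--storage.tsdb.samples-per-chunk` as a hidden flag defaulting to 120, the value that was previously hard-coded. A sketch of how it could be overridden (the value 240 is purely illustrative, and because the flag is hidden it does not show up in `--help`):

```
prometheus --config.file=prometheus.yml --storage.tsdb.samples-per-chunk=240
```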

View file

@@ -146,8 +146,9 @@ var (
     // DefaultScrapeConfig is the default scrape configuration.
     DefaultScrapeConfig = ScrapeConfig{
-        // ScrapeTimeout and ScrapeInterval default to the
-        // configured globals.
+        // ScrapeTimeout and ScrapeInterval default to the configured
+        // globals.
+        ScrapeClassicHistograms: false,
         MetricsPath: "/metrics",
         Scheme:      "http",
         HonorLabels: false,
@@ -467,6 +468,8 @@ type ScrapeConfig struct {
     ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
     // The timeout for scraping targets of this config.
     ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
+    // Whether to scrape a classic histogram that is also exposed as a native histogram.
+    ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
     // The HTTP resource path on which to fetch metrics from targets.
     MetricsPath string `yaml:"metrics_path,omitempty"`
     // The URL scheme with which to fetch metrics from targets.
@@ -489,6 +492,9 @@ type ScrapeConfig struct {
     // More than this label value length post metric-relabeling will cause the
     // scrape to fail.
     LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
+    // More than this many buckets in a native histogram will cause the scrape to
+    // fail.
+    NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"`
 
     // We cannot do proper Go type embedding below as the parser will then parse
     // values arbitrarily into the overflow maps of further-down types.

View file

@@ -363,6 +363,7 @@ var expectedConf = &Config{
     ServiceDiscoveryConfigs: discovery.Configs{
         &consul.SDConfig{
             Server:      "localhost:1234",
+            PathPrefix:  "/consul",
             Token:       "mysecret",
             Services:    []string{"nginx", "cache", "mysql"},
             ServiceTags: []string{"canary", "v1"},

View file

@@ -151,6 +151,7 @@ scrape_configs:
   consul_sd_configs:
     - server: "localhost:1234"
       token: mysecret
+      path_prefix: /consul
       services: ["nginx", "cache", "mysql"]
       tags: ["canary", "v1"]
       node_meta:

View file

@@ -111,6 +111,7 @@ func init() {
 // SDConfig is the configuration for Consul service discovery.
 type SDConfig struct {
     Server     string        `yaml:"server,omitempty"`
+    PathPrefix string        `yaml:"path_prefix,omitempty"`
     Token      config.Secret `yaml:"token,omitempty"`
     Datacenter string        `yaml:"datacenter,omitempty"`
     Namespace  string        `yaml:"namespace,omitempty"`
@@ -211,6 +212,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
     clientConf := &consul.Config{
         Address:    conf.Server,
+        PathPrefix: conf.PathPrefix,
         Scheme:     conf.Scheme,
         Datacenter: conf.Datacenter,
         Namespace:  conf.Namespace,

View file

@@ -305,7 +305,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
         }
 
         if e.withNodeMetadata {
+            if addr.NodeName != nil {
                 target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName)
+            } else if addr.TargetRef != nil && addr.TargetRef.Kind == "Node" {
+                target = addNodeLabels(target, e.nodeInf, e.logger, &addr.TargetRef.Name)
+            }
         }
 
         pod := e.resolvePodRef(addr.TargetRef)
@@ -466,5 +470,6 @@ func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.L
         nodeLabelset[model.LabelName(nodeLabelPrefix+ln)] = lv(v)
         nodeLabelset[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue
     }
+
     return tg.Merge(nodeLabelset)
 }

View file

@@ -69,6 +69,24 @@ func makeEndpoints() *v1.Endpoints {
                 },
             },
         },
+        {
+            Addresses: []v1.EndpointAddress{
+                {
+                    IP: "6.7.8.9",
+                    TargetRef: &v1.ObjectReference{
+                        Kind: "Node",
+                        Name: "barbaz",
+                    },
+                },
+            },
+            Ports: []v1.EndpointPort{
+                {
+                    Name:     "testport",
+                    Port:     9002,
+                    Protocol: v1.ProtocolTCP,
+                },
+            },
+        },
     },
 }
 }
@@ -106,6 +124,14 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) {
                 "__meta_kubernetes_endpoint_port_protocol": "TCP",
                 "__meta_kubernetes_endpoint_ready":         "false",
             },
+            {
+                "__address__": "6.7.8.9:9002",
+                "__meta_kubernetes_endpoint_address_target_kind": "Node",
+                "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+                "__meta_kubernetes_endpoint_port_name":           "testport",
+                "__meta_kubernetes_endpoint_port_protocol":       "TCP",
+                "__meta_kubernetes_endpoint_ready":               "true",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_namespace": "default",
@@ -398,6 +424,14 @@ func TestEndpointsDiscoveryWithService(t *testing.T) {
                 "__meta_kubernetes_endpoint_port_protocol": "TCP",
                 "__meta_kubernetes_endpoint_ready":         "false",
             },
+            {
+                "__address__": "6.7.8.9:9002",
+                "__meta_kubernetes_endpoint_address_target_kind": "Node",
+                "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+                "__meta_kubernetes_endpoint_port_name":           "testport",
+                "__meta_kubernetes_endpoint_port_protocol":       "TCP",
+                "__meta_kubernetes_endpoint_ready":               "true",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_namespace": "default",
@@ -466,6 +500,14 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
                 "__meta_kubernetes_endpoint_port_protocol": "TCP",
                 "__meta_kubernetes_endpoint_ready":         "false",
             },
+            {
+                "__address__": "6.7.8.9:9002",
+                "__meta_kubernetes_endpoint_address_target_kind": "Node",
+                "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+                "__meta_kubernetes_endpoint_port_name":           "testport",
+                "__meta_kubernetes_endpoint_port_protocol":       "TCP",
+                "__meta_kubernetes_endpoint_ready":               "true",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_namespace": "default",
@@ -484,8 +526,10 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
 
 func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
     metadataConfig := AttachMetadataConfig{Node: true}
-    nodeLabels := map[string]string{"az": "us-east1"}
-    node := makeNode("foobar", "", "", nodeLabels, nil)
+    nodeLabels1 := map[string]string{"az": "us-east1"}
+    nodeLabels2 := map[string]string{"az": "us-west2"}
+    node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+    node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
     svc := &v1.Service{
         ObjectMeta: metav1.ObjectMeta{
             Name: "testendpoints",
@@ -495,7 +539,7 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
         },
     }
 
-    n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node)
+    n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node1, node2)
 
     k8sDiscoveryTest{
         discovery: n,
@@ -526,6 +570,17 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
                 "__meta_kubernetes_endpoint_port_protocol": "TCP",
                 "__meta_kubernetes_endpoint_ready":         "false",
             },
+            {
+                "__address__": "6.7.8.9:9002",
+                "__meta_kubernetes_endpoint_address_target_kind": "Node",
+                "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+                "__meta_kubernetes_endpoint_port_name":           "testport",
+                "__meta_kubernetes_endpoint_port_protocol":       "TCP",
+                "__meta_kubernetes_endpoint_ready":               "true",
+                "__meta_kubernetes_node_label_az":                "us-west2",
+                "__meta_kubernetes_node_labelpresent_az":         "true",
+                "__meta_kubernetes_node_name":                    "barbaz",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_namespace": "default",
@@ -541,8 +596,10 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 }
 
 func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
-    nodeLabels := map[string]string{"az": "us-east1"}
-    nodes := makeNode("foobar", "", "", nodeLabels, nil)
+    nodeLabels1 := map[string]string{"az": "us-east1"}
+    nodeLabels2 := map[string]string{"az": "us-west2"}
+    node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+    node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
     metadataConfig := AttachMetadataConfig{Node: true}
     svc := &v1.Service{
         ObjectMeta: metav1.ObjectMeta{
@@ -553,13 +610,13 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
         },
     }
 
-    n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), nodes, svc)
+    n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), node1, node2, svc)
 
     k8sDiscoveryTest{
         discovery: n,
         afterStart: func() {
-            nodes.Labels["az"] = "eu-central1"
-            c.CoreV1().Nodes().Update(context.Background(), nodes, metav1.UpdateOptions{})
+            node1.Labels["az"] = "eu-central1"
+            c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{})
         },
         expectedMaxItems: 2,
         expectedRes: map[string]*targetgroup.Group{
@@ -572,7 +629,7 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
                 "__meta_kubernetes_endpoint_port_name":     "testport",
                 "__meta_kubernetes_endpoint_port_protocol": "TCP",
                 "__meta_kubernetes_endpoint_ready":         "true",
-                "__meta_kubernetes_node_label_az":          "eu-central1",
+                "__meta_kubernetes_node_label_az":          "us-east1",
                 "__meta_kubernetes_node_labelpresent_az":   "true",
                 "__meta_kubernetes_node_name":              "foobar",
             },
@@ -588,6 +645,17 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
                 "__meta_kubernetes_endpoint_port_protocol": "TCP",
                 "__meta_kubernetes_endpoint_ready":         "false",
             },
+            {
+                "__address__": "6.7.8.9:9002",
+                "__meta_kubernetes_endpoint_address_target_kind": "Node",
+                "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+                "__meta_kubernetes_endpoint_port_name":           "testport",
+                "__meta_kubernetes_endpoint_port_protocol":       "TCP",
+                "__meta_kubernetes_endpoint_ready":               "true",
+                "__meta_kubernetes_node_label_az":                "us-west2",
+                "__meta_kubernetes_node_labelpresent_az":         "true",
+                "__meta_kubernetes_node_name":                    "barbaz",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_namespace": "default",
@@ -699,6 +767,14 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
                 "__meta_kubernetes_endpoint_port_protocol": "TCP",
                 "__meta_kubernetes_endpoint_ready":         "false",
             },
+            {
+                "__address__": "6.7.8.9:9002",
+                "__meta_kubernetes_endpoint_address_target_kind": "Node",
+                "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+                "__meta_kubernetes_endpoint_port_name":           "testport",
+                "__meta_kubernetes_endpoint_port_protocol":       "TCP",
+                "__meta_kubernetes_endpoint_ready":               "true",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_namespace": "ns1",
@@ -815,6 +891,14 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
                 "__meta_kubernetes_endpoint_port_protocol": "TCP",
                 "__meta_kubernetes_endpoint_ready":         "false",
             },
+            {
+                "__address__": "6.7.8.9:9002",
+                "__meta_kubernetes_endpoint_address_target_kind": "Node",
+                "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+                "__meta_kubernetes_endpoint_port_name":           "testport",
+                "__meta_kubernetes_endpoint_port_protocol":       "TCP",
+                "__meta_kubernetes_endpoint_ready":               "true",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_namespace": "own-ns",

View file

@@ -339,8 +339,12 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
         }
 
         if e.withNodeMetadata {
+            if ep.targetRef() != nil && ep.targetRef().Kind == "Node" {
+                target = addNodeLabels(target, e.nodeInf, e.logger, &ep.targetRef().Name)
+            } else {
                 target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename())
+            }
         }
 
         pod := e.resolvePodRef(ep.targetRef())
         if pod == nil {

View file

@@ -90,6 +90,17 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
             Serving:     boolptr(true),
             Terminating: boolptr(true),
         },
+    }, {
+        Addresses: []string{"4.5.6.7"},
+        Conditions: v1.EndpointConditions{
+            Ready:       boolptr(true),
+            Serving:     boolptr(true),
+            Terminating: boolptr(false),
+        },
+        TargetRef: &corev1.ObjectReference{
+            Kind: "Node",
+            Name: "barbaz",
+        },
     },
     },
 }
@@ -130,6 +141,17 @@ func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice {
             Serving:     boolptr(true),
             Terminating: boolptr(true),
         },
+    }, {
+        Addresses: []string{"4.5.6.7"},
+        Conditions: v1beta1.EndpointConditions{
+            Ready:       boolptr(true),
+            Serving:     boolptr(true),
+            Terminating: boolptr(false),
+        },
+        TargetRef: &corev1.ObjectReference{
+            Kind: "Node",
+            Name: "barbaz",
+        },
     },
     },
 }
@@ -183,6 +205,18 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_name":     "testport",
                 "__meta_kubernetes_endpointslice_port_protocol": "TCP",
             },
+            {
+                "__address__": "4.5.6.7:9000",
+                "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+                "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+                "__meta_kubernetes_endpointslice_port": "9000",
+                "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+                "__meta_kubernetes_endpointslice_port_name": "testport",
+                "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -233,6 +267,17 @@ func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_name":     "testport",
                 "__meta_kubernetes_endpointslice_port_protocol": "TCP",
             },
+            {
+                "__address__": "4.5.6.7:9000",
+                "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+                "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+                "__meta_kubernetes_endpointslice_port": "9000",
+                "__meta_kubernetes_endpointslice_port_name": "testport",
+                "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -419,6 +464,18 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_name":     "testport",
                 "__meta_kubernetes_endpointslice_port_protocol": "TCP",
             },
+            {
+                "__address__": "4.5.6.7:9000",
+                "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+                "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+                "__meta_kubernetes_endpointslice_port": "9000",
+                "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+                "__meta_kubernetes_endpointslice_port_name": "testport",
+                "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+            },
         },
         Labels: map[model.LabelName]model.LabelValue{
             "__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -503,6 +560,18 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_name":     "testport",
                 "__meta_kubernetes_endpointslice_port_protocol": "TCP",
             },
+            {
+                "__address__": "4.5.6.7:9000",
+                "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+                "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+                "__meta_kubernetes_endpointslice_port": "9000",
+                "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+                "__meta_kubernetes_endpointslice_port_name": "testport",
+                "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -576,6 +645,18 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_name":     "testport",
                 "__meta_kubernetes_endpointslice_port_protocol": "TCP",
             },
+            {
+                "__address__": "4.5.6.7:9000",
+                "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+                "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+                "__meta_kubernetes_endpointslice_port": "9000",
+                "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+                "__meta_kubernetes_endpointslice_port_name": "testport",
+                "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -644,6 +725,18 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_name":     "testport",
                 "__meta_kubernetes_endpointslice_port_protocol": "TCP",
             },
+            {
+                "__address__": "4.5.6.7:9000",
+                "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+                "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+                "__meta_kubernetes_endpointslice_port": "9000",
+                "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+                "__meta_kubernetes_endpointslice_port_name": "testport",
+                "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -728,6 +821,18 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_protocol":     "TCP",
                 "__meta_kubernetes_endpointslice_port_app_protocol": "http",
             },
+            {
+                "__address__": "4.5.6.7:9000",
+                "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+                "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+                "__meta_kubernetes_endpointslice_port": "9000",
+                "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+                "__meta_kubernetes_endpointslice_port_name": "testport",
+                "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -747,7 +852,8 @@
 
 func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
     metadataConfig := AttachMetadataConfig{Node: true}
-    nodeLabels := map[string]string{"az": "us-east1"}
+    nodeLabels1 := map[string]string{"az": "us-east1"}
+    nodeLabels2 := map[string]string{"az": "us-west2"}
     svc := &corev1.Service{
         ObjectMeta: metav1.ObjectMeta{
             Name: "testendpoints",
@@ -757,7 +863,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
         },
     }
 
-    objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels, nil), svc}
+    objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels1, nil), makeNode("barbaz", "", "", nodeLabels2, nil), svc}
     n, _ := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
 
     k8sDiscoveryTest{
@@ -804,6 +910,21 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_name":     "testport",
                 "__meta_kubernetes_endpointslice_port_protocol": "TCP",
             },
+            {
+                "__address__": "4.5.6.7:9000",
+                "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+                "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+                "__meta_kubernetes_endpointslice_port": "9000",
+                "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+                "__meta_kubernetes_endpointslice_port_name": "testport",
+                "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+                "__meta_kubernetes_node_label_az": "us-west2",
+                "__meta_kubernetes_node_labelpresent_az": "true",
+                "__meta_kubernetes_node_name": "barbaz",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -821,7 +942,8 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 
 func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
     metadataConfig := AttachMetadataConfig{Node: true}
-    nodeLabels := map[string]string{"az": "us-east1"}
+    nodeLabels1 := map[string]string{"az": "us-east1"}
+    nodeLabels2 := map[string]string{"az": "us-west2"}
     svc := &corev1.Service{
         ObjectMeta: metav1.ObjectMeta{
             Name: "testendpoints",
@@ -831,16 +953,17 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
         },
     }
 
-    node := makeNode("foobar", "", "", nodeLabels, nil)
-    objs := []runtime.Object{makeEndpointSliceV1(), node, svc}
+    node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+    node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
+    objs := []runtime.Object{makeEndpointSliceV1(), node1, node2, svc}
     n, c := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
 
     k8sDiscoveryTest{
         discovery:        n,
         expectedMaxItems: 2,
         afterStart: func() {
-            node.Labels["az"] = "us-central1"
-            c.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{})
+            node1.Labels["az"] = "us-central1"
+            c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{})
         },
         expectedRes: map[string]*targetgroup.Group{
             "endpointslice/default/testendpoints": {
@@ -859,7 +982,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_app_protocol": "http",
                 "__meta_kubernetes_endpointslice_port_name": "testport",
                 "__meta_kubernetes_endpointslice_port_protocol": "TCP",
-                "__meta_kubernetes_node_label_az": "us-central1",
+                "__meta_kubernetes_node_label_az": "us-east1",
                 "__meta_kubernetes_node_labelpresent_az": "true",
                 "__meta_kubernetes_node_name": "foobar",
             },
@@ -883,6 +1006,21 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_name":     "testport",
                 "__meta_kubernetes_endpointslice_port_protocol": "TCP",
             },
+            {
+                "__address__": "4.5.6.7:9000",
+                "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+                "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+                "__meta_kubernetes_endpointslice_port": "9000",
+                "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+                "__meta_kubernetes_endpointslice_port_name": "testport",
+                "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+                "__meta_kubernetes_node_label_az": "us-west2",
+                "__meta_kubernetes_node_labelpresent_az": "true",
+                "__meta_kubernetes_node_name": "barbaz",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -1007,6 +1145,18 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_protocol":     "TCP",
                 "__meta_kubernetes_endpointslice_port_app_protocol": "http",
             },
+            {
+                "__address__": "4.5.6.7:9000",
+                "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+                "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+                "__meta_kubernetes_endpointslice_port": "9000",
+                "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+                "__meta_kubernetes_endpointslice_port_name": "testport",
+                "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -1139,6 +1289,18 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
                 "__meta_kubernetes_endpointslice_port_protocol":     "TCP",
                 "__meta_kubernetes_endpointslice_port_app_protocol": "http",
             },
+            {
+                "__address__": "4.5.6.7:9000",
+                "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+                "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+                "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+                "__meta_kubernetes_endpointslice_port": "9000",
+                "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+                "__meta_kubernetes_endpointslice_port_name": "testport",
+                "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+            },
         },
         Labels: model.LabelSet{
             "__meta_kubernetes_endpointslice_address_type": "IPv4",

View file

@@ -761,16 +761,22 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
     indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
         e, ok := obj.(*apiv1.Endpoints)
         if !ok {
-            return nil, fmt.Errorf("object is not a pod")
+            return nil, fmt.Errorf("object is not endpoints")
         }
         var nodes []string
         for _, target := range e.Subsets {
             for _, addr := range target.Addresses {
-                if addr.NodeName == nil {
-                    continue
-                }
+                if addr.TargetRef != nil {
+                    switch addr.TargetRef.Kind {
+                    case "Pod":
+                        if addr.NodeName != nil {
                             nodes = append(nodes, *addr.NodeName)
+                        }
+                    case "Node":
+                        nodes = append(nodes, addr.TargetRef.Name)
+                    }
+                }
             }
         }
         return nodes, nil
     }
@@ -789,18 +795,30 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
         switch e := obj.(type) {
         case *disv1.EndpointSlice:
             for _, target := range e.Endpoints {
-                if target.NodeName == nil {
-                    continue
-                }
+                if target.TargetRef != nil {
+                    switch target.TargetRef.Kind {
+                    case "Pod":
+                        if target.NodeName != nil {
                             nodes = append(nodes, *target.NodeName)
+                        }
+                    case "Node":
+                        nodes = append(nodes, target.TargetRef.Name)
+                    }
+                }
             }
         case *disv1beta1.EndpointSlice:
             for _, target := range e.Endpoints {
-                if target.NodeName == nil {
-                    continue
-                }
+                if target.TargetRef != nil {
+                    switch target.TargetRef.Kind {
+                    case "Pod":
+                        if target.NodeName != nil {
                             nodes = append(nodes, *target.NodeName)
+                        }
+                    case "Node":
+                        nodes = append(nodes, target.TargetRef.Name)
+                    }
+                }
             }
         default:
             return nil, fmt.Errorf("object is not an endpointslice")
         }

View file

@@ -134,6 +134,10 @@ job_name: <job_name>
 # Per-scrape timeout when scraping this job.
 [ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]
 
+# Whether to scrape a classic histogram that is also exposed as a native
+# histogram (has no effect without --enable-feature=native-histograms).
+[ scrape_classic_histograms: <boolean> | default = false ]
+
 # The HTTP resource path on which to fetch metrics from targets.
 [ metrics_path: <path> | default = /metrics ]
 
@@ -376,6 +380,11 @@ metric_relabel_configs:
 # 0 means no limit. This is an experimental feature, this behaviour could
 # change in the future.
 [ target_limit: <int> | default = 0 ]
+
+# Limit on total number of positive and negative buckets allowed in a single
+# native histogram. If this is exceeded, the entire scrape will be treated as
+# failed. 0 means no limit.
+[ native_histogram_bucket_limit: <int> | default = 0 ]
 ```
 
 Where `<job_name>` must be unique across all scrape configurations.
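Putting the two new settings together, a hypothetical `scrape_configs` entry might look like this (job name, target, and limit value are invented for illustration; `scrape_classic_histograms` has no effect unless Prometheus runs with `--enable-feature=native-histograms`):

```yaml
scrape_configs:
  - job_name: "native-histogram-demo"
    scrape_classic_histograms: true
    native_histogram_bucket_limit: 160
    static_configs:
      - targets: ["localhost:8080"]
```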
@@ -576,6 +585,8 @@ The following meta labels are available on targets during [relabeling](#relabel_
 # The information to access the Consul API. It is to be defined
 # as the Consul documentation requires.
 [ server: <host> | default = "localhost:8500" ]
+# Prefix for URIs for when consul is behind an API gateway (reverse proxy).
+[ path_prefix: <string> ]
 [ token: <secret> ]
 [ datacenter: <string> ]
 # Namespaces are only supported in Consul Enterprise.
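For illustration, a `consul_sd_configs` block using the new `path_prefix` when Consul is reached through a reverse proxy under `/consul` (host, scheme, and service name are hypothetical):

```yaml
consul_sd_configs:
  - server: "proxy.example.com:8500"
    path_prefix: /consul
    services: ["web"]
```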

View file

@@ -264,4 +264,4 @@ process ID.
 While Prometheus does have recovery mechanisms in the case that there is an
 abrupt process failure it is recommend to use the `SIGTERM` signal to cleanly
 shutdown a Prometheus instance. If you're running on Linux this can be performed
-by using `kill -s SIGHUP <PID>`, replacing `<PID>` with your Prometheus process ID.
+by using `kill -s SIGTERM <PID>`, replacing `<PID>` with your Prometheus process ID.

View file

@@ -1074,6 +1074,10 @@ The following endpoint returns various cardinality statistics about the Promethe
 ```
 GET /api/v1/status/tsdb
 ```
+
+URL query parameters:
+
+- `limit=<number>`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned.
 
 The `data` section of the query result consists of
 
 - **headStats**: This provides the following data about the head block of the TSDB:
   - **numSeries**: The number of series.
   - **chunkCount**: The number of chunks.
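A quick way to exercise the new parameter against a local server (assuming the default listen address; `limit=5` is arbitrary and caps each returned statistics list, such as `seriesCountByMetricName`, at five entries):

```
curl 'http://localhost:9090/api/v1/status/tsdb?limit=5'
```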

View file

@@ -589,8 +589,9 @@ over time and return an instant vector with per-series aggregation results:
 Note that all values in the specified interval have the same weight in the
 aggregation even if the values are not equally spaced throughout the interval.
 
-`count_over_time`, `last_over_time`, and `present_over_time` handle native
-histograms as expected. All other functions ignore histogram samples.
+`avg_over_time`, `sum_over_time`, `count_over_time`, `last_over_time`, and
+`present_over_time` handle native histograms as expected. All other functions
+ignore histogram samples.
 
 ## Trigonometric Functions
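With this change, a range aggregation like the following now yields a native histogram instead of silently dropping histogram samples (the metric name is hypothetical, and native histograms must be enabled via `--enable-feature=native-histograms`):

```
avg_over_time(http_request_duration_seconds[5m])
```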

View file

@@ -318,19 +318,23 @@ histograms is still very limited.
 Logical/set binary operators work as expected even if histogram samples are
 involved. They only check for the existence of a vector element and don't
 change their behavior depending on the sample type of an element (float or
-histogram).
+histogram). The `count` aggregation operator works similarly.
 
-The binary `+` operator between two native histograms and the `sum` aggregation
-operator to aggregate native histograms are fully supported. Even if the
-histograms involved have different bucket layouts, the buckets are
-automatically converted appropriately so that the operation can be
-performed. (With the currently supported bucket schemas, that's always
-possible.) If either operator has to sum up a mix of histogram samples and
-float samples, the corresponding vector element is removed from the output
+The binary `+` and `-` operators between two native histograms and the `sum`
+and `avg` aggregation operators to aggregate native histograms are fully
+supported. Even if the histograms involved have different bucket layouts, the
+buckets are automatically converted appropriately so that the operation can be
+performed. (With the currently supported bucket schemas, that's always
+possible.) If either operator has to aggregate a mix of histogram samples and
+float samples, the corresponding vector element is removed from the output
 vector entirely.
 
-All other operators do not behave in a meaningful way. They either treat the
-histogram sample as if it were a float sample of value 0, or (in case of
-arithmetic operations between a scalar and a vector) they leave the histogram
-sample unchanged. This behavior will change to a meaningful one before native
-histograms are a stable feature.
+The binary `*` operator works between a native histogram and a float in any
+order, while the binary `/` operator can be used between a native histogram
+and a float in that exact order.
+
+All other operators (and unmentioned cases for the above operators) do not
+behave in a meaningful way. They either treat the histogram sample as if it
+were a float sample of value 0, or (in case of arithmetic operations between a
+scalar and a vector) they leave the histogram sample unchanged. This behavior
+will change to a meaningful one before native histograms are a stable feature.
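To make the newly supported cases concrete, a few sketches using a hypothetical native-histogram metric (only the shapes described above behave meaningfully):

```
# Aggregating native histograms across series; bucket layouts are converted automatically.
sum(rate(http_request_duration_seconds[5m]))

# Scaling a native histogram by a float; `*` works in either order.
rate(http_request_duration_seconds[5m]) * 2

# Dividing a native histogram by a float; the histogram must come first.
rate(http_request_duration_seconds[5m]) / 2
```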

View file

@@ -18,12 +18,13 @@ Things considered stable for 2.x:
 * Configuration file format (minus the service discovery remote read/write, see below)
 * Rule/alert file format
 * Console template syntax and semantics
+* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/).
 
 Things considered unstable for 2.x:
 
 * Any feature listed as experimental or subject to change, including:
   * The [`holt_winters` PromQL function](https://github.com/prometheus/prometheus/issues/2458)
-* Remote read, remote write and the remote read endpoint
+* Remote write receiving, remote read and the remote read endpoint
 * Server-side HTTPS and basic authentication
 * Service discovery integrations, with the exception of `static_configs` and `file_sd_configs`
 * Go APIs of packages that are part of the server

View file

@@ -8,7 +8,7 @@ require (
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/influxdata/influxdb v1.11.0
-	github.com/prometheus/client_golang v1.14.0
+	github.com/prometheus/client_golang v1.15.0
 	github.com/prometheus/common v0.42.0
 	github.com/prometheus/prometheus v0.43.0
 	github.com/stretchr/testify v1.8.2
@@ -29,7 +29,7 @@ require (
 	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
-	github.com/kr/pretty v0.3.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 	github.com/pkg/errors v0.9.1 // indirect
@@ -51,7 +51,7 @@ require (
 	golang.org/x/text v0.8.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.29.0 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )

View file

@@ -141,8 +141,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -181,8 +180,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM=
+github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -204,7 +203,6 @@ github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJf
 github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
 github.com/prometheus/prometheus v0.43.0 h1:18iCSfrbAHbXvYFvR38U1Pt4uZmU9SmDcCpCrBKUiGg=
 github.com/prometheus/prometheus v0.43.0/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2GcqDlJKbF2sA6FM=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -341,14 +339,12 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0=
-google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

go.mod
View file

@ -13,7 +13,7 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 github.com/cespare/xxhash/v2 v2.2.0
github.com/dennwc/varint v1.0.0 github.com/dennwc/varint v1.0.0
github.com/dgraph-io/ristretto v0.1.1 github.com/dgraph-io/ristretto v0.1.1
github.com/digitalocean/godo v1.98.0 github.com/digitalocean/godo v1.99.0
github.com/docker/docker v23.0.4+incompatible github.com/docker/docker v23.0.4+incompatible
github.com/edsrzf/mmap-go v1.1.0 github.com/edsrzf/mmap-go v1.1.0
github.com/envoyproxy/go-control-plane v0.11.0 github.com/envoyproxy/go-control-plane v0.11.0
@ -31,7 +31,7 @@ require (
github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.20.0 github.com/hashicorp/consul/api v1.20.0
github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197
github.com/hetznercloud/hcloud-go v1.42.0 github.com/hetznercloud/hcloud-go v1.45.1
github.com/ionos-cloud/sdk-go/v6 v6.1.6 github.com/ionos-cloud/sdk-go/v6 v6.1.6
github.com/json-iterator/go v1.1.12 github.com/json-iterator/go v1.1.12
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
@ -43,7 +43,7 @@ require (
github.com/ovh/go-ovh v1.4.1 github.com/ovh/go-ovh v1.4.1
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/prometheus/alertmanager v0.25.0 github.com/prometheus/alertmanager v0.25.0
github.com/prometheus/client_golang v1.15.0 github.com/prometheus/client_golang v1.15.1
github.com/prometheus/client_model v0.3.0 github.com/prometheus/client_model v0.3.0
github.com/prometheus/common v0.42.0 github.com/prometheus/common v0.42.0
github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/assets v0.2.0

go.sum
View file

@ -155,8 +155,8 @@ github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkz
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ= github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E=
github.com/digitalocean/godo v1.98.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@ -464,8 +464,8 @@ github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197/go.mod h1:2TCr
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A= github.com/hetznercloud/hcloud-go v1.45.1 h1:nl0OOklFfQT5J6AaNIOhl5Ruh3fhmGmhvZEqHbibVuk=
github.com/hetznercloud/hcloud-go v1.42.0/go.mod h1:YADL8AbmQYH0Eo+1lkuyoc8LutT0UeMvaKP47nNUb+Y= github.com/hetznercloud/hcloud-go v1.45.1/go.mod h1:aAUGxSfSnB8/lVXHNEDxtCT1jykaul8kqjD7f5KQXF8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@ -655,8 +655,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=

View file

@ -159,12 +159,12 @@ func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
} }
} }
// Scale scales the FloatHistogram by the provided factor, i.e. it scales all // Mul multiplies the FloatHistogram by the provided factor, i.e. it scales all
// bucket counts including the zero bucket and the count and the sum of // bucket counts including the zero bucket and the count and the sum of
// observations. The bucket layout stays the same. This method changes the // observations. The bucket layout stays the same. This method changes the
// receiving histogram directly (rather than acting on a copy). It returns a // receiving histogram directly (rather than acting on a copy). It returns a
// pointer to the receiving histogram for convenience. // pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Scale(factor float64) *FloatHistogram { func (h *FloatHistogram) Mul(factor float64) *FloatHistogram {
h.ZeroCount *= factor h.ZeroCount *= factor
h.Count *= factor h.Count *= factor
h.Sum *= factor h.Sum *= factor
@ -177,6 +177,21 @@ func (h *FloatHistogram) Scale(factor float64) *FloatHistogram {
return h return h
} }
// Div works like Mul but divides instead of multiplying.
// When dividing by 0, everything is set to +Inf.
func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
h.ZeroCount /= scalar
h.Count /= scalar
h.Sum /= scalar
for i := range h.PositiveBuckets {
h.PositiveBuckets[i] /= scalar
}
for i := range h.NegativeBuckets {
h.NegativeBuckets[i] /= scalar
}
return h
}
// Add adds the provided other histogram to the receiving histogram. Count, Sum, // Add adds the provided other histogram to the receiving histogram. Count, Sum,
// and buckets from the other histogram are added to the corresponding // and buckets from the other histogram are added to the corresponding
// components of the receiving histogram. Buckets in the other histogram that do // components of the receiving histogram. Buckets in the other histogram that do

View file

@ -15,12 +15,13 @@ package histogram
import ( import (
"fmt" "fmt"
"math"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestFloatHistogramScale(t *testing.T) { func TestFloatHistogramMul(t *testing.T) {
cases := []struct { cases := []struct {
name string name string
in *FloatHistogram in *FloatHistogram
@ -33,6 +34,30 @@ func TestFloatHistogramScale(t *testing.T) {
3.1415, 3.1415,
&FloatHistogram{}, &FloatHistogram{},
}, },
{
"zero multiplier",
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 5.5,
Count: 3493.3,
Sum: 2349209.324,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []float64{1, 3.3, 4.2, 0.1},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
},
0,
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 0,
Count: 0,
Sum: 0,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []float64{0, 0, 0, 0},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{0, 0, 0, 0},
},
},
{ {
"no-op", "no-op",
&FloatHistogram{ &FloatHistogram{
@ -81,17 +106,137 @@ func TestFloatHistogramScale(t *testing.T) {
NegativeBuckets: []float64{6.2, 6, 1.234e5 * 2, 2000}, NegativeBuckets: []float64{6.2, 6, 1.234e5 * 2, 2000},
}, },
}, },
{
"triple",
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 11,
Count: 30,
Sum: 23,
PositiveSpans: []Span{{-2, 2}, {1, 3}},
PositiveBuckets: []float64{1, 0, 3, 4, 7},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{3, 1, 5, 6},
},
3,
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 33,
Count: 90,
Sum: 69,
PositiveSpans: []Span{{-2, 2}, {1, 3}},
PositiveBuckets: []float64{3, 0, 9, 12, 21},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{9, 3, 15, 18},
},
},
} }
for _, c := range cases { for _, c := range cases {
t.Run(c.name, func(t *testing.T) { t.Run(c.name, func(t *testing.T) {
require.Equal(t, c.expected, c.in.Scale(c.scale)) require.Equal(t, c.expected, c.in.Mul(c.scale))
// Has it also happened in-place? // Has it also happened in-place?
require.Equal(t, c.expected, c.in) require.Equal(t, c.expected, c.in)
}) })
} }
} }
func TestFloatHistogramDiv(t *testing.T) {
cases := []struct {
name string
fh *FloatHistogram
s float64
expected *FloatHistogram
}{
{
"zero value",
&FloatHistogram{},
3.1415,
&FloatHistogram{},
},
{
"zero divisor",
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 5.5,
Count: 3493.3,
Sum: 2349209.324,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []float64{1, 3.3, 4.2, 0.1},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
},
0,
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: math.Inf(1),
Count: math.Inf(1),
Sum: math.Inf(1),
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1), math.Inf(1)},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1), math.Inf(1)},
},
},
{
"no-op",
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 5.5,
Count: 3493.3,
Sum: 2349209.324,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []float64{1, 3.3, 4.2, 0.1},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
},
1,
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 5.5,
Count: 3493.3,
Sum: 2349209.324,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []float64{1, 3.3, 4.2, 0.1},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
},
},
{
"half",
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 11,
Count: 30,
Sum: 23,
PositiveSpans: []Span{{-2, 2}, {1, 3}},
PositiveBuckets: []float64{1, 0, 3, 4, 7},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{3, 1, 5, 6},
},
2,
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 5.5,
Count: 15,
Sum: 11.5,
PositiveSpans: []Span{{-2, 2}, {1, 3}},
PositiveBuckets: []float64{0.5, 0, 1.5, 2, 3.5},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{1.5, 0.5, 2.5, 3},
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
require.Equal(t, c.expected, c.fh.Div(c.s))
// Has it also happened in-place?
require.Equal(t, c.expected, c.fh)
})
}
}
func TestFloatHistogramDetectReset(t *testing.T) { func TestFloatHistogramDetectReset(t *testing.T) {
cases := []struct { cases := []struct {
name string name string

View file

@ -533,16 +533,15 @@ func (b *Builder) Set(n, v string) *Builder {
} }
func (b *Builder) Get(n string) string { func (b *Builder) Get(n string) string {
for _, d := range b.del { // Del() removes entries from .add but Set() does not remove from .del, so check .add first.
if d == n {
return ""
}
}
for _, a := range b.add { for _, a := range b.add {
if a.Name == n { if a.Name == n {
return a.Value return a.Value
} }
} }
if slices.Contains(b.del, n) {
return ""
}
return b.base.Get(n) return b.base.Get(n)
} }

View file

@ -593,14 +593,15 @@ func (b *Builder) Set(n, v string) *Builder {
} }
func (b *Builder) Get(n string) string { func (b *Builder) Get(n string) string {
if slices.Contains(b.del, n) { // Del() removes entries from .add but Set() does not remove from .del, so check .add first.
return ""
}
for _, a := range b.add { for _, a := range b.add {
if a.Name == n { if a.Name == n {
return a.Value return a.Value
} }
} }
if slices.Contains(b.del, n) {
return ""
}
return b.base.Get(n) return b.base.Get(n)
} }

View file

@ -607,6 +607,13 @@ func TestBuilder(t *testing.T) {
require.Equal(t, tcase.want.BytesWithoutLabels(nil, "aaa", "bbb"), b.Labels().Bytes(nil)) require.Equal(t, tcase.want.BytesWithoutLabels(nil, "aaa", "bbb"), b.Labels().Bytes(nil))
}) })
} }
t.Run("set_after_del", func(t *testing.T) {
b := NewBuilder(FromStrings("aaa", "111"))
b.Del("bbb")
b.Set("bbb", "222")
require.Equal(t, FromStrings("aaa", "111", "bbb", "222"), b.Labels())
require.Equal(t, "222", b.Get("bbb"))
})
} }
func TestScratchBuilder(t *testing.T) { func TestScratchBuilder(t *testing.T) {

View file

@ -397,6 +397,34 @@ func TestRelabel(t *testing.T) {
"foo": "bar", "foo": "bar",
}), }),
}, },
{ // From https://github.com/prometheus/prometheus/issues/12283
input: labels.FromMap(map[string]string{
"__meta_kubernetes_pod_container_port_name": "foo",
"__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091",
}),
relabel: []*Config{
{
Regex: MustNewRegexp("^__meta_kubernetes_pod_container_port_name$"),
Action: LabelDrop,
},
{
SourceLabels: model.LabelNames{"__meta_kubernetes_pod_annotation_XXX_metrics_port"},
Regex: MustNewRegexp("(.+)"),
Action: Replace,
Replacement: "metrics",
TargetLabel: "__meta_kubernetes_pod_container_port_name",
},
{
SourceLabels: model.LabelNames{"__meta_kubernetes_pod_container_port_name"},
Regex: MustNewRegexp("^metrics$"),
Action: Keep,
},
},
output: labels.FromMap(map[string]string{
"__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091",
"__meta_kubernetes_pod_container_port_name": "metrics",
}),
},
{ {
input: labels.FromMap(map[string]string{ input: labels.FromMap(map[string]string{
"a": "foo", "a": "foo",

View file

@ -71,7 +71,7 @@ type Parser interface {
// //
// This function always returns a valid parser, but might additionally // This function always returns a valid parser, but might additionally
// return an error if the content type cannot be parsed. // return an error if the content type cannot be parsed.
func New(b []byte, contentType string) (Parser, error) { func New(b []byte, contentType string, parseClassicHistograms bool) (Parser, error) {
if contentType == "" { if contentType == "" {
return NewPromParser(b), nil return NewPromParser(b), nil
} }
@ -84,7 +84,7 @@ func New(b []byte, contentType string) (Parser, error) {
case "application/openmetrics-text": case "application/openmetrics-text":
return NewOpenMetricsParser(b), nil return NewOpenMetricsParser(b), nil
case "application/vnd.google.protobuf": case "application/vnd.google.protobuf":
return NewProtobufParser(b), nil return NewProtobufParser(b, parseClassicHistograms), nil
default: default:
return NewPromParser(b), nil return NewPromParser(b), nil
} }
@ -100,7 +100,7 @@ const (
EntrySeries Entry = 2 // A series with a simple float64 as value. EntrySeries Entry = 2 // A series with a simple float64 as value.
EntryComment Entry = 3 EntryComment Entry = 3
EntryUnit Entry = 4 EntryUnit Entry = 4
EntryHistogram Entry = 5 // A series with a sparse histogram as a value. EntryHistogram Entry = 5 // A series with a native histogram as a value.
) )
// MetricType represents metric type values. // MetricType represents metric type values.
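Call sites of New pick up the new boolean argument; a minimal sketch with a placeholder plain-text payload (the flag is only consulted by the protobuf parser and is ignored for text formats):

package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	payload := []byte("# TYPE foo counter\nfoo_total 17\n")

	// false = keep the old behaviour of skipping the classic series
	// when a native histogram is present.
	p, err := textparse.New(payload, "text/plain; version=0.0.4", false)
	if err != nil {
		fmt.Println("unparsable content type:", err)
	}

	for {
		et, err := p.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		if et == textparse.EntrySeries {
			m, _, v := p.Series()
			fmt.Printf("%s = %g\n", m, v)
		}
	}
}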

View file

@ -91,7 +91,7 @@ func TestNewParser(t *testing.T) {
tt := tt // Copy to local variable before going parallel. tt := tt // Copy to local variable before going parallel.
t.Parallel() t.Parallel()
p, err := New([]byte{}, tt.contentType) p, err := New([]byte{}, tt.contentType, false)
tt.validateParser(t, p) tt.validateParser(t, p)
if tt.err == "" { if tt.err == "" {
require.NoError(t, err) require.NoError(t, err)

View file

@ -54,6 +54,8 @@ type ProtobufParser struct {
// quantiles/buckets. // quantiles/buckets.
fieldPos int fieldPos int
fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed.
redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram.
// state is marked by the entry we are processing. EntryInvalid implies // state is marked by the entry we are processing. EntryInvalid implies
// that we have to decode the next MetricFamily. // that we have to decode the next MetricFamily.
state Entry state Entry
@ -62,17 +64,22 @@ type ProtobufParser struct {
mf *dto.MetricFamily mf *dto.MetricFamily
// Whether to also parse a classic histogram that is also present as a
// native histogram.
parseClassicHistograms bool
// The following are just shenanigans to satisfy the Parser interface. // The following are just shenanigans to satisfy the Parser interface.
metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric. metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric.
} }
// NewProtobufParser returns a parser for the payload in the byte slice. // NewProtobufParser returns a parser for the payload in the byte slice.
func NewProtobufParser(b []byte) Parser { func NewProtobufParser(b []byte, parseClassicHistograms bool) Parser {
return &ProtobufParser{ return &ProtobufParser{
in: b, in: b,
state: EntryInvalid, state: EntryInvalid,
mf: &dto.MetricFamily{}, mf: &dto.MetricFamily{},
metricBytes: &bytes.Buffer{}, metricBytes: &bytes.Buffer{},
parseClassicHistograms: parseClassicHistograms,
} }
} }
@ -106,21 +113,30 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
v = s.GetQuantile()[p.fieldPos].GetValue() v = s.GetQuantile()[p.fieldPos].GetValue()
} }
case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
// This should only happen for a legacy histogram. // This should only happen for a classic histogram.
h := m.GetHistogram() h := m.GetHistogram()
switch p.fieldPos { switch p.fieldPos {
case -2: case -2:
v = h.GetSampleCountFloat()
if v == 0 {
v = float64(h.GetSampleCount()) v = float64(h.GetSampleCount())
}
case -1: case -1:
v = h.GetSampleSum() v = h.GetSampleSum()
default: default:
bb := h.GetBucket() bb := h.GetBucket()
if p.fieldPos >= len(bb) { if p.fieldPos >= len(bb) {
v = h.GetSampleCountFloat()
if v == 0 {
v = float64(h.GetSampleCount()) v = float64(h.GetSampleCount())
}
} else { } else {
v = bb[p.fieldPos].GetCumulativeCountFloat()
if v == 0 {
v = float64(bb[p.fieldPos].GetCumulativeCount()) v = float64(bb[p.fieldPos].GetCumulativeCount())
} }
} }
}
default: default:
panic("encountered unexpected metric type, this is a bug") panic("encountered unexpected metric type, this is a bug")
} }
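The repeated fallback above is the same pattern each time: prefer the float field, which only float histograms populate, and fall back to the integer field otherwise. A sketch of it factored out (the helper name and package are hypothetical; the parser inlines this logic):

package parseutil

import (
	dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
)

// sampleCount prefers the float count, which only float histograms
// populate, and falls back to the integer count otherwise.
// Hypothetical helper; the parser above inlines this logic.
func sampleCount(h *dto.Histogram) float64 {
	if v := h.GetSampleCountFloat(); v != 0 {
		return v
	}
	return float64(h.GetSampleCount())
}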
@ -149,6 +165,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his
ts = m.GetTimestampMs() ts = m.GetTimestampMs()
h = m.GetHistogram() h = m.GetHistogram()
) )
if p.parseClassicHistograms && len(h.GetBucket()) > 0 {
p.redoClassic = true
}
if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 { if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 {
// It is a float histogram. // It is a float histogram.
fh := histogram.FloatHistogram{ fh := histogram.FloatHistogram{
@ -376,6 +395,12 @@ func (p *ProtobufParser) Next() (Entry, error) {
return EntryInvalid, err return EntryInvalid, err
} }
case EntryHistogram, EntrySeries: case EntryHistogram, EntrySeries:
if p.redoClassic {
p.redoClassic = false
p.state = EntrySeries
p.fieldPos = -3
p.fieldsDone = false
}
t := p.mf.GetType() t := p.mf.GetType()
if p.state == EntrySeries && !p.fieldsDone && if p.state == EntrySeries && !p.fieldsDone &&
(t == dto.MetricType_SUMMARY || (t == dto.MetricType_SUMMARY ||
@ -432,7 +457,7 @@ func (p *ProtobufParser) updateMetricBytes() error {
// state. // state.
func (p *ProtobufParser) getMagicName() string { func (p *ProtobufParser) getMagicName() string {
t := p.mf.GetType() t := p.mf.GetType()
if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_SUMMARY) { if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) {
return p.mf.GetName() return p.mf.GetName()
} }
if p.fieldPos == -2 { if p.fieldPos == -2 {
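With the flag enabled, a scrape of a native histogram that also carries classic buckets yields the native sample first, then the synthesized _count, _sum and _bucket series on the redo pass. A hedged sketch of driving the parser directly (dumpEntries is a hypothetical helper; payload construction is omitted, and a real buf would hold the length-delimited protobuf stream):

package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/textparse"
)

// dumpEntries prints every entry in a protobuf scrape payload. With the
// second argument of NewProtobufParser set to true, each native histogram
// that also has classic buckets is re-emitted as _count/_sum/_bucket series.
func dumpEntries(buf []byte) error {
	p := textparse.NewProtobufParser(buf, true)
	for {
		et, err := p.Next()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		switch et {
		case textparse.EntryHistogram:
			m, _, h, fh := p.Histogram()
			fmt.Printf("native %s: %v %v\n", m, h, fh)
		case textparse.EntrySeries:
			m, _, v := p.Series()
			fmt.Printf("series %s = %g\n", m, v)
		}
	}
}

func main() {
	// Empty payload: terminates immediately. A real buf would come from a
	// target negotiated to application/vnd.google.protobuf.
	if err := dumpEntries(nil); err != nil {
		fmt.Println(err)
	}
}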

View file

@ -30,8 +30,8 @@ import (
dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
) )
func TestProtobufParse(t *testing.T) { func createTestProtoBuf(t *testing.T) *bytes.Buffer {
textMetricFamilies := []string{ testMetricFamilies := []string{
`name: "go_build_info" `name: "go_build_info"
help: "Build information about the main Go module." help: "Build information about the main Go module."
type: GAUGE type: GAUGE
@ -231,7 +231,6 @@ help: "Test float histogram with many buckets removed to keep it manageable in s
type: HISTOGRAM type: HISTOGRAM
metric: < metric: <
histogram: < histogram: <
sample_count: 175
sample_count_float: 175.0 sample_count_float: 175.0
sample_sum: 0.0008280461746287094 sample_sum: 0.0008280461746287094
bucket: < bucket: <
@ -302,7 +301,6 @@ help: "Like test_float_histogram but as gauge histogram."
type: GAUGE_HISTOGRAM type: GAUGE_HISTOGRAM
metric: < metric: <
histogram: < histogram: <
sample_count: 175
sample_count_float: 175.0 sample_count_float: 175.0
sample_sum: 0.0008280461746287094 sample_sum: 0.0008280461746287094
bucket: < bucket: <
@ -450,9 +448,9 @@ metric: <
} }
varintBuf := make([]byte, binary.MaxVarintLen32) varintBuf := make([]byte, binary.MaxVarintLen32)
inputBuf := &bytes.Buffer{} buf := &bytes.Buffer{}
for _, tmf := range textMetricFamilies { for _, tmf := range testMetricFamilies {
pb := &dto.MetricFamily{} pb := &dto.MetricFamily{}
// From text to proto message. // From text to proto message.
require.NoError(t, proto.UnmarshalText(tmf, pb)) require.NoError(t, proto.UnmarshalText(tmf, pb))
@ -462,11 +460,15 @@ metric: <
// Write first length, then binary protobuf. // Write first length, then binary protobuf.
varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf)))
inputBuf.Write(varintBuf[:varintLength]) buf.Write(varintBuf[:varintLength])
inputBuf.Write(protoBuf) buf.Write(protoBuf)
} }
exp := []struct { return buf
}
func TestProtobufParse(t *testing.T) {
type parseResult struct {
lset labels.Labels lset labels.Labels
m string m string
t int64 t int64
@ -478,7 +480,19 @@ metric: <
shs *histogram.Histogram shs *histogram.Histogram
fhs *histogram.FloatHistogram fhs *histogram.FloatHistogram
e []exemplar.Exemplar e []exemplar.Exemplar
}
inputBuf := createTestProtoBuf(t)
scenarios := []struct {
name string
parser Parser
expected []parseResult
}{ }{
{
name: "ignore classic buckets of native histograms",
parser: NewProtobufParser(inputBuf.Bytes(), false),
expected: []parseResult{
{ {
m: "go_build_info", m: "go_build_info",
help: "Build information about the main Go module.", help: "Build information about the main Go module.",
@ -810,12 +824,587 @@ metric: <
"__name__", "without_quantiles_sum", "__name__", "without_quantiles_sum",
), ),
}, },
},
},
{
name: "parse classic and native buckets",
parser: NewProtobufParser(inputBuf.Bytes(), true),
expected: []parseResult{
{ // 0
m: "go_build_info",
help: "Build information about the main Go module.",
},
{ // 1
m: "go_build_info",
typ: MetricTypeGauge,
},
{ // 2
m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)",
v: 1,
lset: labels.FromStrings(
"__name__", "go_build_info",
"checksum", "",
"path", "github.com/prometheus/client_golang",
"version", "(devel)",
),
},
{ // 3
m: "go_memstats_alloc_bytes_total",
help: "Total number of bytes allocated, even if freed.",
},
{ // 4
m: "go_memstats_alloc_bytes_total",
typ: MetricTypeCounter,
},
{ // 5
m: "go_memstats_alloc_bytes_total",
v: 1.546544e+06,
lset: labels.FromStrings(
"__name__", "go_memstats_alloc_bytes_total",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233},
},
},
{ // 6
m: "something_untyped",
help: "Just to test the untyped type.",
},
{ // 7
m: "something_untyped",
typ: MetricTypeUnknown,
},
{ // 8
m: "something_untyped",
t: 1234567,
v: 42,
lset: labels.FromStrings(
"__name__", "something_untyped",
),
},
{ // 9
m: "test_histogram",
help: "Test histogram with many buckets removed to keep it manageable in size.",
},
{ // 10
m: "test_histogram",
typ: MetricTypeHistogram,
},
{ // 11
m: "test_histogram",
t: 1234568,
shs: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_histogram",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 12
m: "test_histogram_count",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram_count",
),
},
{ // 13
m: "test_histogram_sum",
t: 1234568,
v: 0.0008280461746287094,
lset: labels.FromStrings(
"__name__", "test_histogram_sum",
),
},
{ // 14
m: "test_histogram_bucket\xffle\xff-0.0004899999999999998",
t: 1234568,
v: 2,
lset: labels.FromStrings(
"__name__", "test_histogram_bucket",
"le", "-0.0004899999999999998",
),
},
{ // 15
m: "test_histogram_bucket\xffle\xff-0.0003899999999999998",
t: 1234568,
v: 4,
lset: labels.FromStrings(
"__name__", "test_histogram_bucket",
"le", "-0.0003899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{ // 16
m: "test_histogram_bucket\xffle\xff-0.0002899999999999998",
t: 1234568,
v: 16,
lset: labels.FromStrings(
"__name__", "test_histogram_bucket",
"le", "-0.0002899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 17
m: "test_histogram_bucket\xffle\xff+Inf",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram_bucket",
"le", "+Inf",
),
},
{ // 18
m: "test_gauge_histogram",
help: "Like test_histogram but as gauge histogram.",
},
{ // 19
m: "test_gauge_histogram",
typ: MetricTypeGaugeHistogram,
},
{ // 20
m: "test_gauge_histogram",
t: 1234568,
shs: &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_gauge_histogram",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 21
m: "test_gauge_histogram_count",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_gauge_histogram_count",
),
},
{ // 22
m: "test_gauge_histogram_sum",
t: 1234568,
v: 0.0008280461746287094,
lset: labels.FromStrings(
"__name__", "test_gauge_histogram_sum",
),
},
{ // 23
m: "test_gauge_histogram_bucket\xffle\xff-0.0004899999999999998",
t: 1234568,
v: 2,
lset: labels.FromStrings(
"__name__", "test_gauge_histogram_bucket",
"le", "-0.0004899999999999998",
),
},
{ // 24
m: "test_gauge_histogram_bucket\xffle\xff-0.0003899999999999998",
t: 1234568,
v: 4,
lset: labels.FromStrings(
"__name__", "test_gauge_histogram_bucket",
"le", "-0.0003899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{ // 25
m: "test_gauge_histogram_bucket\xffle\xff-0.0002899999999999998",
t: 1234568,
v: 16,
lset: labels.FromStrings(
"__name__", "test_gauge_histogram_bucket",
"le", "-0.0002899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 26
m: "test_gauge_histogram_bucket\xffle\xff+Inf",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_gauge_histogram_bucket",
"le", "+Inf",
),
},
{ // 27
m: "test_float_histogram",
help: "Test float histogram with many buckets removed to keep it manageable in size.",
},
{ // 28
m: "test_float_histogram",
typ: MetricTypeHistogram,
},
{ // 29
m: "test_float_histogram",
t: 1234568,
fhs: &histogram.FloatHistogram{
Count: 175.0,
ZeroCount: 2.0,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0},
NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0},
},
lset: labels.FromStrings(
"__name__", "test_float_histogram",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 30
m: "test_float_histogram_count",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_float_histogram_count",
),
},
{ // 31
m: "test_float_histogram_sum",
t: 1234568,
v: 0.0008280461746287094,
lset: labels.FromStrings(
"__name__", "test_float_histogram_sum",
),
},
{ // 32
m: "test_float_histogram_bucket\xffle\xff-0.0004899999999999998",
t: 1234568,
v: 2,
lset: labels.FromStrings(
"__name__", "test_float_histogram_bucket",
"le", "-0.0004899999999999998",
),
},
{ // 33
m: "test_float_histogram_bucket\xffle\xff-0.0003899999999999998",
t: 1234568,
v: 4,
lset: labels.FromStrings(
"__name__", "test_float_histogram_bucket",
"le", "-0.0003899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{ // 34
m: "test_float_histogram_bucket\xffle\xff-0.0002899999999999998",
t: 1234568,
v: 16,
lset: labels.FromStrings(
"__name__", "test_float_histogram_bucket",
"le", "-0.0002899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 35
m: "test_float_histogram_bucket\xffle\xff+Inf",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_float_histogram_bucket",
"le", "+Inf",
),
},
{ // 36
m: "test_gauge_float_histogram",
help: "Like test_float_histogram but as gauge histogram.",
},
{ // 37
m: "test_gauge_float_histogram",
typ: MetricTypeGaugeHistogram,
},
{ // 38
m: "test_gauge_float_histogram",
t: 1234568,
fhs: &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 175.0,
ZeroCount: 2.0,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0},
NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0},
},
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 39
m: "test_gauge_float_histogram_count",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram_count",
),
},
{ // 40
m: "test_gauge_float_histogram_sum",
t: 1234568,
v: 0.0008280461746287094,
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram_sum",
),
},
{ // 41
m: "test_gauge_float_histogram_bucket\xffle\xff-0.0004899999999999998",
t: 1234568,
v: 2,
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram_bucket",
"le", "-0.0004899999999999998",
),
},
{ // 42
m: "test_gauge_float_histogram_bucket\xffle\xff-0.0003899999999999998",
t: 1234568,
v: 4,
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram_bucket",
"le", "-0.0003899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{ // 43
m: "test_gauge_float_histogram_bucket\xffle\xff-0.0002899999999999998",
t: 1234568,
v: 16,
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram_bucket",
"le", "-0.0002899999999999998",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
{ // 44
m: "test_gauge_float_histogram_bucket\xffle\xff+Inf",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_gauge_float_histogram_bucket",
"le", "+Inf",
),
},
{ // 45
m: "test_histogram2",
help: "Similar histogram as before but now without sparse buckets.",
},
{ // 46
m: "test_histogram2",
typ: MetricTypeHistogram,
},
{ // 47
m: "test_histogram2_count",
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram2_count",
),
},
{ // 48
m: "test_histogram2_sum",
v: 0.000828,
lset: labels.FromStrings(
"__name__", "test_histogram2_sum",
),
},
{ // 49
m: "test_histogram2_bucket\xffle\xff-0.00048",
v: 2,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"le", "-0.00048",
),
},
{ // 50
m: "test_histogram2_bucket\xffle\xff-0.00038",
v: 4,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"le", "-0.00038",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146},
},
},
{ // 51
m: "test_histogram2_bucket\xffle\xff1.0",
v: 16,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"le", "1.0",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false},
},
},
{ // 52
m: "test_histogram2_bucket\xffle\xff+Inf",
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram2_bucket",
"le", "+Inf",
),
},
{ // 53
m: "rpc_durations_seconds",
help: "RPC latency distributions.",
},
{ // 54
m: "rpc_durations_seconds",
typ: MetricTypeSummary,
},
{ // 55
m: "rpc_durations_seconds_count\xffservice\xffexponential",
v: 262,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds_count",
"service", "exponential",
),
},
{ // 56
m: "rpc_durations_seconds_sum\xffservice\xffexponential",
v: 0.00025551262820703587,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds_sum",
"service", "exponential",
),
},
{ // 57
m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5",
v: 6.442786329648548e-07,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds",
"quantile", "0.5",
"service", "exponential",
),
},
{ // 58
m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9",
v: 1.9435742936658396e-06,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds",
"quantile", "0.9",
"service", "exponential",
),
},
{ // 59
m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99",
v: 4.0471608667037015e-06,
lset: labels.FromStrings(
"__name__", "rpc_durations_seconds",
"quantile", "0.99",
"service", "exponential",
),
},
{ // 60
m: "without_quantiles",
help: "A summary without quantiles.",
},
{ // 61
m: "without_quantiles",
typ: MetricTypeSummary,
},
{ // 62
m: "without_quantiles_count",
v: 42,
lset: labels.FromStrings(
"__name__", "without_quantiles_count",
),
},
{ // 63
m: "without_quantiles_sum",
v: 1.234,
lset: labels.FromStrings(
"__name__", "without_quantiles_sum",
),
},
},
},
} }
p := NewProtobufParser(inputBuf.Bytes()) for _, scenario := range scenarios {
i := 0 t.Run(scenario.name, func(t *testing.T) {
var (
var res labels.Labels i int
res labels.Labels
p = scenario.parser
exp = scenario.expected
)
for { for {
et, err := p.Next() et, err := p.Next()
@ -891,4 +1480,6 @@ metric: <
i++ i++
} }
require.Equal(t, len(exp), i) require.Equal(t, len(exp), i)
})
}
} }

View file

@ -1850,14 +1850,14 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no
} }
case chunkenc.ValFloat: case chunkenc.ValFloat:
t, v = it.At() t, v = it.At()
case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: case chunkenc.ValFloatHistogram:
t, h = it.AtFloatHistogram() t, h = it.AtFloatHistogram()
default: default:
panic(fmt.Errorf("unknown value type %v", valueType)) panic(fmt.Errorf("unknown value type %v", valueType))
} }
if valueType == chunkenc.ValNone || t > refTime { if valueType == chunkenc.ValNone || t > refTime {
var ok bool var ok bool
t, v, _, h, ok = it.PeekPrev() t, v, h, ok = it.PeekPrev()
if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) {
return 0, 0, nil, false return 0, 0, nil, false
} }
@ -2263,15 +2263,12 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
insertedSigs[insertSig] = struct{}{} insertedSigs[insertSig] = struct{}{}
} }
if (hl != nil && hr != nil) || (hl == nil && hr == nil) {
// Both lhs and rhs are of same type.
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: metric, Metric: metric,
F: floatValue, F: floatValue,
H: histogramValue, H: histogramValue,
}) })
} }
}
return enh.Out return enh.Out
} }
@ -2337,28 +2334,33 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V
// VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar.
func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector {
for _, lhsSample := range lhs { for _, lhsSample := range lhs {
lv, rv := lhsSample.F, rhs.V lf, rf := lhsSample.F, rhs.V
var rh *histogram.FloatHistogram
lh := lhsSample.H
// lhs always contains the Vector. If the original position was different // lhs always contains the Vector. If the original position was different
// swap for calculating the value. // swap for calculating the value.
if swap { if swap {
lv, rv = rv, lv lf, rf = rf, lf
lh, rh = rh, lh
} }
value, _, keep := vectorElemBinop(op, lv, rv, nil, nil) float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh)
// Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // Catch cases where the scalar is the LHS in a scalar-vector comparison operation.
// We want to always keep the vector element value as the output value, even if it's on the RHS. // We want to always keep the vector element value as the output value, even if it's on the RHS.
if op.IsComparisonOperator() && swap { if op.IsComparisonOperator() && swap {
value = rv float = rf
histogram = rh
} }
if returnBool { if returnBool {
if keep { if keep {
value = 1.0 float = 1.0
} else { } else {
value = 0.0 float = 0.0
} }
keep = true keep = true
} }
if keep { if keep {
lhsSample.F = value lhsSample.F = float
lhsSample.H = histogram
if shouldDropMetricName(op) || returnBool { if shouldDropMetricName(op) || returnBool {
lhsSample.Metric = enh.DropMetricName(lhsSample.Metric) lhsSample.Metric = enh.DropMetricName(lhsSample.Metric)
} }
@ -2413,16 +2415,33 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
// The histogram being added must have the larger schema // The histogram being added must have the larger schema
// code (i.e. the higher resolution). // code (i.e. the higher resolution).
if hrhs.Schema >= hlhs.Schema { if hrhs.Schema >= hlhs.Schema {
return 0, hlhs.Copy().Add(hrhs), true return 0, hlhs.Copy().Add(hrhs).Compact(0), true
} }
return 0, hrhs.Copy().Add(hlhs), true return 0, hrhs.Copy().Add(hlhs).Compact(0), true
} }
return lhs + rhs, nil, true return lhs + rhs, nil, true
case parser.SUB: case parser.SUB:
if hlhs != nil && hrhs != nil {
// The histogram being subtracted must have the larger schema
// code (i.e. the higher resolution).
if hrhs.Schema >= hlhs.Schema {
return 0, hlhs.Copy().Sub(hrhs).Compact(0), true
}
return 0, hrhs.Copy().Mul(-1).Add(hlhs).Compact(0), true
}
return lhs - rhs, nil, true return lhs - rhs, nil, true
case parser.MUL: case parser.MUL:
if hlhs != nil && hrhs == nil {
return 0, hlhs.Copy().Mul(rhs), true
}
if hlhs == nil && hrhs != nil {
return 0, hrhs.Copy().Mul(lhs), true
}
return lhs * rhs, nil, true return lhs * rhs, nil, true
case parser.DIV: case parser.DIV:
if hlhs != nil && hrhs == nil {
return 0, hlhs.Copy().Div(rhs), true
}
return lhs / rhs, nil, true return lhs / rhs, nil, true
case parser.POW: case parser.POW:
return math.Pow(lhs, rhs), nil, true return math.Pow(lhs, rhs), nil, true
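One subtlety of the new SUB branch, worth spelling out: Add and Sub require their argument to have an equal or larger schema (finer resolution) than the receiver, so when the left-hand histogram is the finer one, lhs-rhs is rewritten as (-1*rhs)+lhs. A standalone sketch of that identity (subHist is a hypothetical helper mirroring the branch above, with trivial made-up histograms):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// subHist mirrors the SUB branch above: the operand passed to Add/Sub
// must have the equal-or-higher schema, so the finer histogram is kept
// as the argument and the sign is fixed up with Mul(-1).
func subHist(lhs, rhs *histogram.FloatHistogram) *histogram.FloatHistogram {
	if rhs.Schema >= lhs.Schema {
		return lhs.Copy().Sub(rhs).Compact(0)
	}
	return rhs.Copy().Mul(-1).Add(lhs).Compact(0)
}

func main() {
	a := &histogram.FloatHistogram{Schema: 0, Count: 5, Sum: 10}
	b := &histogram.FloatHistogram{Schema: 1, Count: 2, Sum: 4}
	fmt.Println(subHist(a, b)) // result has Count=3, Sum=6
	fmt.Println(subHist(b, a)) // Count=-3, Sum=-6, computed as (-1*a)+b
}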
@ -2452,7 +2471,8 @@ type groupedAggregation struct {
labels labels.Labels labels labels.Labels
floatValue float64 floatValue float64
histogramValue *histogram.FloatHistogram histogramValue *histogram.FloatHistogram
mean float64 floatMean float64
histogramMean *histogram.FloatHistogram
groupCount int groupCount int
heap vectorByValueHeap heap vectorByValueHeap
reverseHeap vectorByReverseValueHeap reverseHeap vectorByReverseValueHeap
@ -2536,7 +2556,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
newAgg := &groupedAggregation{ newAgg := &groupedAggregation{
labels: m, labels: m,
floatValue: s.F, floatValue: s.F,
mean: s.F, floatMean: s.F,
groupCount: 1, groupCount: 1,
} }
switch { switch {
@ -2545,6 +2565,11 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
case op == parser.SUM: case op == parser.SUM:
newAgg.histogramValue = s.H.Copy() newAgg.histogramValue = s.H.Copy()
newAgg.hasHistogram = true newAgg.hasHistogram = true
case op == parser.AVG:
newAgg.histogramMean = s.H.Copy()
newAgg.hasHistogram = true
case op == parser.STDVAR || op == parser.STDDEV:
newAgg.groupCount = 0
} }
result[groupingKey] = newAgg result[groupingKey] = newAgg
@ -2589,9 +2614,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
if s.H.Schema >= group.histogramValue.Schema { if s.H.Schema >= group.histogramValue.Schema {
group.histogramValue.Add(s.H) group.histogramValue.Add(s.H)
} else { } else {
h := s.H.Copy() group.histogramValue = s.H.Copy().Add(group.histogramValue)
h.Add(group.histogramValue)
group.histogramValue = h
} }
} }
// Otherwise the aggregation contained floats // Otherwise the aggregation contained floats
@ -2604,10 +2627,30 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
case parser.AVG: case parser.AVG:
group.groupCount++ group.groupCount++
if math.IsInf(group.mean, 0) { if s.H != nil {
if math.IsInf(s.F, 0) && (group.mean > 0) == (s.F > 0) { group.hasHistogram = true
// The `mean` and `s.V` values are `Inf` of the same sign. They if group.histogramMean != nil {
// can't be subtracted, but the value of `mean` is correct left := s.H.Copy().Div(float64(group.groupCount))
right := group.histogramMean.Copy().Div(float64(group.groupCount))
// The histogram being added/subtracted must have
// an equal or larger schema.
if s.H.Schema >= group.histogramMean.Schema {
toAdd := right.Mul(-1).Add(left)
group.histogramMean.Add(toAdd)
} else {
toAdd := left.Sub(right)
group.histogramMean = toAdd.Add(group.histogramMean)
}
}
// Otherwise the aggregation contained floats
// previously and will be invalid anyway. No
// point in copying the histogram in that case.
} else {
group.hasFloat = true
if math.IsInf(group.floatMean, 0) {
if math.IsInf(s.F, 0) && (group.floatMean > 0) == (s.F > 0) {
// The `floatMean` and `s.F` values are `Inf` of the same sign. They
// can't be subtracted, but the value of `floatMean` is correct
// already. // already.
break break
} }
@ -2622,7 +2665,8 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
} }
} }
// Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows.
group.mean += s.F/float64(group.groupCount) - group.mean/float64(group.groupCount) group.floatMean += s.F/float64(group.groupCount) - group.floatMean/float64(group.groupCount)
}
case parser.GROUP: case parser.GROUP:
// Do nothing. Required to avoid the panic in `default:` below. // Do nothing. Required to avoid the panic in `default:` below.
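For the float branch, the reason for dividing each side of the `-` in the running-mean update is float64 overflow: with two finite samples of opposite sign near ±MaxFloat64, the naive form overflows while the split form does not. A tiny self-contained illustration with made-up values:

package main

import "fmt"

func main() {
	const huge = 1.6e308 // close to math.MaxFloat64 (~1.8e308)

	mean := -huge // running mean after the first sample
	x := huge     // the second sample
	n := 2.0

	naive := mean + (x-mean)/n    // x-mean overflows to +Inf
	safe := mean + (x/n - mean/n) // the form used in the aggregation

	fmt.Println(naive) // +Inf
	fmt.Println(safe)  // 0, the true mean of -huge and huge
}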
@ -2641,10 +2685,12 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
group.groupCount++ group.groupCount++
case parser.STDVAR, parser.STDDEV: case parser.STDVAR, parser.STDDEV:
if s.H == nil { // Ignore native histograms.
group.groupCount++ group.groupCount++
delta := s.F - group.mean delta := s.F - group.floatMean
group.mean += delta / float64(group.groupCount) group.floatMean += delta / float64(group.groupCount)
group.floatValue += delta * (s.F - group.mean) group.floatValue += delta * (s.F - group.floatMean)
}
case parser.TOPK: case parser.TOPK:
// We build a heap of up to k elements, with the smallest element at heap[0]. // We build a heap of up to k elements, with the smallest element at heap[0].
@ -2696,7 +2742,16 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
for _, aggr := range orderedResult { for _, aggr := range orderedResult {
switch op { switch op {
case parser.AVG: case parser.AVG:
aggr.floatValue = aggr.mean if aggr.hasFloat && aggr.hasHistogram {
// We cannot aggregate a histogram sample with a float64 sample.
// TODO(zenador): Issue warning when plumbing is in place.
continue
}
if aggr.hasHistogram {
aggr.histogramValue = aggr.histogramMean.Compact(0)
} else {
aggr.floatValue = aggr.floatMean
}
case parser.COUNT, parser.COUNT_VALUES: case parser.COUNT, parser.COUNT_VALUES:
aggr.floatValue = float64(aggr.groupCount) aggr.floatValue = float64(aggr.groupCount)
@ -2739,8 +2794,12 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
case parser.SUM: case parser.SUM:
if aggr.hasFloat && aggr.hasHistogram { if aggr.hasFloat && aggr.hasHistogram {
// We cannot aggregate histogram sample with a float64 sample. // We cannot aggregate histogram sample with a float64 sample.
// TODO(zenador): Issue warning when plumbing is in place.
continue continue
} }
if aggr.hasHistogram {
aggr.histogramValue.Compact(0)
}
default: default:
// For other aggregations, we already have the right value. // For other aggregations, we already have the right value.
} }

View file

@ -3986,16 +3986,18 @@ func TestNativeHistogram_HistogramFraction(t *testing.T) {
} }
} }
func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) { func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
// TODO(codesome): Integrate histograms into the PromQL testing framework // TODO(codesome): Integrate histograms into the PromQL testing framework
// and write more tests there. // and write more tests there.
cases := []struct { cases := []struct {
histograms []histogram.Histogram histograms []histogram.Histogram
expected histogram.FloatHistogram expected histogram.FloatHistogram
expectedAvg histogram.FloatHistogram
}{ }{
{ {
histograms: []histogram.Histogram{ histograms: []histogram.Histogram{
{ {
CounterResetHint: histogram.GaugeType,
Schema: 0, Schema: 0,
Count: 21, Count: 21,
Sum: 1234.5, Sum: 1234.5,
@ -4012,6 +4014,182 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) {
}, },
NegativeBuckets: []int64{2, 2, -3, 8}, NegativeBuckets: []int64{2, 2, -3, 8},
}, },
{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 36,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 36,
Sum: 1111.1,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
{
CounterResetHint: histogram.GaugeType,
Schema: 1, // An empty histogram, just to bring the series count to 4 so avg has nicer numbers.
},
},
expected: histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Schema: 0,
ZeroThreshold: 0.001,
ZeroCount: 14,
Count: 93,
Sum: 4691.2,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 7},
},
PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 6},
{Offset: 3, Length: 3},
},
NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4},
},
expectedAvg: histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Schema: 0,
ZeroThreshold: 0.001,
ZeroCount: 3.5,
Count: 23.25,
Sum: 1172.8,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 7},
},
PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 6},
{Offset: 3, Length: 3},
},
NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1},
},
},
}
idx0 := int64(0)
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
test, err := NewTest(t, "")
require.NoError(t, err)
t.Cleanup(test.Close)
seriesName := "sparse_histogram_series"
seriesNameOverTime := "sparse_histogram_series_over_time"
engine := test.QueryEngine()
ts := idx0 * int64(10*time.Minute/time.Millisecond)
app := test.Storage().Appender(context.TODO())
for idx1, h := range c.histograms {
lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
// Since we mutate h later, we need to create a copy here.
if floatHisto {
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
} else {
_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
}
require.NoError(t, err)
lbls = labels.FromStrings("__name__", seriesNameOverTime)
newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond)
// Since we mutate h later, we need to create a copy here.
if floatHisto {
_, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat())
} else {
_, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil)
}
require.NoError(t, err)
}
require.NoError(t, app.Commit())
queryAndCheck := func(queryString string, ts int64, exp Vector) {
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
vector, err := res.Vector()
require.NoError(t, err)
require.Equal(t, exp, vector)
}
// sum().
queryString := fmt.Sprintf("sum(%s)", seriesName)
queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
// + operator.
queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName)
for idx := 1; idx < len(c.histograms); idx++ {
queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx)
}
queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
// count().
queryString = fmt.Sprintf("count(%s)", seriesName)
queryAndCheck(queryString, ts, []Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}})
// avg().
queryString = fmt.Sprintf("avg(%s)", seriesName)
queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
offset := int64(len(c.histograms) - 1)
newTs := ts + offset*int64(time.Minute/time.Millisecond)
// sum_over_time().
queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset)
queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}})
// avg_over_time().
queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset)
queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
})
idx0++
}
}
}
func TestNativeHistogram_SubOperator(t *testing.T) {
// TODO(codesome): Integrate histograms into the PromQL testing framework
// and write more tests there.
cases := []struct {
histograms []histogram.Histogram
expected histogram.FloatHistogram
}{
{
histograms: []histogram.Histogram{
{ {
Schema: 0, Schema: 0,
Count: 36, Count: 36,
@ -4033,9 +4211,116 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) {
}, },
{ {
Schema: 0, Schema: 0,
Count: 36, Count: 11,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 3,
PositiveSpans: []histogram.Span{
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{2, -1},
NegativeSpans: []histogram.Span{
{Offset: 2, Length: 2},
},
NegativeBuckets: []int64{3, -1},
},
},
expected: histogram.FloatHistogram{
Schema: 0,
Count: 25,
Sum: 1111.1, Sum: 1111.1,
ZeroThreshold: 0.001, ZeroThreshold: 0.001,
ZeroCount: 2,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 4},
},
PositiveBuckets: []float64{1, 1, 2, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 2},
{Offset: 1, Length: 1},
{Offset: 4, Length: 3},
},
NegativeBuckets: []float64{1, 1, 7, 5, 5, 2},
},
},
{
histograms: []histogram.Histogram{
{
Schema: 0,
Count: 36,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
{
Schema: 1,
Count: 11,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 3,
PositiveSpans: []histogram.Span{
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{2, -1},
NegativeSpans: []histogram.Span{
{Offset: 2, Length: 2},
},
NegativeBuckets: []int64{3, -1},
},
},
expected: histogram.FloatHistogram{
Schema: 0,
Count: 25,
Sum: 1111.1,
ZeroThreshold: 0.001,
ZeroCount: 2,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 1},
{Offset: 1, Length: 5},
},
PositiveBuckets: []float64{1, 1, 2, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 4, Length: 3},
},
NegativeBuckets: []float64{-2, 2, 2, 7, 5, 5, 2},
},
},
{
histograms: []histogram.Histogram{
{
Schema: 1,
Count: 11,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 3,
PositiveSpans: []histogram.Span{
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{2, -1},
NegativeSpans: []histogram.Span{
{Offset: 2, Length: 2},
},
NegativeBuckets: []int64{3, -1},
},
{
Schema: 0,
Count: 36,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5, ZeroCount: 5,
PositiveSpans: []histogram.Span{ PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4}, {Offset: 0, Length: 4},
@ -4053,21 +4338,20 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) {
}, },
expected: histogram.FloatHistogram{ expected: histogram.FloatHistogram{
Schema: 0, Schema: 0,
Count: -25,
Sum: -1111.1,
ZeroThreshold: 0.001, ZeroThreshold: 0.001,
ZeroCount: 14, ZeroCount: -2,
Count: 93,
Sum: 4691.2,
PositiveSpans: []histogram.Span{ PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3}, {Offset: 0, Length: 1},
{Offset: 0, Length: 4}, {Offset: 1, Length: 5},
}, },
PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2}, PositiveBuckets: []float64{-1, -1, -2, -1, -1, -1},
NegativeSpans: []histogram.Span{ NegativeSpans: []histogram.Span{
{Offset: 0, Length: 4}, {Offset: 1, Length: 4},
{Offset: 0, Length: 2}, {Offset: 4, Length: 3},
{Offset: 3, Length: 3},
}, },
NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4}, NegativeBuckets: []float64{2, -2, -2, -7, -5, -5, -2},
}, },
}, },
} }
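For readers checking the expected values by hand: in the integer histogram.Histogram, PositiveBuckets and NegativeBuckets are delta-encoded (each entry is the difference from the previous bucket's count), whereas FloatHistogram stores absolute counts. A minimal decoding sketch, standard library only:

// decodeDeltas expands delta-encoded integer bucket counts into the
// absolute per-bucket counts that FloatHistogram carries.
func decodeDeltas(deltas []int64) []float64 {
	abs := make([]float64, 0, len(deltas))
	var cur int64
	for _, d := range deltas {
		cur += d
		abs = append(abs, float64(cur))
	}
	return abs
}

// decodeDeltas([]int64{2, -1})                  -> []float64{2, 1}
// decodeDeltas([]int64{1, 2, -2, 1, -1, 0, 0})  -> []float64{1, 3, 1, 2, 1, 1, 1}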
@ -4111,20 +4395,177 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) {
require.Equal(t, exp, vector) require.Equal(t, exp, vector)
} }
// sum(). // - operator.
queryString := fmt.Sprintf("sum(%s)", seriesName) queryString := fmt.Sprintf(`%s{idx="0"}`, seriesName)
queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
// + operator.
queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName)
for idx := 1; idx < len(c.histograms); idx++ { for idx := 1; idx < len(c.histograms); idx++ {
queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx) queryString += fmt.Sprintf(` - ignoring(idx) %s{idx="%d"}`, seriesName, idx)
} }
queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
})
idx0++
}
}
}
// count(). func TestNativeHistogram_MulDivOperator(t *testing.T) {
queryString = fmt.Sprintf("count(%s)", seriesName) // TODO(codesome): Integrate histograms into the PromQL testing framework
queryAndCheck(queryString, []Sample{{T: ts, F: 3, Metric: labels.EmptyLabels()}}) // and write more tests there.
originalHistogram := histogram.Histogram{
Schema: 0,
Count: 21,
Sum: 33,
ZeroThreshold: 0.001,
ZeroCount: 3,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{3, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
NegativeBuckets: []int64{3, 0, 0},
}
cases := []struct {
scalar float64
histogram histogram.Histogram
expectedMul histogram.FloatHistogram
expectedDiv histogram.FloatHistogram
}{
{
scalar: 3,
histogram: originalHistogram,
expectedMul: histogram.FloatHistogram{
Schema: 0,
Count: 63,
Sum: 99,
ZeroThreshold: 0.001,
ZeroCount: 9,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{9, 9, 9},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
NegativeBuckets: []float64{9, 9, 9},
},
expectedDiv: histogram.FloatHistogram{
Schema: 0,
Count: 7,
Sum: 11,
ZeroThreshold: 0.001,
ZeroCount: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
NegativeBuckets: []float64{1, 1, 1},
},
},
{
scalar: 0,
histogram: originalHistogram,
expectedMul: histogram.FloatHistogram{
Schema: 0,
Count: 0,
Sum: 0,
ZeroThreshold: 0.001,
ZeroCount: 0,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{0, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
NegativeBuckets: []float64{0, 0, 0},
},
expectedDiv: histogram.FloatHistogram{
Schema: 0,
Count: math.Inf(1),
Sum: math.Inf(1),
ZeroThreshold: 0.001,
ZeroCount: math.Inf(1),
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)},
},
},
}
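The expected values above follow from scalar scaling semantics: Mul multiplies Count, Sum, ZeroCount, and every bucket count by the factor, and Div divides them, so dividing by zero drives all of those fields to +Inf, as the second case asserts. A minimal sketch against model/histogram (Mul and Div as used elsewhere in this diff; both mutate in place, hence the copies):

import "github.com/prometheus/prometheus/model/histogram"

// scaleBoth returns h×f and h÷f without touching the original histogram.
func scaleBoth(h *histogram.FloatHistogram, f float64) (mul, div *histogram.FloatHistogram) {
	return h.Copy().Mul(f), h.Copy().Div(f)
}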
idx0 := int64(0)
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
test, err := NewTest(t, "")
require.NoError(t, err)
t.Cleanup(test.Close)
seriesName := "sparse_histogram_series"
floatSeriesName := "float_series"
engine := test.QueryEngine()
ts := idx0 * int64(10*time.Minute/time.Millisecond)
app := test.Storage().Appender(context.TODO())
h := c.histogram
lbls := labels.FromStrings("__name__", seriesName)
// Since we mutate h later, we need to create a copy here.
if floatHisto {
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
} else {
_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
}
require.NoError(t, err)
_, err = app.Append(0, labels.FromStrings("__name__", floatSeriesName), ts, c.scalar)
require.NoError(t, err)
require.NoError(t, app.Commit())
queryAndCheck := func(queryString string, exp Vector) {
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
vector, err := res.Vector()
require.NoError(t, err)
require.Equal(t, exp, vector)
}
// histogram * scalar.
queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar)
queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
// scalar * histogram.
queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName)
queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
// histogram * float.
queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName)
queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
// float * histogram.
queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName)
queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
// histogram / scalar.
queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar)
queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
// histogram / float.
queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName)
queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
}) })
idx0++ idx0++
} }

View file

@ -162,7 +162,7 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
if resultHistogram == nil { if resultHistogram == nil {
resultFloat *= factor resultFloat *= factor
} else { } else {
resultHistogram.Scale(factor) resultHistogram.Mul(factor)
} }
return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}) return append(enh.Out, Sample{F: resultFloat, H: resultHistogram})
@ -443,15 +443,40 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series)
return append(enh.Out, Sample{F: aggrFn(el)}) return append(enh.Out, Sample{F: aggrFn(el)})
} }
func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector {
el := vals[0].(Matrix)[0]
return append(enh.Out, Sample{H: aggrFn(el)})
}
// === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === // === avg_over_time(Matrix parser.ValueTypeMatrix) Vector ===
func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
if len(vals[0].(Matrix)[0].Floats) == 0 { if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 {
// TODO(beorn7): The passed values only contain // TODO(zenador): Add warning for mixed floats and histograms.
// histograms. avg_over_time ignores histograms for now. If
// there are only histograms, we have to return without adding
// anything to enh.Out.
return enh.Out return enh.Out
} }
if len(vals[0].(Matrix)[0].Floats) == 0 {
// The passed values only contain histograms.
return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
count := 1
mean := s.Histograms[0].H.Copy()
for _, h := range s.Histograms[1:] {
count++
left := h.H.Copy().Div(float64(count))
right := mean.Copy().Div(float64(count))
// The histogram being added/subtracted must have
// an equal or larger schema.
if h.H.Schema >= mean.Schema {
toAdd := right.Mul(-1).Add(left)
mean.Add(toAdd)
} else {
toAdd := left.Sub(right)
mean = toAdd.Add(mean)
}
}
return mean
})
}
return aggrOverTime(vals, enh, func(s Series) float64 { return aggrOverTime(vals, enh, func(s Series) float64 {
var mean, count, c float64 var mean, count, c float64
for _, f := range s.Floats { for _, f := range s.Floats {
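The histogram branch above computes a running mean rather than sum-then-divide: after k samples, mean_k = mean_{k-1} + (x_k − mean_{k-1})/k, with the difference split into x_k/k − mean_{k-1}/k so everything stays within the histogram Add/Sub/Div operations. The same recurrence on plain floats, as a sketch:

// streamingMean demonstrates the recurrence used for histograms above:
// mean_k = mean_{k-1} + (x_k - mean_{k-1})/k, with the difference split
// into x_k/k - mean_{k-1}/k as in the histogram version.
func streamingMean(xs []float64) float64 {
	var mean float64
	for i, x := range xs {
		k := float64(i + 1)
		mean += x/k - mean/k
	}
	return mean
}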
@ -558,13 +583,26 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
// === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === // === sum_over_time(Matrix parser.ValueTypeMatrix) Vector ===
func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
if len(vals[0].(Matrix)[0].Floats) == 0 { if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 {
// TODO(beorn7): The passed values only contain // TODO(zenador): Add warning for mixed floats and histograms.
// histograms. sum_over_time ignores histograms for now. If
// there are only histograms, we have to return without adding
// anything to enh.Out.
return enh.Out return enh.Out
} }
if len(vals[0].(Matrix)[0].Floats) == 0 {
// The passed values only contain histograms.
return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
sum := s.Histograms[0].H.Copy()
for _, h := range s.Histograms[1:] {
// The histogram being added must have
// an equal or larger schema.
if h.H.Schema >= sum.Schema {
sum.Add(h.H)
} else {
sum = h.H.Copy().Add(sum)
}
}
return sum
})
}
return aggrOverTime(vals, enh, func(s Series) float64 { return aggrOverTime(vals, enh, func(s Series) float64 {
var sum, c float64 var sum, c float64
for _, f := range s.Floats { for _, f := range s.Floats {
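Both new branches handle mixed schemas the same way: FloatHistogram.Add expects the histogram being added to have an equal or larger (finer) schema than the receiver, so when an incoming histogram is coarser than the running result, the operands are swapped and accumulation continues in a copy of the coarser one. The running result thus ends up at the coarsest schema seen. A condensed sketch of the pattern:

// addAnySchema folds h into sum, swapping operands when h has the
// coarser (smaller) schema, as funcSumOverTime does above.
func addAnySchema(sum, h *histogram.FloatHistogram) *histogram.FloatHistogram {
	if h.Schema >= sum.Schema {
		return sum.Add(h)
	}
	return h.Copy().Add(sum)
}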

View file

@ -58,7 +58,7 @@ const (
) )
func fuzzParseMetricWithContentType(in []byte, contentType string) int { func fuzzParseMetricWithContentType(in []byte, contentType string) int {
p, warning := textparse.New(in, contentType) p, warning := textparse.New(in, contentType, false)
if warning != nil { if warning != nil {
// An invalid content type is being passed, which should not happen // An invalid content type is being passed, which should not happen
// in this context. // in this context.
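The extra boolean threaded into textparse.New here carries the new scrape_classic_histograms option (see the scrape changes below): for metrics exposed as both a classic and a native histogram, true makes the protobuf parser also emit the classic _bucket/_count/_sum series, while false keeps the previous native-only behaviour. The fuzz target simply pins it to false. Shape of the call, as this diff wires it up:

// p, err := textparse.New(body, contentType, scrapeClassicHistograms)
//   false: parse the native histogram only (previous behaviour)
//   true:  additionally emit the classic _bucket/_count/_sum series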

scrape/clientprotobuf.go Normal file
View file

@ -0,0 +1,54 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scrape
import (
"bytes"
"encoding/binary"
"github.com/gogo/protobuf/proto"
// Intentionally using client model to simulate client in tests.
dto "github.com/prometheus/client_model/go"
)
// Write a MetricFamily into a protobuf.
// This function is intended for testing scraping by providing protobuf serialized input.
func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) {
buffer := &bytes.Buffer{}
err := AddMetricFamilyToProtobuf(buffer, metricFamily)
if err != nil {
return nil, err
}
return buffer.Bytes(), nil
}
// Append a MetricFamily protobuf representation to a buffer.
// This function is intended for testing scraping by providing protobuf serialized input.
func AddMetricFamilyToProtobuf(buffer *bytes.Buffer, metricFamily *dto.MetricFamily) error {
protoBuf, err := proto.Marshal(metricFamily)
if err != nil {
return err
}
varintBuf := make([]byte, binary.MaxVarintLen32)
varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf)))
_, err = buffer.Write(varintBuf[:varintLength])
if err != nil {
return err
}
_, err = buffer.Write(protoBuf)
return err
}
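The uvarint length prefix written above is what makes the output parseable as a stream of delimited MetricFamily messages, matching the protobuf exposition format Prometheus scrapes. A hedged usage sketch living in the same package (metric name and value are made up):

package scrape

import (
	"github.com/gogo/protobuf/proto"

	dto "github.com/prometheus/client_model/go"
)

func exampleDelimitedFamily() ([]byte, error) {
	mf := &dto.MetricFamily{
		Name: proto.String("test_counter_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(42)}},
		},
	}
	// Result: uvarint(len(payload)) followed by the marshalled payload.
	return MetricFamilyToProtobuf(mf)
}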

View file

@ -191,6 +191,12 @@ var (
}, },
[]string{"scrape_job"}, []string{"scrape_job"},
) )
targetScrapeNativeHistogramBucketLimit = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total",
Help: "Total number of scrapes that hit the native histogram bucket limit and were rejected.",
},
)
) )
func init() { func init() {
@ -216,6 +222,7 @@ func init() {
targetScrapeExemplarOutOfOrder, targetScrapeExemplarOutOfOrder,
targetScrapePoolExceededLabelLimits, targetScrapePoolExceededLabelLimits,
targetSyncFailed, targetSyncFailed,
targetScrapeNativeHistogramBucketLimit,
) )
} }
@ -256,11 +263,13 @@ type scrapeLoopOptions struct {
target *Target target *Target
scraper scraper scraper scraper
sampleLimit int sampleLimit int
bucketLimit int
labelLimits *labelLimits labelLimits *labelLimits
honorLabels bool honorLabels bool
honorTimestamps bool honorTimestamps bool
interval time.Duration interval time.Duration
timeout time.Duration timeout time.Duration
scrapeClassicHistograms bool
mrc []*relabel.Config mrc []*relabel.Config
cache *scrapeCache cache *scrapeCache
} }
@ -319,9 +328,11 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
jitterSeed, jitterSeed,
opts.honorTimestamps, opts.honorTimestamps,
opts.sampleLimit, opts.sampleLimit,
opts.bucketLimit,
opts.labelLimits, opts.labelLimits,
opts.interval, opts.interval,
opts.timeout, opts.timeout,
opts.scrapeClassicHistograms,
options.ExtraMetrics, options.ExtraMetrics,
options.EnableMetadataStorage, options.EnableMetadataStorage,
opts.target, opts.target,
@ -412,6 +423,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
timeout = time.Duration(sp.config.ScrapeTimeout) timeout = time.Duration(sp.config.ScrapeTimeout)
bodySizeLimit = int64(sp.config.BodySizeLimit) bodySizeLimit = int64(sp.config.BodySizeLimit)
sampleLimit = int(sp.config.SampleLimit) sampleLimit = int(sp.config.SampleLimit)
bucketLimit = int(sp.config.NativeHistogramBucketLimit)
labelLimits = &labelLimits{ labelLimits = &labelLimits{
labelLimit: int(sp.config.LabelLimit), labelLimit: int(sp.config.LabelLimit),
labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), labelNameLengthLimit: int(sp.config.LabelNameLengthLimit),
@ -446,6 +458,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
target: t, target: t,
scraper: s, scraper: s,
sampleLimit: sampleLimit, sampleLimit: sampleLimit,
bucketLimit: bucketLimit,
labelLimits: labelLimits, labelLimits: labelLimits,
honorLabels: honorLabels, honorLabels: honorLabels,
honorTimestamps: honorTimestamps, honorTimestamps: honorTimestamps,
@ -530,6 +543,7 @@ func (sp *scrapePool) sync(targets []*Target) {
timeout = time.Duration(sp.config.ScrapeTimeout) timeout = time.Duration(sp.config.ScrapeTimeout)
bodySizeLimit = int64(sp.config.BodySizeLimit) bodySizeLimit = int64(sp.config.BodySizeLimit)
sampleLimit = int(sp.config.SampleLimit) sampleLimit = int(sp.config.SampleLimit)
bucketLimit = int(sp.config.NativeHistogramBucketLimit)
labelLimits = &labelLimits{ labelLimits = &labelLimits{
labelLimit: int(sp.config.LabelLimit), labelLimit: int(sp.config.LabelLimit),
labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), labelNameLengthLimit: int(sp.config.LabelNameLengthLimit),
@ -538,6 +552,7 @@ func (sp *scrapePool) sync(targets []*Target) {
honorLabels = sp.config.HonorLabels honorLabels = sp.config.HonorLabels
honorTimestamps = sp.config.HonorTimestamps honorTimestamps = sp.config.HonorTimestamps
mrc = sp.config.MetricRelabelConfigs mrc = sp.config.MetricRelabelConfigs
scrapeClassicHistograms = sp.config.ScrapeClassicHistograms
) )
sp.targetMtx.Lock() sp.targetMtx.Lock()
@ -559,12 +574,14 @@ func (sp *scrapePool) sync(targets []*Target) {
target: t, target: t,
scraper: s, scraper: s,
sampleLimit: sampleLimit, sampleLimit: sampleLimit,
bucketLimit: bucketLimit,
labelLimits: labelLimits, labelLimits: labelLimits,
honorLabels: honorLabels, honorLabels: honorLabels,
honorTimestamps: honorTimestamps, honorTimestamps: honorTimestamps,
mrc: mrc, mrc: mrc,
interval: interval, interval: interval,
timeout: timeout, timeout: timeout,
scrapeClassicHistograms: scrapeClassicHistograms,
}) })
if err != nil { if err != nil {
l.setForcedError(err) l.setForcedError(err)
@ -731,17 +748,24 @@ func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels
} }
// appender returns an appender for ingested samples from the target. // appender returns an appender for ingested samples from the target.
func appender(app storage.Appender, limit int) storage.Appender { func appender(app storage.Appender, sampleLimit, bucketLimit int) storage.Appender {
app = &timeLimitAppender{ app = &timeLimitAppender{
Appender: app, Appender: app,
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
} }
// The limit is applied after metrics are potentially dropped via relabeling. // The sampleLimit is applied after metrics are potentially dropped via relabeling.
if limit > 0 { if sampleLimit > 0 {
app = &limitAppender{ app = &limitAppender{
Appender: app, Appender: app,
limit: limit, limit: sampleLimit,
}
}
if bucketLimit > 0 {
app = &bucketLimitAppender{
Appender: app,
limit: bucketLimit,
} }
} }
return app return app
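For orientation, the nesting that appender now produces (and which the updated TestScrapePoolAppender below asserts): the bucket limit wraps everything, so an appended native histogram is checked against its bucket count before any other constraint.

// appender(base, sampleLimit, bucketLimit) nests, outermost first:
//
//   bucketLimitAppender{ limitAppender{ timeLimitAppender{ base } } }
//
// (the two limit wrappers are only added when their limit is > 0;
// the time guard is unconditional)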
@ -872,9 +896,11 @@ type scrapeLoop struct {
forcedErr error forcedErr error
forcedErrMtx sync.Mutex forcedErrMtx sync.Mutex
sampleLimit int sampleLimit int
bucketLimit int
labelLimits *labelLimits labelLimits *labelLimits
interval time.Duration interval time.Duration
timeout time.Duration timeout time.Duration
scrapeClassicHistograms bool
appender func(ctx context.Context) storage.Appender appender func(ctx context.Context) storage.Appender
sampleMutator labelsMutator sampleMutator labelsMutator
@ -1152,9 +1178,11 @@ func newScrapeLoop(ctx context.Context,
jitterSeed uint64, jitterSeed uint64,
honorTimestamps bool, honorTimestamps bool,
sampleLimit int, sampleLimit int,
bucketLimit int,
labelLimits *labelLimits, labelLimits *labelLimits,
interval time.Duration, interval time.Duration,
timeout time.Duration, timeout time.Duration,
scrapeClassicHistograms bool,
reportExtraMetrics bool, reportExtraMetrics bool,
appendMetadataToWAL bool, appendMetadataToWAL bool,
target *Target, target *Target,
@ -1195,9 +1223,11 @@ func newScrapeLoop(ctx context.Context,
appenderCtx: appenderCtx, appenderCtx: appenderCtx,
honorTimestamps: honorTimestamps, honorTimestamps: honorTimestamps,
sampleLimit: sampleLimit, sampleLimit: sampleLimit,
bucketLimit: bucketLimit,
labelLimits: labelLimits, labelLimits: labelLimits,
interval: interval, interval: interval,
timeout: timeout, timeout: timeout,
scrapeClassicHistograms: scrapeClassicHistograms,
reportExtraMetrics: reportExtraMetrics, reportExtraMetrics: reportExtraMetrics,
appendMetadataToWAL: appendMetadataToWAL, appendMetadataToWAL: appendMetadataToWAL,
} }
@ -1469,7 +1499,7 @@ type appendErrors struct {
} }
func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
p, err := textparse.New(b, contentType) p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms)
if err != nil { if err != nil {
level.Debug(sl.l).Log( level.Debug(sl.l).Log(
"msg", "Invalid content type on scrape, using prometheus parser as fallback.", "msg", "Invalid content type on scrape, using prometheus parser as fallback.",
@ -1482,6 +1512,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
defTime = timestamp.FromTime(ts) defTime = timestamp.FromTime(ts)
appErrs = appendErrors{} appErrs = appendErrors{}
sampleLimitErr error sampleLimitErr error
bucketLimitErr error
e exemplar.Exemplar // escapes to heap so hoisted out of loop e exemplar.Exemplar // escapes to heap so hoisted out of loop
meta metadata.Metadata meta metadata.Metadata
metadataChanged bool metadataChanged bool
@ -1510,7 +1541,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
} }
// Take an appender with limits. // Take an appender with limits.
app = appender(app, sl.sampleLimit) app = appender(app, sl.sampleLimit, sl.bucketLimit)
defer func() { defer func() {
if err != nil { if err != nil {
@ -1631,7 +1662,7 @@ loop:
} else { } else {
ref, err = app.Append(ref, lset, t, val) ref, err = app.Append(ref, lset, t, val)
} }
sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &appErrs) sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
if err != nil { if err != nil {
if err != storage.ErrNotFound { if err != storage.ErrNotFound {
level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err) level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)
@ -1645,7 +1676,7 @@ loop:
sl.cache.trackStaleness(hash, lset) sl.cache.trackStaleness(hash, lset)
} }
sl.cache.addRef(met, ref, lset, hash) sl.cache.addRef(met, ref, lset, hash)
if sampleAdded && sampleLimitErr == nil { if sampleAdded && sampleLimitErr == nil && bucketLimitErr == nil {
seriesAdded++ seriesAdded++
} }
} }
@ -1681,6 +1712,13 @@ loop:
// We only want to increment this once per scrape, so this is Inc'd outside the loop. // We only want to increment this once per scrape, so this is Inc'd outside the loop.
targetScrapeSampleLimit.Inc() targetScrapeSampleLimit.Inc()
} }
if bucketLimitErr != nil {
if err == nil {
err = bucketLimitErr // A sample-limit error set above takes precedence.
}
// We only want to increment this once per scrape, so this is Inc'd outside the loop.
targetScrapeNativeHistogramBucketLimit.Inc()
}
if appErrs.numOutOfOrder > 0 { if appErrs.numOutOfOrder > 0 {
level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder)
} }
@ -1710,8 +1748,8 @@ loop:
} }
// Adds samples to the appender, checking the error, and then returns the # of samples added, // Adds samples to the appender, checking the error, and then returns the # of samples added,
// whether the caller should continue to process more samples, and any sample limit errors. // whether the caller should continue to process more samples, and any sample or bucket limit errors.
func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr *error, appErrs *appendErrors) (bool, error) { func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
switch errors.Cause(err) { switch errors.Cause(err) {
case nil: case nil:
if tp == nil && ce != nil { if tp == nil && ce != nil {
@ -1740,6 +1778,11 @@ func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err e
// total number of samples scraped. // total number of samples scraped.
*sampleLimitErr = err *sampleLimitErr = err
return false, nil return false, nil
case errBucketLimit:
// Keep on parsing output if we hit the limit, so we report the correct
// total number of samples scraped.
*bucketLimitErr = err
return false, nil
default: default:
return false, err return false, err
} }

View file

@ -30,6 +30,7 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
config_util "github.com/prometheus/common/config" config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -489,7 +490,7 @@ func TestScrapePoolAppender(t *testing.T) {
appl, ok := loop.(*scrapeLoop) appl, ok := loop.(*scrapeLoop)
require.True(t, ok, "Expected scrapeLoop but got %T", loop) require.True(t, ok, "Expected scrapeLoop but got %T", loop)
wrapped := appender(appl.appender(context.Background()), 0) wrapped := appender(appl.appender(context.Background()), 0, 0)
tl, ok := wrapped.(*timeLimitAppender) tl, ok := wrapped.(*timeLimitAppender)
require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped) require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped)
@ -505,7 +506,7 @@ func TestScrapePoolAppender(t *testing.T) {
appl, ok = loop.(*scrapeLoop) appl, ok = loop.(*scrapeLoop)
require.True(t, ok, "Expected scrapeLoop but got %T", loop) require.True(t, ok, "Expected scrapeLoop but got %T", loop)
wrapped = appender(appl.appender(context.Background()), sampleLimit) wrapped = appender(appl.appender(context.Background()), sampleLimit, 0)
sl, ok := wrapped.(*limitAppender) sl, ok := wrapped.(*limitAppender)
require.True(t, ok, "Expected limitAppender but got %T", wrapped) require.True(t, ok, "Expected limitAppender but got %T", wrapped)
@ -515,6 +516,20 @@ func TestScrapePoolAppender(t *testing.T) {
_, ok = tl.Appender.(nopAppender) _, ok = tl.Appender.(nopAppender)
require.True(t, ok, "Expected base appender but got %T", tl.Appender) require.True(t, ok, "Expected base appender but got %T", tl.Appender)
wrapped = appender(appl.appender(context.Background()), sampleLimit, 100)
bl, ok := wrapped.(*bucketLimitAppender)
require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped)
sl, ok = bl.Appender.(*limitAppender)
require.True(t, ok, "Expected limitAppender but got %T", bl)
tl, ok = sl.Appender.(*timeLimitAppender)
require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender)
_, ok = tl.Appender.(nopAppender)
require.True(t, ok, "Expected base appender but got %T", tl.Appender)
} }
func TestScrapePoolRaces(t *testing.T) { func TestScrapePoolRaces(t *testing.T) {
@ -612,12 +627,13 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
nopMutator, nopMutator,
nil, nil, 0, nil, nil, 0,
true, true,
0, 0, 0,
nil, nil,
1, 1,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -684,12 +700,13 @@ func TestScrapeLoopStop(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
10*time.Millisecond, 10*time.Millisecond,
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -760,12 +777,13 @@ func TestScrapeLoopRun(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
time.Second, time.Second,
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -815,12 +833,13 @@ func TestScrapeLoopRun(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
time.Second, time.Second,
100*time.Millisecond, 100*time.Millisecond,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -874,12 +893,13 @@ func TestScrapeLoopForcedErr(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
time.Second, time.Second,
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -932,12 +952,13 @@ func TestScrapeLoopMetadata(t *testing.T) {
cache, cache,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -989,12 +1010,13 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1049,12 +1071,13 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1127,12 +1150,13 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
10*time.Millisecond, 10*time.Millisecond,
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1190,12 +1214,13 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
10*time.Millisecond, 10*time.Millisecond,
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1256,12 +1281,13 @@ func TestScrapeLoopCache(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
10*time.Millisecond, 10*time.Millisecond,
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1339,12 +1365,13 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
10*time.Millisecond, 10*time.Millisecond,
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1453,12 +1480,13 @@ func TestScrapeLoopAppend(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1548,7 +1576,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil) return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
}, },
nil, nil,
func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, nil, 0, 0, false, false, nil, false, func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, 0, nil, 0, 0, false, false, false, nil, false,
) )
slApp := sl.appender(context.Background()) slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)) _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
@ -1579,12 +1607,13 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1592,7 +1621,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
fakeRef := storage.SeriesRef(1) fakeRef := storage.SeriesRef(1)
expValue := float64(1) expValue := float64(1)
metric := []byte(`metric{n="1"} 1`) metric := []byte(`metric{n="1"} 1`)
p, warning := textparse.New(metric, "") p, warning := textparse.New(metric, "", false)
require.NoError(t, warning) require.NoError(t, warning)
var lset labels.Labels var lset labels.Labels
@ -1637,12 +1666,13 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
nil, nil,
0, 0,
true, true,
app.limit, app.limit, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1697,6 +1727,105 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
require.Equal(t, 0, seriesAdded) require.Equal(t, 0, seriesAdded)
} }
func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
resApp := &collectResultAppender{}
app := &bucketLimitAppender{Appender: resApp, limit: 2}
sl := newScrapeLoop(context.Background(),
nil, nil, nil,
func(l labels.Labels) labels.Labels {
if l.Has("deleteme") {
return labels.EmptyLabels()
}
return l
},
nopMutator,
func(ctx context.Context) storage.Appender { return app },
nil,
0,
true,
app.limit, 0,
nil,
0,
0,
false,
false,
false,
nil,
false,
)
metric := dto.Metric{}
err := targetScrapeNativeHistogramBucketLimit.Write(&metric)
require.NoError(t, err)
beforeMetricValue := metric.GetCounter().GetValue()
nativeHistogram := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "testing",
Name: "example_native_histogram",
Help: "This is used for testing",
ConstLabels: map[string]string{"some": "value"},
NativeHistogramBucketFactor: 1.1, // 10% increase from bucket to bucket
NativeHistogramMaxBucketNumber: 100, // intentionally higher than the limit we'll use in the scraper
},
[]string{"size"},
)
registry := prometheus.NewRegistry()
registry.Register(nativeHistogram)
nativeHistogram.WithLabelValues("S").Observe(1.0)
nativeHistogram.WithLabelValues("M").Observe(1.0)
nativeHistogram.WithLabelValues("L").Observe(1.0)
nativeHistogram.WithLabelValues("M").Observe(10.0)
nativeHistogram.WithLabelValues("L").Observe(10.0) // in different bucket since > 1*1.1
gathered, err := registry.Gather()
require.NoError(t, err)
require.NotEmpty(t, gathered)
histogramMetricFamily := gathered[0]
msg, err := MetricFamilyToProtobuf(histogramMetricFamily)
require.NoError(t, err)
now := time.Now()
total, added, seriesAdded, err := sl.append(app, msg, "application/vnd.google.protobuf", now)
require.NoError(t, err)
require.Equal(t, 3, total)
require.Equal(t, 3, added)
require.Equal(t, 3, seriesAdded)
err = targetScrapeNativeHistogramBucketLimit.Write(&metric)
require.NoError(t, err)
metricValue := metric.GetCounter().GetValue()
require.Equal(t, beforeMetricValue, metricValue)
beforeMetricValue = metricValue
nativeHistogram.WithLabelValues("L").Observe(100.0) // in different bucket since > 10*1.1
gathered, err = registry.Gather()
require.NoError(t, err)
require.NotEmpty(t, gathered)
histogramMetricFamily = gathered[0]
msg, err = MetricFamilyToProtobuf(histogramMetricFamily)
require.NoError(t, err)
now = time.Now()
total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now)
if err != errBucketLimit {
t.Fatalf("Did not see expected histogram bucket limit error: %s", err)
}
require.NoError(t, app.Rollback())
require.Equal(t, 3, total)
require.Equal(t, 3, added)
require.Equal(t, 0, seriesAdded)
err = targetScrapeNativeHistogramBucketLimit.Write(&metric)
require.NoError(t, err)
metricValue = metric.GetCounter().GetValue()
require.Equal(t, beforeMetricValue+1, metricValue)
}
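Why the second scrape above trips the limit of 2: native histogram bucket boundaries grow by a factor of 2^(2^-schema), and for NativeHistogramBucketFactor 1.1 client_golang selects schema 3 (the finest standard schema whose growth stays at or below 1.1), so observations share a bucket only when they differ by less than roughly 9%. The observations 1.0, 10.0, and 100.0 are each far more than 9% apart, so series "L" accumulates three distinct buckets and exceeds the appender's limit of 2. A hedged check of the growth arithmetic:

import "math"

// growth returns the per-bucket growth factor for a native histogram
// schema: bucket boundaries are powers of 2^(2^-schema).
func growth(schema int) float64 {
	return math.Pow(2, math.Pow(2, -float64(schema)))
}

// growth(2) ≈ 1.1892 (> 1.1, too coarse for factor 1.1)
// growth(3) ≈ 1.0905 (≤ 1.1, selected)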
func TestScrapeLoop_ChangingMetricString(t *testing.T) { func TestScrapeLoop_ChangingMetricString(t *testing.T) {
// This is a regression test for the scrape loop cache not properly maintaining // This is a regression test for the scrape loop cache not properly maintaining
// IDs when the string representation of a metric changes across a scrape. Thus // IDs when the string representation of a metric changes across a scrape. Thus
@ -1714,12 +1843,13 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1762,12 +1892,13 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1813,12 +1944,13 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1924,12 +2056,13 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -1989,12 +2122,13 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2041,12 +2175,13 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
10*time.Millisecond, 10*time.Millisecond,
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2077,12 +2212,13 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
10*time.Millisecond, 10*time.Millisecond,
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2126,12 +2262,13 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2171,12 +2308,13 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2443,12 +2581,13 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
func(ctx context.Context) storage.Appender { return capp }, func(ctx context.Context) storage.Appender { return capp },
nil, 0, nil, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2484,12 +2623,13 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
func(ctx context.Context) storage.Appender { return capp }, func(ctx context.Context) storage.Appender { return capp },
nil, 0, nil, 0,
false, false,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2524,12 +2664,13 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2582,12 +2723,13 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2845,12 +2987,13 @@ func TestScrapeAddFast(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -2908,7 +3051,7 @@ func TestReuseCacheRace(*testing.T) {
func TestCheckAddError(t *testing.T) { func TestCheckAddError(t *testing.T) {
var appErrs appendErrors var appErrs appendErrors
sl := scrapeLoop{l: log.NewNopLogger()} sl := scrapeLoop{l: log.NewNopLogger()}
sl.checkAddError(nil, nil, nil, storage.ErrOutOfOrderSample, nil, &appErrs) sl.checkAddError(nil, nil, nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs)
require.Equal(t, 1, appErrs.numOutOfOrder) require.Equal(t, 1, appErrs.numOutOfOrder)
} }
@ -2931,12 +3074,13 @@ func TestScrapeReportSingleAppender(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
nil, nil,
10*time.Millisecond, 10*time.Millisecond,
time.Hour, time.Hour,
false, false,
false, false,
false,
nil, nil,
false, false,
) )
@ -3133,12 +3277,13 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
nil, nil,
0, 0,
true, true,
0, 0, 0,
&test.labelLimits, &test.labelLimits,
0, 0,
0, 0,
false, false,
false, false,
false,
nil, nil,
false, false,
) )

View file

@ -27,6 +27,7 @@ import (
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/model/textparse"
@ -313,7 +314,10 @@ func (ts Targets) Len() int { return len(ts) }
func (ts Targets) Less(i, j int) bool { return ts[i].URL().String() < ts[j].URL().String() } func (ts Targets) Less(i, j int) bool { return ts[i].URL().String() < ts[j].URL().String() }
func (ts Targets) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } func (ts Targets) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] }
var errSampleLimit = errors.New("sample limit exceeded") var (
errSampleLimit = errors.New("sample limit exceeded")
errBucketLimit = errors.New("histogram bucket limit exceeded")
)
// limitAppender limits the number of total appended samples in a batch. // limitAppender limits the number of total appended samples in a batch.
type limitAppender struct { type limitAppender struct {
@ -355,6 +359,31 @@ func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels,
return ref, nil return ref, nil
} }
// bucketLimitAppender limits the number of buckets per appended native histogram sample.
type bucketLimitAppender struct {
storage.Appender
limit int
}
func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if h != nil {
if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit {
return 0, errBucketLimit
}
}
if fh != nil {
if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit {
return 0, errBucketLimit
}
}
ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh)
if err != nil {
return 0, err
}
return ref, nil
}
// PopulateLabels builds a label set from the given label set and scrape configuration. // PopulateLabels builds a label set from the given label set and scrape configuration.
// It returns a label set before relabeling was applied as the second return value. // It returns a label set before relabeling was applied as the second return value.
// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling.

View file

@ -31,6 +31,7 @@ import (
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
) )
@ -488,3 +489,63 @@ scrape_configs:
}) })
} }
} }
func TestBucketLimitAppender(t *testing.T) {
example := histogram.Histogram{
Schema: 0,
Count: 21,
Sum: 33,
ZeroThreshold: 0.001,
ZeroCount: 3,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{3, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
NegativeBuckets: []int64{3, 0, 0},
}
cases := []struct {
h histogram.Histogram
limit int
expectError bool
}{
{
h: example,
limit: 3,
expectError: true,
},
{
h: example,
limit: 10,
expectError: false,
},
}
resApp := &collectResultAppender{}
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
app := &bucketLimitAppender{Appender: resApp, limit: c.limit}
ts := int64(10 * time.Minute / time.Millisecond)
h := c.h
lbls := labels.FromStrings("__name__", "sparse_histogram_series")
var err error
if floatHisto {
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
} else {
_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
}
if c.expectError {
require.Error(t, err)
} else {
require.NoError(t, err)
}
require.NoError(t, app.Commit())
})
}
}
}

View file

@ -242,15 +242,16 @@ func (s fhSample) Type() chunkenc.ValueType {
type sampleRing struct { type sampleRing struct {
delta int64 delta int64
// Lookback buffers. We use buf for mixed samples, but one of the three // Lookback buffers. We use iBuf for mixed samples, but one of the three
// concrete ones for homogenous samples. (Only one of the four bufs is // concrete ones for homogenous samples. (Only one of the four bufs is
// allowed to be populated!) This avoids the overhead of the interface // allowed to be populated!) This avoids the overhead of the interface
// wrapper for the happy (and by far most common) case of homogenous // wrapper for the happy (and by far most common) case of homogenous
// samples. // samples.
buf []tsdbutil.Sample iBuf []tsdbutil.Sample
fBuf []fSample fBuf []fSample
hBuf []hSample hBuf []hSample
fhBuf []fhSample fhBuf []fhSample
bufInUse bufType
i int // Position of most recent element in ring buffer. i int // Position of most recent element in ring buffer.
f int // Position of first element in ring buffer. f int // Position of first element in ring buffer.
@ -259,6 +260,16 @@ type sampleRing struct {
it sampleRingIterator it sampleRingIterator
} }
type bufType int
const (
noBuf bufType = iota // Nothing yet stored in sampleRing.
iBuf
fBuf
hBuf
fhBuf
)
// newSampleRing creates a new sampleRing. If you do not know the preferred
// value type yet, use a size of 0 (in which case the provided typ doesn't
// value type yet, use a size of 0 (in which case the provided typ doesn't // value type yet, use a size of 0 (in which case the provided typ doesn't
// matter). On the first add, a buffer of size 16 will be allocated with the // matter). On the first add, a buffer of size 16 will be allocated with the
@ -278,7 +289,7 @@ func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing {
case chunkenc.ValFloatHistogram: case chunkenc.ValFloatHistogram:
r.fhBuf = make([]fhSample, size) r.fhBuf = make([]fhSample, size)
default: default:
r.buf = make([]tsdbutil.Sample, size) r.iBuf = make([]tsdbutil.Sample, size)
} }
return r return r
} }
@ -287,6 +298,7 @@ func (r *sampleRing) reset() {
r.l = 0 r.l = 0
r.i = -1 r.i = -1
r.f = 0 r.f = 0
r.bufInUse = noBuf
} }
// Returns the current iterator. Invalidates previously returned iterators. // Returns the current iterator. Invalidates previously returned iterators.
@ -310,18 +322,18 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType {
if it.i >= it.r.l { if it.i >= it.r.l {
return chunkenc.ValNone return chunkenc.ValNone
} }
switch { switch it.r.bufInUse {
case len(it.r.fBuf) > 0: case fBuf:
s := it.r.atF(it.i) s := it.r.atF(it.i)
it.t = s.t it.t = s.t
it.f = s.f it.f = s.f
return chunkenc.ValFloat return chunkenc.ValFloat
case len(it.r.hBuf) > 0: case hBuf:
s := it.r.atH(it.i) s := it.r.atH(it.i)
it.t = s.t it.t = s.t
it.h = s.h it.h = s.h
return chunkenc.ValHistogram return chunkenc.ValHistogram
case len(it.r.fhBuf) > 0: case fhBuf:
s := it.r.atFH(it.i) s := it.r.atFH(it.i)
it.t = s.t it.t = s.t
it.fh = s.fh it.fh = s.fh
@ -372,8 +384,8 @@ func (it *sampleRingIterator) AtT() int64 {
} }
func (r *sampleRing) at(i int) tsdbutil.Sample { func (r *sampleRing) at(i int) tsdbutil.Sample {
j := (r.f + i) % len(r.buf) j := (r.f + i) % len(r.iBuf)
return r.buf[j] return r.iBuf[j]
} }
func (r *sampleRing) atF(i int) fSample { func (r *sampleRing) atF(i int) fSample {
@ -397,91 +409,113 @@ func (r *sampleRing) atFH(i int) fhSample {
// from this package (fSample, hSample, fhSample), call one of the specialized // from this package (fSample, hSample, fhSample), call one of the specialized
// methods addF, addH, or addFH for better performance. // methods addF, addH, or addFH for better performance.
func (r *sampleRing) add(s tsdbutil.Sample) { func (r *sampleRing) add(s tsdbutil.Sample) {
if len(r.buf) == 0 { if r.bufInUse == noBuf {
// First sample.
switch s := s.(type) {
case fSample:
r.bufInUse = fBuf
r.fBuf = addF(s, r.fBuf, r)
case hSample:
r.bufInUse = hBuf
r.hBuf = addH(s, r.hBuf, r)
case fhSample:
r.bufInUse = fhBuf
r.fhBuf = addFH(s, r.fhBuf, r)
}
return
}
if r.bufInUse != iBuf {
// Nothing added to the interface buf yet. Let's check if we can // Nothing added to the interface buf yet. Let's check if we can
// stay specialized. // stay specialized.
switch s := s.(type) { switch s := s.(type) {
case fSample: case fSample:
if len(r.hBuf)+len(r.fhBuf) == 0 { if r.bufInUse == fBuf {
r.fBuf = addF(s, r.fBuf, r) r.fBuf = addF(s, r.fBuf, r)
return return
} }
case hSample: case hSample:
if len(r.fBuf)+len(r.fhBuf) == 0 { if r.bufInUse == hBuf {
r.hBuf = addH(s, r.hBuf, r) r.hBuf = addH(s, r.hBuf, r)
return return
} }
case fhSample: case fhSample:
if len(r.fBuf)+len(r.hBuf) == 0 { if r.bufInUse == fhBuf {
r.fhBuf = addFH(s, r.fhBuf, r) r.fhBuf = addFH(s, r.fhBuf, r)
return return
} }
} }
// The new sample isn't a fit for the already existing // The new sample isn't a fit for the already existing
// ones. Copy the latter into the interface buffer where needed. // ones. Copy the latter into the interface buffer where needed.
switch { switch r.bufInUse {
case len(r.fBuf) > 0: case fBuf:
for _, s := range r.fBuf { for _, s := range r.fBuf {
r.buf = append(r.buf, s) r.iBuf = append(r.iBuf, s)
} }
r.fBuf = nil r.fBuf = nil
case len(r.hBuf) > 0: case hBuf:
for _, s := range r.hBuf { for _, s := range r.hBuf {
r.buf = append(r.buf, s) r.iBuf = append(r.iBuf, s)
} }
r.hBuf = nil r.hBuf = nil
case len(r.fhBuf) > 0: case fhBuf:
for _, s := range r.fhBuf { for _, s := range r.fhBuf {
r.buf = append(r.buf, s) r.iBuf = append(r.iBuf, s)
} }
r.fhBuf = nil r.fhBuf = nil
} }
r.bufInUse = iBuf
} }
r.buf = addSample(s, r.buf, r) r.iBuf = addSample(s, r.iBuf, r)
} }
// addF is a version of the add method specialized for fSample. // addF is a version of the add method specialized for fSample.
func (r *sampleRing) addF(s fSample) { func (r *sampleRing) addF(s fSample) {
switch { switch r.bufInUse {
case len(r.buf) > 0: case fBuf: // Add to existing fSamples.
// Already have interface samples. Add to the interface buf. r.fBuf = addF(s, r.fBuf, r)
r.buf = addSample(s, r.buf, r) case noBuf: // Add first sample.
case len(r.hBuf)+len(r.fhBuf) > 0: r.fBuf = addF(s, r.fBuf, r)
r.bufInUse = fBuf
case iBuf: // Already have interface samples. Add to the interface buf.
r.iBuf = addSample(s, r.iBuf, r)
default:
// Already have specialized samples that are not fSamples. // Already have specialized samples that are not fSamples.
// Need to call the checked add method for conversion. // Need to call the checked add method for conversion.
r.add(s) r.add(s)
default:
r.fBuf = addF(s, r.fBuf, r)
} }
} }
// addH is a version of the add method specialized for hSample. // addH is a version of the add method specialized for hSample.
func (r *sampleRing) addH(s hSample) { func (r *sampleRing) addH(s hSample) {
switch { switch r.bufInUse {
case len(r.buf) > 0: case hBuf: // Add to existing hSamples.
// Already have interface samples. Add to the interface buf. r.hBuf = addH(s, r.hBuf, r)
r.buf = addSample(s, r.buf, r) case noBuf: // Add first sample.
case len(r.fBuf)+len(r.fhBuf) > 0: r.hBuf = addH(s, r.hBuf, r)
// Already have samples that are not hSamples. r.bufInUse = hBuf
case iBuf: // Already have interface samples. Add to the interface buf.
r.iBuf = addSample(s, r.iBuf, r)
default:
// Already have specialized samples that are not hSamples.
// Need to call the checked add method for conversion. // Need to call the checked add method for conversion.
r.add(s) r.add(s)
default:
r.hBuf = addH(s, r.hBuf, r)
} }
} }
// addFH is a version of the add method specialized for fhSample. // addFH is a version of the add method specialized for fhSample.
func (r *sampleRing) addFH(s fhSample) { func (r *sampleRing) addFH(s fhSample) {
switch { switch r.bufInUse {
case len(r.buf) > 0: case fhBuf: // Add to existing fhSamples.
// Already have interface samples. Add to the interface buf. r.fhBuf = addFH(s, r.fhBuf, r)
r.buf = addSample(s, r.buf, r) case noBuf: // Add first sample.
case len(r.fBuf)+len(r.hBuf) > 0: r.fhBuf = addFH(s, r.fhBuf, r)
// Already have samples that are not fhSamples. r.bufInUse = fhBuf
case iBuf: // Already have interface samples. Add to the interface buf.
r.iBuf = addSample(s, r.iBuf, r)
default:
// Already have specialized samples that are not fhSamples.
// Need to call the checked add method for conversion. // Need to call the checked add method for conversion.
r.add(s) r.add(s)
default:
r.fhBuf = addFH(s, r.fhBuf, r)
} }
} }
@ -701,15 +735,15 @@ func (r *sampleRing) reduceDelta(delta int64) bool {
return true return true
} }
switch { switch r.bufInUse {
case len(r.fBuf) > 0: case fBuf:
genericReduceDelta(r.fBuf, r) genericReduceDelta(r.fBuf, r)
case len(r.hBuf) > 0: case hBuf:
genericReduceDelta(r.hBuf, r) genericReduceDelta(r.hBuf, r)
case len(r.fhBuf) > 0: case fhBuf:
genericReduceDelta(r.fhBuf, r) genericReduceDelta(r.fhBuf, r)
default: default:
genericReduceDelta(r.buf, r) genericReduceDelta(r.iBuf, r)
} }
return true return true
} }
@ -733,12 +767,12 @@ func (r *sampleRing) nthLast(n int) (tsdbutil.Sample, bool) {
return fSample{}, false return fSample{}, false
} }
i := r.l - n i := r.l - n
switch { switch r.bufInUse {
case len(r.fBuf) > 0: case fBuf:
return r.atF(i), true return r.atF(i), true
case len(r.hBuf) > 0: case hBuf:
return r.atH(i), true return r.atH(i), true
case len(r.fhBuf) > 0: case fhBuf:
return r.atFH(i), true return r.atFH(i), true
default: default:
return r.at(i), true return r.at(i), true
@ -751,15 +785,15 @@ func (r *sampleRing) samples() []tsdbutil.Sample {
k := r.f + r.l k := r.f + r.l
var j int var j int
switch { switch r.bufInUse {
case len(r.buf) > 0: case iBuf:
if k > len(r.buf) { if k > len(r.iBuf) {
k = len(r.buf) k = len(r.iBuf)
j = r.l - k + r.f j = r.l - k + r.f
} }
n := copy(res, r.buf[r.f:k]) n := copy(res, r.iBuf[r.f:k])
copy(res[n:], r.buf[:j]) copy(res[n:], r.iBuf[:j])
case len(r.fBuf) > 0: case fBuf:
if k > len(r.fBuf) { if k > len(r.fBuf) {
k = len(r.fBuf) k = len(r.fBuf)
j = r.l - k + r.f j = r.l - k + r.f
@ -770,7 +804,7 @@ func (r *sampleRing) samples() []tsdbutil.Sample {
for i, s := range resF { for i, s := range resF {
res[i] = s res[i] = s
} }
case len(r.hBuf) > 0: case hBuf:
if k > len(r.hBuf) { if k > len(r.hBuf) {
k = len(r.hBuf) k = len(r.hBuf)
j = r.l - k + r.f j = r.l - k + r.f
@ -781,7 +815,7 @@ func (r *sampleRing) samples() []tsdbutil.Sample {
for i, s := range resH { for i, s := range resH {
res[i] = s res[i] = s
} }
case len(r.fhBuf) > 0: case fhBuf:
if k > len(r.fhBuf) { if k > len(r.fhBuf) {
k = len(r.fhBuf) k = len(r.fhBuf)
j = r.l - k + r.f j = r.l - k + r.f
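The net effect of the bufInUse rewrite: instead of probing len() on each candidate buffer, the ring tracks an explicit four-state marker. A condensed view of the transitions, as descriptive comments:

// sampleRing.bufInUse state machine:
//
//   noBuf --first fSample--->  fBuf ---+
//   noBuf --first hSample--->  hBuf ---+-- any other sample type --> iBuf
//   noBuf --first fhSample--> fhBuf ---+
//
// iBuf (the interface buffer) is sticky: once sample types mix, all
// further adds go through it until reset() returns the ring to noBuf.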

View file

@ -21,6 +21,9 @@ import (
) )
// MemoizedSeriesIterator wraps an iterator with a buffer to look back the previous element. // MemoizedSeriesIterator wraps an iterator with a buffer to look back the previous element.
//
// This iterator regards integer histograms as float histograms; calls to Seek() and Next() will never return chunkenc.ValHistogram.
// This iterator deliberately does not implement chunkenc.Iterator.
type MemoizedSeriesIterator struct { type MemoizedSeriesIterator struct {
it chunkenc.Iterator it chunkenc.Iterator
delta int64 delta int64
@ -31,12 +34,7 @@ type MemoizedSeriesIterator struct {
// Keep track of the previously returned value. // Keep track of the previously returned value.
prevTime int64 prevTime int64
prevValue float64 prevValue float64
prevHistogram *histogram.Histogram
prevFloatHistogram *histogram.FloatHistogram prevFloatHistogram *histogram.FloatHistogram
// TODO(beorn7): MemoizedSeriesIterator is currently only used by the
// PromQL engine, which only works with FloatHistograms. For better
// performance, we could change MemoizedSeriesIterator to also only
// handle FloatHistograms.
} }
// NewMemoizedEmptyIterator is like NewMemoizedIterator but it's initialised with an empty iterator. // NewMemoizedEmptyIterator is like NewMemoizedIterator but it's initialised with an empty iterator.
@ -66,11 +64,11 @@ func (b *MemoizedSeriesIterator) Reset(it chunkenc.Iterator) {
// PeekPrev returns the previous element of the iterator. If there is none buffered, // PeekPrev returns the previous element of the iterator. If there is none buffered,
// ok is false. // ok is false.
func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, ok bool) { func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, fh *histogram.FloatHistogram, ok bool) {
if b.prevTime == math.MinInt64 { if b.prevTime == math.MinInt64 {
return 0, 0, nil, nil, false return 0, 0, nil, false
} }
return b.prevTime, b.prevValue, b.prevHistogram, b.prevFloatHistogram, true return b.prevTime, b.prevValue, b.prevFloatHistogram, true
} }
// Seek advances the iterator to the element at time t or greater. // Seek advances the iterator to the element at time t or greater.
@ -83,8 +81,11 @@ func (b *MemoizedSeriesIterator) Seek(t int64) chunkenc.ValueType {
b.prevTime = math.MinInt64 b.prevTime = math.MinInt64
b.valueType = b.it.Seek(t0) b.valueType = b.it.Seek(t0)
if b.valueType == chunkenc.ValNone { switch b.valueType {
case chunkenc.ValNone:
return chunkenc.ValNone return chunkenc.ValNone
case chunkenc.ValHistogram:
b.valueType = chunkenc.ValFloatHistogram
} }
b.lastTime = b.it.AtT() b.lastTime = b.it.AtT()
} }
@ -100,7 +101,8 @@ func (b *MemoizedSeriesIterator) Seek(t int64) chunkenc.ValueType {
return chunkenc.ValNone return chunkenc.ValNone
} }
// Next advances the iterator to the next element. // Next advances the iterator to the next element. Note that this does not check whether the element being buffered is
// within the time range of the current element and the duration of delta before.
func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType { func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType {
// Keep track of the previous element. // Keep track of the previous element.
switch b.valueType { switch b.valueType {
@ -108,15 +110,9 @@ func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType {
return chunkenc.ValNone return chunkenc.ValNone
case chunkenc.ValFloat: case chunkenc.ValFloat:
b.prevTime, b.prevValue = b.it.At() b.prevTime, b.prevValue = b.it.At()
b.prevHistogram = nil
b.prevFloatHistogram = nil b.prevFloatHistogram = nil
case chunkenc.ValHistogram: case chunkenc.ValHistogram, chunkenc.ValFloatHistogram:
b.prevValue = 0 b.prevValue = 0
b.prevTime, b.prevHistogram = b.it.AtHistogram()
_, b.prevFloatHistogram = b.it.AtFloatHistogram()
case chunkenc.ValFloatHistogram:
b.prevValue = 0
b.prevHistogram = nil
b.prevTime, b.prevFloatHistogram = b.it.AtFloatHistogram() b.prevTime, b.prevFloatHistogram = b.it.AtFloatHistogram()
} }
@ -124,6 +120,9 @@ func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType {
if b.valueType != chunkenc.ValNone { if b.valueType != chunkenc.ValNone {
b.lastTime = b.it.AtT() b.lastTime = b.it.AtT()
} }
if b.valueType == chunkenc.ValHistogram {
b.valueType = chunkenc.ValFloatHistogram
}
return b.valueType return b.valueType
} }
@ -132,21 +131,11 @@ func (b *MemoizedSeriesIterator) At() (int64, float64) {
return b.it.At() return b.it.At()
} }
// AtHistogram returns the current histogram element of the iterator.
func (b *MemoizedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
return b.it.AtHistogram()
}
// AtFloatHistogram returns the current float-histogram element of the iterator. // AtFloatHistogram returns the current float-histogram element of the iterator.
func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
return b.it.AtFloatHistogram() return b.it.AtFloatHistogram()
} }
// AtT returns the current timestamp of the iterator.
func (b *MemoizedSeriesIterator) AtT() int64 {
return b.it.AtT()
}
// Err returns the last encountered error. // Err returns the last encountered error.
func (b *MemoizedSeriesIterator) Err() error { func (b *MemoizedSeriesIterator) Err() error {
return b.it.Err() return b.it.Err()
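
With prevHistogram removed, integer histograms are transparently upgraded, so callers only ever observe ValFloat or ValFloatHistogram. A hedged usage sketch against the API as it stands after this diff (drainMemoized and the 5m delta are illustrative):

package example

import (
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// drainMemoized walks a series through a MemoizedSeriesIterator. Integer
// histograms are converted on the fly, so only ValFloat and
// ValFloatHistogram can reach this loop.
func drainMemoized(inner chunkenc.Iterator) {
	it := storage.NewMemoizedIterator(inner, 5*60*1000) // 5m lookback delta
	for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
		switch vt {
		case chunkenc.ValFloat:
			_, _ = it.At()
		case chunkenc.ValFloatHistogram:
			_, _ = it.AtFloatHistogram()
		}
		// PeekPrev now returns (t, v, fh, ok); the *histogram.Histogram
		// return value was dropped along with prevHistogram.
		if t, v, fh, ok := it.PeekPrev(); ok {
			_, _, _ = t, v, fh
		}
	}
}
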

View file

@ -18,23 +18,34 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
) )
func TestMemoizedSeriesIterator(t *testing.T) { func TestMemoizedSeriesIterator(t *testing.T) {
// TODO(beorn7): Include histograms in testing.
var it *MemoizedSeriesIterator var it *MemoizedSeriesIterator
sampleEq := func(ets int64, ev float64) { sampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram) {
if efh == nil {
ts, v := it.At() ts, v := it.At()
require.Equal(t, ets, ts, "timestamp mismatch") require.Equal(t, ets, ts, "timestamp mismatch")
require.Equal(t, ev, v, "value mismatch") require.Equal(t, ev, v, "value mismatch")
} else {
ts, fh := it.AtFloatHistogram()
require.Equal(t, ets, ts, "timestamp mismatch")
require.Equal(t, efh, fh, "histogram mismatch")
} }
prevSampleEq := func(ets int64, ev float64, eok bool) { }
ts, v, _, _, ok := it.PeekPrev() prevSampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram, eok bool) {
ts, v, fh, ok := it.PeekPrev()
require.Equal(t, eok, ok, "exist mismatch") require.Equal(t, eok, ok, "exist mismatch")
require.Equal(t, ets, ts, "timestamp mismatch") require.Equal(t, ets, ts, "timestamp mismatch")
if efh == nil {
require.Equal(t, ev, v, "value mismatch") require.Equal(t, ev, v, "value mismatch")
} else {
require.Equal(t, efh, fh, "histogram mismatch")
}
} }
it = NewMemoizedIterator(NewListSeriesIterator(samples{ it = NewMemoizedIterator(NewListSeriesIterator(samples{
@ -46,31 +57,50 @@ func TestMemoizedSeriesIterator(t *testing.T) {
fSample{t: 99, f: 8}, fSample{t: 99, f: 8},
fSample{t: 100, f: 9}, fSample{t: 100, f: 9},
fSample{t: 101, f: 10}, fSample{t: 101, f: 10},
hSample{t: 102, h: tsdbutil.GenerateTestHistogram(0)},
hSample{t: 103, h: tsdbutil.GenerateTestHistogram(1)},
fhSample{t: 104, fh: tsdbutil.GenerateTestFloatHistogram(2)},
fhSample{t: 199, fh: tsdbutil.GenerateTestFloatHistogram(3)},
hSample{t: 200, h: tsdbutil.GenerateTestHistogram(4)},
fhSample{t: 299, fh: tsdbutil.GenerateTestFloatHistogram(5)},
fSample{t: 300, f: 11},
hSample{t: 399, h: tsdbutil.GenerateTestHistogram(6)},
fSample{t: 400, f: 12},
}), 2) }), 2)
require.Equal(t, it.Seek(-123), chunkenc.ValFloat, "seek failed") require.Equal(t, it.Seek(-123), chunkenc.ValFloat, "seek failed")
sampleEq(1, 2) sampleEq(1, 2, nil)
prevSampleEq(0, 0, false) prevSampleEq(0, 0, nil, false)
require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed")
sampleEq(2, 3)
prevSampleEq(1, 2, true)
require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed")
require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed")
require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed")
sampleEq(5, 6)
prevSampleEq(4, 5, true)
require.Equal(t, it.Seek(5), chunkenc.ValFloat, "seek failed") require.Equal(t, it.Seek(5), chunkenc.ValFloat, "seek failed")
sampleEq(5, 6) sampleEq(5, 6, nil)
prevSampleEq(4, 5, true) prevSampleEq(4, 5, nil, true)
require.Equal(t, it.Seek(101), chunkenc.ValFloat, "seek failed") // Seek to a histogram sample with a previous float sample.
sampleEq(101, 10) require.Equal(t, it.Seek(102), chunkenc.ValFloatHistogram, "seek failed")
prevSampleEq(100, 9, true) sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0))
prevSampleEq(101, 10, nil, true)
// Attempt to seek backwards (no-op).
require.Equal(t, it.Seek(50), chunkenc.ValFloatHistogram, "seek failed")
sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0))
prevSampleEq(101, 10, nil, true)
// Seek to a float histogram sample with a previous histogram sample.
require.Equal(t, it.Seek(104), chunkenc.ValFloatHistogram, "seek failed")
sampleEq(104, 0, tsdbutil.GenerateTestFloatHistogram(2))
prevSampleEq(103, 0, tsdbutil.GenerateTestFloatHistogram(1), true)
// Seek to a float sample with a previous float histogram sample.
require.Equal(t, chunkenc.ValFloat, it.Seek(300), "seek failed")
sampleEq(300, 11, nil)
prevSampleEq(299, 0, tsdbutil.GenerateTestFloatHistogram(5), true)
// Seek to a float sample with a previous histogram sample.
require.Equal(t, chunkenc.ValFloat, it.Seek(400), "seek failed")
sampleEq(400, 12, nil)
prevSampleEq(399, 0, tsdbutil.GenerateTestFloatHistogram(6), true)
require.Equal(t, it.Next(), chunkenc.ValNone, "next succeeded unexpectedly")
require.Equal(t, it.Seek(1024), chunkenc.ValNone, "seek succeeded unexpectedly") require.Equal(t, it.Seek(1024), chunkenc.ValNone, "seek succeeded unexpectedly")
} }

View file

@ -76,6 +76,13 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal
return s return s
} }
func (s *Storage) Notify() {
for _, q := range s.rws.queues {
// These should all be non-blocking
q.watcher.Notify()
}
}
// ApplyConfig updates the state as the new config requires. // ApplyConfig updates the state as the new config requires.
func (s *Storage) ApplyConfig(conf *config.Config) error { func (s *Storage) ApplyConfig(conf *config.Config) error {
s.mtx.Lock() s.mtx.Lock()
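
Notify fans a write signal out to every remote-write queue's WAL watcher so new data is shipped without waiting for the watcher's poll interval. A minimal sketch of the contract this relies on, mirroring wlog.WriteNotified (the fanOut type is illustrative):

package example

// WriteNotified mirrors the contract relied on here (wlog.WriteNotified):
// whoever wants to hear about head writes implements Notify, and every
// implementation must be non-blocking because Notify runs on the commit path.
type WriteNotified interface {
	Notify()
}

// fanOut sketches what Storage.Notify does above: forward the signal to
// each remote-write queue's WAL watcher.
type fanOut struct {
	watchers []WriteNotified
}

func (f *fanOut) Notify() {
	for _, w := range f.watchers {
		w.Notify()
	}
}
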

View file

@ -297,9 +297,11 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
seriesIter := s.Series.Iterator(nil) seriesIter := s.Series.Iterator(nil)
lastType := chunkenc.ValNone lastType := chunkenc.ValNone
for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() { for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() {
chunkCreated := false
if typ != lastType || i >= seriesToChunkEncoderSplit { if typ != lastType || i >= seriesToChunkEncoderSplit {
// Create a new chunk if the sample type changed or too many samples in the current one. // Create a new chunk if the sample type changed or too many samples in the current one.
chks = appendChunk(chks, mint, maxt, chk) chks = appendChunk(chks, mint, maxt, chk)
chunkCreated = true
chk, err = chunkenc.NewEmptyChunk(typ.ChunkEncoding()) chk, err = chunkenc.NewEmptyChunk(typ.ChunkEncoding())
if err != nil { if err != nil {
return errChunksIterator{err: err} return errChunksIterator{err: err}
@ -330,6 +332,7 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
if ok, counterReset := app.AppendHistogram(t, h); !ok { if ok, counterReset := app.AppendHistogram(t, h); !ok {
chks = appendChunk(chks, mint, maxt, chk) chks = appendChunk(chks, mint, maxt, chk)
histChunk := chunkenc.NewHistogramChunk() histChunk := chunkenc.NewHistogramChunk()
chunkCreated = true
if counterReset { if counterReset {
histChunk.SetCounterResetHeader(chunkenc.CounterReset) histChunk.SetCounterResetHeader(chunkenc.CounterReset)
} }
@ -346,11 +349,15 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
panic("unexpected error while appending histogram") panic("unexpected error while appending histogram")
} }
} }
if chunkCreated && h.CounterResetHint == histogram.GaugeType {
chk.(*chunkenc.HistogramChunk).SetCounterResetHeader(chunkenc.GaugeType)
}
case chunkenc.ValFloatHistogram: case chunkenc.ValFloatHistogram:
t, fh = seriesIter.AtFloatHistogram() t, fh = seriesIter.AtFloatHistogram()
if ok, counterReset := app.AppendFloatHistogram(t, fh); !ok { if ok, counterReset := app.AppendFloatHistogram(t, fh); !ok {
chks = appendChunk(chks, mint, maxt, chk) chks = appendChunk(chks, mint, maxt, chk)
floatHistChunk := chunkenc.NewFloatHistogramChunk() floatHistChunk := chunkenc.NewFloatHistogramChunk()
chunkCreated = true
if counterReset { if counterReset {
floatHistChunk.SetCounterResetHeader(chunkenc.CounterReset) floatHistChunk.SetCounterResetHeader(chunkenc.CounterReset)
} }
@ -366,6 +373,9 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
panic("unexpected error while float appending histogram") panic("unexpected error while float appending histogram")
} }
} }
if chunkCreated && fh.CounterResetHint == histogram.GaugeType {
chk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(chunkenc.GaugeType)
}
default: default:
return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())} return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())}
} }
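
The new chunkCreated flag closes a gap: a freshly cut chunk starts with an UnknownCounterReset header, which would mislabel a gauge histogram chunk as a counter chunk downstream. A reduced sketch of the fix-up for the integer-histogram branch (the float-histogram branch is symmetric):

package example

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// cutHistogramChunk sketches the fix-up above: a new chunk defaults to
// UnknownCounterReset, so when the sample that triggered the cut carries
// a gauge hint, the header must be overridden right away.
func cutHistogramChunk(h *histogram.Histogram) *chunkenc.HistogramChunk {
	chk := chunkenc.NewHistogramChunk()
	if h.CounterResetHint == histogram.GaugeType {
		chk.SetCounterResetHeader(chunkenc.GaugeType)
	}
	return chk
}
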

View file

@ -127,13 +127,12 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
type histogramTest struct { type histogramTest struct {
samples []tsdbutil.Sample samples []tsdbutil.Sample
expectedChunks int expectedCounterResetHeaders []chunkenc.CounterResetHeader
expectedCounterReset bool
} }
func TestHistogramSeriesToChunks(t *testing.T) { func TestHistogramSeriesToChunks(t *testing.T) {
h1 := &histogram.Histogram{ h1 := &histogram.Histogram{
Count: 3, Count: 7,
ZeroCount: 2, ZeroCount: 2,
ZeroThreshold: 0.001, ZeroThreshold: 0.001,
Sum: 100, Sum: 100,
@ -158,7 +157,7 @@ func TestHistogramSeriesToChunks(t *testing.T) {
} }
// Implicit counter reset by reduction in buckets, not appendable. // Implicit counter reset by reduction in buckets, not appendable.
h2down := &histogram.Histogram{ h2down := &histogram.Histogram{
Count: 8, Count: 10,
ZeroCount: 2, ZeroCount: 2,
ZeroThreshold: 0.001, ZeroThreshold: 0.001,
Sum: 100, Sum: 100,
@ -171,7 +170,7 @@ func TestHistogramSeriesToChunks(t *testing.T) {
} }
fh1 := &histogram.FloatHistogram{ fh1 := &histogram.FloatHistogram{
Count: 4, Count: 6,
ZeroCount: 2, ZeroCount: 2,
ZeroThreshold: 0.001, ZeroThreshold: 0.001,
Sum: 100, Sum: 100,
@ -183,7 +182,7 @@ func TestHistogramSeriesToChunks(t *testing.T) {
} }
// Appendable to fh1. // Appendable to fh1.
fh2 := &histogram.FloatHistogram{ fh2 := &histogram.FloatHistogram{
Count: 15, Count: 17,
ZeroCount: 2, ZeroCount: 2,
ZeroThreshold: 0.001, ZeroThreshold: 0.001,
Sum: 100, Sum: 100,
@ -196,7 +195,7 @@ func TestHistogramSeriesToChunks(t *testing.T) {
} }
// Implicit counter reset by reduction in buckets, not appendable. // Implicit counter reset by reduction in buckets, not appendable.
fh2down := &histogram.FloatHistogram{ fh2down := &histogram.FloatHistogram{
Count: 13, Count: 15,
ZeroCount: 2, ZeroCount: 2,
ZeroThreshold: 0.001, ZeroThreshold: 0.001,
Sum: 100, Sum: 100,
@ -208,6 +207,60 @@ func TestHistogramSeriesToChunks(t *testing.T) {
PositiveBuckets: []float64{2, 2, 7, 2}, PositiveBuckets: []float64{2, 2, 7, 2},
} }
// Gauge histogram.
gh1 := &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 7,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 100,
Schema: 0,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
},
PositiveBuckets: []int64{2, 1}, // Abs: 2, 3
}
gh2 := &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 12,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 100,
Schema: 0,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{2, 1, -2, 3}, // Abs: 2, 3, 1, 4
}
// Float gauge histogram.
gfh1 := &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 6,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 100,
Schema: 0,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
},
PositiveBuckets: []float64{3, 1},
}
gfh2 := &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 17,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 100,
Schema: 0,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{4, 2, 7, 2},
}
staleHistogram := &histogram.Histogram{ staleHistogram := &histogram.Histogram{
Sum: math.Float64frombits(value.StaleNaN), Sum: math.Float64frombits(value.StaleNaN),
} }
@ -220,74 +273,70 @@ func TestHistogramSeriesToChunks(t *testing.T) {
samples: []tsdbutil.Sample{ samples: []tsdbutil.Sample{
hSample{t: 1, h: h1}, hSample{t: 1, h: h1},
}, },
expectedChunks: 1, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
}, },
"two histograms encoded to a single chunk": { "two histograms encoded to a single chunk": {
samples: []tsdbutil.Sample{ samples: []tsdbutil.Sample{
hSample{t: 1, h: h1}, hSample{t: 1, h: h1},
hSample{t: 2, h: h2}, hSample{t: 2, h: h2},
}, },
expectedChunks: 1, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
}, },
"two histograms encoded to two chunks": { "two histograms encoded to two chunks": {
samples: []tsdbutil.Sample{ samples: []tsdbutil.Sample{
hSample{t: 1, h: h2}, hSample{t: 1, h: h2},
hSample{t: 2, h: h1}, hSample{t: 2, h: h1},
}, },
expectedChunks: 2, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
expectedCounterReset: true,
}, },
"histogram and stale sample encoded to two chunks": { "histogram and stale sample encoded to two chunks": {
samples: []tsdbutil.Sample{ samples: []tsdbutil.Sample{
hSample{t: 1, h: staleHistogram}, hSample{t: 1, h: staleHistogram},
hSample{t: 2, h: h1}, hSample{t: 2, h: h1},
}, },
expectedChunks: 2, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
}, },
"histogram and reduction in bucket encoded to two chunks": { "histogram and reduction in bucket encoded to two chunks": {
samples: []tsdbutil.Sample{ samples: []tsdbutil.Sample{
hSample{t: 1, h: h1}, hSample{t: 1, h: h1},
hSample{t: 2, h: h2down}, hSample{t: 2, h: h2down},
}, },
expectedChunks: 2, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
expectedCounterReset: true,
}, },
// Float histograms. // Float histograms.
"single float histogram to single chunk": { "single float histogram to single chunk": {
samples: []tsdbutil.Sample{ samples: []tsdbutil.Sample{
fhSample{t: 1, fh: fh1}, fhSample{t: 1, fh: fh1},
}, },
expectedChunks: 1, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
}, },
"two float histograms encoded to a single chunk": { "two float histograms encoded to a single chunk": {
samples: []tsdbutil.Sample{ samples: []tsdbutil.Sample{
fhSample{t: 1, fh: fh1}, fhSample{t: 1, fh: fh1},
fhSample{t: 2, fh: fh2}, fhSample{t: 2, fh: fh2},
}, },
expectedChunks: 1, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
}, },
"two float histograms encoded to two chunks": { "two float histograms encoded to two chunks": {
samples: []tsdbutil.Sample{ samples: []tsdbutil.Sample{
fhSample{t: 1, fh: fh2}, fhSample{t: 1, fh: fh2},
fhSample{t: 2, fh: fh1}, fhSample{t: 2, fh: fh1},
}, },
expectedChunks: 2, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
expectedCounterReset: true,
}, },
"float histogram and stale sample encoded to two chunks": { "float histogram and stale sample encoded to two chunks": {
samples: []tsdbutil.Sample{ samples: []tsdbutil.Sample{
fhSample{t: 1, fh: staleFloatHistogram}, fhSample{t: 1, fh: staleFloatHistogram},
fhSample{t: 2, fh: fh1}, fhSample{t: 2, fh: fh1},
}, },
expectedChunks: 2, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
}, },
"float histogram and reduction in bucket encoded to two chunks": { "float histogram and reduction in bucket encoded to two chunks": {
samples: []tsdbutil.Sample{ samples: []tsdbutil.Sample{
fhSample{t: 1, fh: fh1}, fhSample{t: 1, fh: fh1},
fhSample{t: 2, fh: fh2down}, fhSample{t: 2, fh: fh2down},
}, },
expectedChunks: 2, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
expectedCounterReset: true,
}, },
// Mixed. // Mixed.
"histogram and float histogram encoded to two chunks": { "histogram and float histogram encoded to two chunks": {
@ -295,21 +344,61 @@ func TestHistogramSeriesToChunks(t *testing.T) {
hSample{t: 1, h: h1}, hSample{t: 1, h: h1},
fhSample{t: 2, fh: fh2}, fhSample{t: 2, fh: fh2},
}, },
expectedChunks: 2, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
}, },
"float histogram and histogram encoded to two chunks": { "float histogram and histogram encoded to two chunks": {
samples: []tsdbutil.Sample{ samples: []tsdbutil.Sample{
fhSample{t: 1, fh: fh1}, fhSample{t: 1, fh: fh1},
hSample{t: 2, h: h2}, hSample{t: 2, h: h2},
}, },
expectedChunks: 2, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
}, },
"histogram and stale float histogram encoded to two chunks": { "histogram and stale float histogram encoded to two chunks": {
samples: []tsdbutil.Sample{ samples: []tsdbutil.Sample{
hSample{t: 1, h: h1}, hSample{t: 1, h: h1},
fhSample{t: 2, fh: staleFloatHistogram}, fhSample{t: 2, fh: staleFloatHistogram},
}, },
expectedChunks: 2, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
},
"single gauge histogram encoded to one chunk": {
samples: []tsdbutil.Sample{
hSample{t: 1, h: gh1},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
},
"two gauge histograms encoded to one chunk when counter increases": {
samples: []tsdbutil.Sample{
hSample{t: 1, h: gh1},
hSample{t: 2, h: gh2},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
},
"two gauge histograms encoded to one chunk when counter decreases": {
samples: []tsdbutil.Sample{
hSample{t: 1, h: gh2},
hSample{t: 2, h: gh1},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
},
"single gauge float histogram encoded to one chunk": {
samples: []tsdbutil.Sample{
fhSample{t: 1, fh: gfh1},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
},
"two float gauge histograms encoded to one chunk when counter increases": {
samples: []tsdbutil.Sample{
fhSample{t: 1, fh: gfh1},
fhSample{t: 2, fh: gfh2},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
},
"two float gauge histograms encoded to one chunk when counter decreases": {
samples: []tsdbutil.Sample{
fhSample{t: 1, fh: gfh2},
fhSample{t: 2, fh: gfh1},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
}, },
} }
@ -322,13 +411,24 @@ func TestHistogramSeriesToChunks(t *testing.T) {
func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
lbs := labels.FromStrings("__name__", "up", "instance", "localhost:8080") lbs := labels.FromStrings("__name__", "up", "instance", "localhost:8080")
series := NewListSeries(lbs, test.samples) copiedSamples := []tsdbutil.Sample{}
for _, s := range test.samples {
switch cs := s.(type) {
case hSample:
copiedSamples = append(copiedSamples, hSample{t: cs.t, h: cs.h.Copy()})
case fhSample:
copiedSamples = append(copiedSamples, fhSample{t: cs.t, fh: cs.fh.Copy()})
default:
t.Error("internal error, unexpected type")
}
}
series := NewListSeries(lbs, copiedSamples)
encoder := NewSeriesToChunkEncoder(series) encoder := NewSeriesToChunkEncoder(series)
require.EqualValues(t, lbs, encoder.Labels()) require.EqualValues(t, lbs, encoder.Labels())
chks, err := ExpandChunks(encoder.Iterator(nil)) chks, err := ExpandChunks(encoder.Iterator(nil))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, test.expectedChunks, len(chks)) require.Equal(t, len(test.expectedCounterResetHeaders), len(chks))
// Decode all encoded samples and assert they are equal to the original ones. // Decode all encoded samples and assert they are equal to the original ones.
encodedSamples := expandHistogramSamples(chks) encodedSamples := expandHistogramSamples(chks)
@ -339,8 +439,10 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
case hSample: case hSample:
encodedSample, ok := encodedSamples[i].(hSample) encodedSample, ok := encodedSamples[i].(hSample)
require.True(t, ok, "expect histogram", fmt.Sprintf("at idx %d", i)) require.True(t, ok, "expect histogram", fmt.Sprintf("at idx %d", i))
// Ignore counter reset here, will check on chunk level. // Ignore counter reset if not gauge here, will check on chunk level.
if expectedSample.h.CounterResetHint != histogram.GaugeType {
encodedSample.h.CounterResetHint = histogram.UnknownCounterReset encodedSample.h.CounterResetHint = histogram.UnknownCounterReset
}
if value.IsStaleNaN(expectedSample.h.Sum) { if value.IsStaleNaN(expectedSample.h.Sum) {
require.True(t, value.IsStaleNaN(encodedSample.h.Sum), fmt.Sprintf("at idx %d", i)) require.True(t, value.IsStaleNaN(encodedSample.h.Sum), fmt.Sprintf("at idx %d", i))
continue continue
@ -349,8 +451,10 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
case fhSample: case fhSample:
encodedSample, ok := encodedSamples[i].(fhSample) encodedSample, ok := encodedSamples[i].(fhSample)
require.True(t, ok, "expect float histogram", fmt.Sprintf("at idx %d", i)) require.True(t, ok, "expect float histogram", fmt.Sprintf("at idx %d", i))
// Ignore counter reset here, will check on chunk level. // Ignore counter reset if not gauge here, will check on chunk level.
if expectedSample.fh.CounterResetHint != histogram.GaugeType {
encodedSample.fh.CounterResetHint = histogram.UnknownCounterReset encodedSample.fh.CounterResetHint = histogram.UnknownCounterReset
}
if value.IsStaleNaN(expectedSample.fh.Sum) { if value.IsStaleNaN(expectedSample.fh.Sum) {
require.True(t, value.IsStaleNaN(encodedSample.fh.Sum), fmt.Sprintf("at idx %d", i)) require.True(t, value.IsStaleNaN(encodedSample.fh.Sum), fmt.Sprintf("at idx %d", i))
continue continue
@ -361,15 +465,8 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
} }
} }
// If a counter reset hint is expected, it can only be found in the second chunk. for i, expectedCounterResetHint := range test.expectedCounterResetHeaders {
// Otherwise, we assert an unknown counter reset hint in all chunks. require.Equal(t, expectedCounterResetHint, getCounterResetHint(chks[i]), fmt.Sprintf("chunk at index %d", i))
if test.expectedCounterReset {
require.Equal(t, chunkenc.UnknownCounterReset, getCounterResetHint(chks[0]))
require.Equal(t, chunkenc.CounterReset, getCounterResetHint(chks[1]))
} else {
for _, chk := range chks {
require.Equal(t, chunkenc.UnknownCounterReset, getCounterResetHint(chk))
}
} }
} }

View file

@ -665,7 +665,7 @@ func (db *DB) truncate(mint int64) error {
} }
seg, ok := db.deleted[id] seg, ok := db.deleted[id]
return ok && seg >= first return ok && seg > last
} }
db.metrics.checkpointCreationTotal.Inc() db.metrics.checkpointCreationTotal.Inc()
@ -687,7 +687,7 @@ func (db *DB) truncate(mint int64) error {
// The checkpoint is written and segments before it are truncated, so we // The checkpoint is written and segments before it are truncated, so we
// no longer need to track deleted series that were being kept around. // no longer need to track deleted series that were being kept around.
for ref, segment := range db.deleted { for ref, segment := range db.deleted {
if segment < first { if segment <= last {
delete(db.deleted, ref) delete(db.deleted, ref)
} }
} }
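
This fix (applied identically to Head.truncateWAL later in the diff) changes the retention horizon for deleted-series records during checkpointing: a record must survive as long as it may still be referenced after segment last, the end of the checkpointed range, whereas the old first-based comparison dropped series whose samples lived between first and last. A sketch of the corrected predicate, with deleted mapping series refs to their keep-until segment:

package example

// keepDeleted is a sketch of the corrected keep predicate used while
// checkpointing WAL segments [first, last]: a deleted series survives
// only if it is still needed beyond the checkpointed range.
func keepDeleted(deleted map[uint64]int, last int) func(id uint64) bool {
	return func(id uint64) bool {
		keepUntil, ok := deleted[id]
		return ok && keepUntil > last
	}
}
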

View file

@ -358,9 +358,9 @@ func counterResetInAnyFloatBucket(oldBuckets []xorValue, newBuckets []float64, o
if oldIdx <= newIdx { if oldIdx <= newIdx {
// Moving ahead old bucket and span by 1 index. // Moving ahead old bucket and span by 1 index.
if oldInsideSpanIdx == oldSpans[oldSpanSliceIdx].Length-1 { if oldInsideSpanIdx+1 >= oldSpans[oldSpanSliceIdx].Length {
// Current span is over. // Current span is over.
oldSpanSliceIdx++ oldSpanSliceIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldSpans)
oldInsideSpanIdx = 0 oldInsideSpanIdx = 0
if oldSpanSliceIdx >= len(oldSpans) { if oldSpanSliceIdx >= len(oldSpans) {
// All old spans are over. // All old spans are over.
@ -377,9 +377,9 @@ func counterResetInAnyFloatBucket(oldBuckets []xorValue, newBuckets []float64, o
if oldIdx > newIdx { if oldIdx > newIdx {
// Moving ahead new bucket and span by 1 index. // Moving ahead new bucket and span by 1 index.
if newInsideSpanIdx == newSpans[newSpanSliceIdx].Length-1 { if newInsideSpanIdx+1 >= newSpans[newSpanSliceIdx].Length {
// Current span is over. // Current span is over.
newSpanSliceIdx++ newSpanSliceIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newSpans)
newInsideSpanIdx = 0 newInsideSpanIdx = 0
if newSpanSliceIdx >= len(newSpans) { if newSpanSliceIdx >= len(newSpans) {
// All new spans are over. // All new spans are over.

View file

@ -365,6 +365,64 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
} }
} }
func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
h1 := &histogram.FloatHistogram{
Schema: 0,
Count: 21,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 4,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 2, 1, 1, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []float64{1, 2, 1, 2, 2, 2, 2},
}
h2 := &histogram.FloatHistogram{
Schema: 0,
Count: 37,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2},
}
c := Chunk(NewFloatHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
app.AppendFloatHistogram(1, h1)
require.Equal(t, 1, c.NumSamples())
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, okToAppend, counterReset := hApp.Appendable(h2)
require.Empty(t, pI)
require.Empty(t, nI)
require.True(t, okToAppend)
require.False(t, counterReset)
}
func TestFloatHistogramChunkAppendableGauge(t *testing.T) { func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
c := Chunk(NewFloatHistogramChunk()) c := Chunk(NewFloatHistogramChunk())

View file

@ -386,9 +386,9 @@ func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans
if oldIdx <= newIdx { if oldIdx <= newIdx {
// Moving ahead old bucket and span by 1 index. // Moving ahead old bucket and span by 1 index.
if oldInsideSpanIdx == oldSpans[oldSpanSliceIdx].Length-1 { if oldInsideSpanIdx+1 >= oldSpans[oldSpanSliceIdx].Length {
// Current span is over. // Current span is over.
oldSpanSliceIdx++ oldSpanSliceIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldSpans)
oldInsideSpanIdx = 0 oldInsideSpanIdx = 0
if oldSpanSliceIdx >= len(oldSpans) { if oldSpanSliceIdx >= len(oldSpans) {
// All old spans are over. // All old spans are over.
@ -405,9 +405,9 @@ func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans
if oldIdx > newIdx { if oldIdx > newIdx {
// Moving ahead new bucket and span by 1 index. // Moving ahead new bucket and span by 1 index.
if newInsideSpanIdx == newSpans[newSpanSliceIdx].Length-1 { if newInsideSpanIdx+1 >= newSpans[newSpanSliceIdx].Length {
// Current span is over. // Current span is over.
newSpanSliceIdx++ newSpanSliceIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newSpans)
newInsideSpanIdx = 0 newInsideSpanIdx = 0
if newSpanSliceIdx >= len(newSpans) { if newSpanSliceIdx >= len(newSpans) {
// All new spans are over. // All new spans are over.

View file

@ -487,3 +487,10 @@ func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterR
return histogram.UnknownCounterReset return histogram.UnknownCounterReset
} }
} }
// Handle pathological case of empty span when advancing span idx.
func nextNonEmptySpanSliceIdx(idx int, spans []histogram.Span) (newIdx int) {
for idx++; idx < len(spans) && spans[idx].Length == 0; idx++ { //nolint:revive // This "empty" block is intentional
}
return idx
}
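
Without this helper, a zero-length span would leave the cursor parked on a span that contributes no buckets, and the counter-reset detection loops above would mis-align old and new bucket indices. A quick check of the skipping behaviour, adapted from the diff and using spans shaped like the tests below:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// nextNonEmptySpanSliceIdx is adapted from the helper above.
func nextNonEmptySpanSliceIdx(idx int, spans []histogram.Span) int {
	for idx++; idx < len(spans) && spans[idx].Length == 0; idx++ {
	}
	return idx
}

func main() {
	spans := []histogram.Span{
		{Offset: 0, Length: 4},
		{Offset: 0, Length: 0}, // pathological empty span
		{Offset: 0, Length: 3},
	}
	fmt.Println(nextNonEmptySpanSliceIdx(0, spans)) // 2: the empty span is skipped
	fmt.Println(nextNonEmptySpanSliceIdx(2, spans)) // 3: past the end, loop terminates
}
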

View file

@ -14,6 +14,7 @@
package chunkenc package chunkenc
import ( import (
"fmt"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -387,6 +388,64 @@ func TestHistogramChunkAppendable(t *testing.T) {
} }
} }
func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) {
h1 := &histogram.Histogram{
Schema: 0,
Count: 21,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 4,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 1, -1, 0, 0, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 1, -1, 1, 0, 0, 0},
}
h2 := &histogram.Histogram{
Schema: 0,
Count: 37,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
}
c := Chunk(NewHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
require.NoError(t, err)
require.Equal(t, 0, c.NumSamples())
app.AppendHistogram(1, h1)
require.Equal(t, 1, c.NumSamples())
hApp, _ := app.(*HistogramAppender)
pI, nI, okToAppend, counterReset := hApp.Appendable(h2)
require.Empty(t, pI)
require.Empty(t, nI)
require.True(t, okToAppend)
require.False(t, counterReset)
}
func TestAtFloatHistogram(t *testing.T) { func TestAtFloatHistogram(t *testing.T) {
input := []histogram.Histogram{ input := []histogram.Histogram{
{ {
@ -514,6 +573,10 @@ func TestAtFloatHistogram(t *testing.T) {
app, err := chk.Appender() app, err := chk.Appender()
require.NoError(t, err) require.NoError(t, err)
for i := range input { for i := range input {
if i > 0 {
_, _, okToAppend, _ := app.(*HistogramAppender).Appendable(&input[i])
require.True(t, okToAppend, fmt.Sprintf("idx: %d", i))
}
app.AppendHistogram(int64(i), &input[i]) app.AppendHistogram(int64(i), &input[i])
} }
it := chk.Iterator(nil) it := chk.Iterator(nil)

View file

@ -896,10 +896,10 @@ func debugOutOfOrderChunks(chks []chunks.Meta, logger log.Logger) {
} }
// Looks like the chunk is out of order. // Looks like the chunk is out of order.
prevSafeChk, prevIsSafeChk := prevChk.Chunk.(*safeChunk) prevSafeChk, prevIsSafeChk := prevChk.Chunk.(*safeHeadChunk)
currSafeChk, currIsSafeChk := currChk.Chunk.(*safeChunk) currSafeChk, currIsSafeChk := currChk.Chunk.(*safeHeadChunk)
// Get info out of safeChunk (if possible). // Get info out of safeHeadChunk (if possible).
prevHeadChunkID := chunks.HeadChunkID(0) prevHeadChunkID := chunks.HeadChunkID(0)
currHeadChunkID := chunks.HeadChunkID(0) currHeadChunkID := chunks.HeadChunkID(0)
prevLabels := labels.Labels{} prevLabels := labels.Labels{}

View file

@ -80,6 +80,7 @@ func DefaultOptions() *Options {
NoLockfile: false, NoLockfile: false,
AllowOverlappingCompaction: true, AllowOverlappingCompaction: true,
WALCompression: false, WALCompression: false,
SamplesPerChunk: DefaultSamplesPerChunk,
StripeSize: DefaultStripeSize, StripeSize: DefaultStripeSize,
HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize,
IsolationDisabled: defaultIsolationDisabled, IsolationDisabled: defaultIsolationDisabled,
@ -162,6 +163,9 @@ type Options struct {
// HeadChunksWriteQueueSize configures the size of the chunk write queue used in the head chunks mapper. // HeadChunksWriteQueueSize configures the size of the chunk write queue used in the head chunks mapper.
HeadChunksWriteQueueSize int HeadChunksWriteQueueSize int
// SamplesPerChunk configures the target number of samples per chunk.
SamplesPerChunk int
// SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series. // SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series.
// It is always a no-op in Prometheus and mainly meant for external users who import TSDB. // It is always a no-op in Prometheus and mainly meant for external users who import TSDB.
SeriesLifecycleCallback SeriesLifecycleCallback SeriesLifecycleCallback SeriesLifecycleCallback
@ -265,6 +269,8 @@ type DB struct {
// out-of-order compaction and vertical queries. // out-of-order compaction and vertical queries.
oooWasEnabled atomic.Bool oooWasEnabled atomic.Bool
writeNotified wlog.WriteNotified
registerer prometheus.Registerer registerer prometheus.Registerer
} }
@ -678,6 +684,9 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
if opts.HeadChunksWriteQueueSize < 0 { if opts.HeadChunksWriteQueueSize < 0 {
opts.HeadChunksWriteQueueSize = chunks.DefaultWriteQueueSize opts.HeadChunksWriteQueueSize = chunks.DefaultWriteQueueSize
} }
if opts.SamplesPerChunk <= 0 {
opts.SamplesPerChunk = DefaultSamplesPerChunk
}
if opts.MaxBlockChunkSegmentSize <= 0 { if opts.MaxBlockChunkSegmentSize <= 0 {
opts.MaxBlockChunkSegmentSize = chunks.DefaultChunkSegmentSize opts.MaxBlockChunkSegmentSize = chunks.DefaultChunkSegmentSize
} }
@ -823,6 +832,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
headOpts.ChunkWriteBufferSize = opts.HeadChunksWriteBufferSize headOpts.ChunkWriteBufferSize = opts.HeadChunksWriteBufferSize
headOpts.ChunkEndTimeVariance = opts.HeadChunksEndTimeVariance headOpts.ChunkEndTimeVariance = opts.HeadChunksEndTimeVariance
headOpts.ChunkWriteQueueSize = opts.HeadChunksWriteQueueSize headOpts.ChunkWriteQueueSize = opts.HeadChunksWriteQueueSize
headOpts.SamplesPerChunk = opts.SamplesPerChunk
headOpts.StripeSize = opts.StripeSize headOpts.StripeSize = opts.StripeSize
headOpts.SeriesCallback = opts.SeriesLifecycleCallback headOpts.SeriesCallback = opts.SeriesLifecycleCallback
headOpts.EnableExemplarStorage = opts.EnableExemplarStorage headOpts.EnableExemplarStorage = opts.EnableExemplarStorage
@ -845,6 +855,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
if err != nil { if err != nil {
return nil, err return nil, err
} }
db.head.writeNotified = db.writeNotified
// Register metrics after assigning the head block. // Register metrics after assigning the head block.
db.metrics = newDBMetrics(db, r) db.metrics = newDBMetrics(db, r)
@ -2074,6 +2085,12 @@ func (db *DB) CleanTombstones() (err error) {
return nil return nil
} }
func (db *DB) SetWriteNotified(wn wlog.WriteNotified) {
db.writeNotified = wn
// It's possible we already created the head struct, so we should also set the write notifier for that.
db.head.writeNotified = wn
}
func isBlockDir(fi fs.DirEntry) bool { func isBlockDir(fi fs.DirEntry) bool {
if !fi.IsDir() { if !fi.IsDir() {
return false return false
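
SamplesPerChunk replaces a hard-coded constant (see head_append.go below) with a validated option that defaults to DefaultSamplesPerChunk. A hedged sketch of wiring it up when opening a TSDB (openWithChunkTarget is illustrative):

package example

import (
	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/tsdb"
)

// openWithChunkTarget opens a TSDB with a non-default samples-per-chunk
// target. Values <= 0 are reset to DefaultSamplesPerChunk by validateOpts,
// so only positive overrides take effect.
func openWithChunkTarget(dir string, samplesPerChunk int) (*tsdb.DB, error) {
	opts := tsdb.DefaultOptions()
	opts.SamplesPerChunk = samplesPerChunk // e.g. 240 for fewer, larger chunks
	return tsdb.Open(dir, log.NewNopLogger(), nil, opts, nil)
}
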

View file

@ -136,6 +136,8 @@ type Head struct {
stats *HeadStats stats *HeadStats
reg prometheus.Registerer reg prometheus.Registerer
writeNotified wlog.WriteNotified
memTruncationInProcess atomic.Bool memTruncationInProcess atomic.Bool
} }
@ -166,6 +168,8 @@ type HeadOptions struct {
ChunkEndTimeVariance float64 ChunkEndTimeVariance float64
ChunkWriteQueueSize int ChunkWriteQueueSize int
SamplesPerChunk int
// StripeSize sets the number of entries in the hash map, it must be a power of 2. // StripeSize sets the number of entries in the hash map, it must be a power of 2.
// A larger StripeSize will allocate more memory up-front, but will increase performance when handling a large number of series. // A larger StripeSize will allocate more memory up-front, but will increase performance when handling a large number of series.
// A smaller StripeSize reduces the memory allocated, but can decrease performance with large number of series. // A smaller StripeSize reduces the memory allocated, but can decrease performance with large number of series.
@ -189,6 +193,8 @@ type HeadOptions struct {
const ( const (
// DefaultOutOfOrderCapMax is the default maximum size of an in-memory out-of-order chunk. // DefaultOutOfOrderCapMax is the default maximum size of an in-memory out-of-order chunk.
DefaultOutOfOrderCapMax int64 = 32 DefaultOutOfOrderCapMax int64 = 32
// DefaultSamplesPerChunk provides a default target number of samples per chunk.
DefaultSamplesPerChunk = 120
) )
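
For scale: at Prometheus's default 15s scrape interval, the 120-sample target means each head chunk covers 120 x 15 s = 30 minutes of data, which is the operating point the (now removed) Gorilla-paper comment in head_append.go below refers to.
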
func DefaultHeadOptions() *HeadOptions { func DefaultHeadOptions() *HeadOptions {
@ -199,6 +205,7 @@ func DefaultHeadOptions() *HeadOptions {
ChunkWriteBufferSize: chunks.DefaultWriteBufferSize, ChunkWriteBufferSize: chunks.DefaultWriteBufferSize,
ChunkEndTimeVariance: 0, ChunkEndTimeVariance: 0,
ChunkWriteQueueSize: chunks.DefaultWriteQueueSize, ChunkWriteQueueSize: chunks.DefaultWriteQueueSize,
SamplesPerChunk: DefaultSamplesPerChunk,
StripeSize: DefaultStripeSize, StripeSize: DefaultStripeSize,
SeriesCallback: &noopSeriesLifecycleCallback{}, SeriesCallback: &noopSeriesLifecycleCallback{},
IsolationDisabled: defaultIsolationDisabled, IsolationDisabled: defaultIsolationDisabled,
@ -998,7 +1005,7 @@ func (h *Head) DisableNativeHistograms() {
} }
// PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names. // PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names.
func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats { func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats {
h.cardinalityMutex.Lock() h.cardinalityMutex.Lock()
defer h.cardinalityMutex.Unlock() defer h.cardinalityMutex.Unlock()
currentTime := time.Duration(time.Now().Unix()) * time.Second currentTime := time.Duration(time.Now().Unix()) * time.Second
@ -1009,7 +1016,7 @@ func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.Postings
if h.cardinalityCache != nil { if h.cardinalityCache != nil {
return h.cardinalityCache return h.cardinalityCache
} }
h.cardinalityCache = h.postings.Stats(statsByLabelName) h.cardinalityCache = h.postings.Stats(statsByLabelName, limit)
h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second
return h.cardinalityCache return h.cardinalityCache
@ -1239,9 +1246,9 @@ func (h *Head) truncateWAL(mint int64) error {
return true return true
} }
h.deletedMtx.Lock() h.deletedMtx.Lock()
_, ok := h.deleted[id] keepUntil, ok := h.deleted[id]
h.deletedMtx.Unlock() h.deletedMtx.Unlock()
return ok return ok && keepUntil > last
} }
h.metrics.checkpointCreationTotal.Inc() h.metrics.checkpointCreationTotal.Inc()
if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil { if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil {
@ -1262,7 +1269,7 @@ func (h *Head) truncateWAL(mint int64) error {
// longer need to track deleted series that are before it. // longer need to track deleted series that are before it.
h.deletedMtx.Lock() h.deletedMtx.Lock()
for ref, segment := range h.deleted { for ref, segment := range h.deleted {
if segment < first { if segment <= last {
delete(h.deleted, ref) delete(h.deleted, ref)
} }
} }
@ -1349,12 +1356,12 @@ type Stats struct {
// Stats returns important current HEAD statistics. Note that it is expensive to // Stats returns important current HEAD statistics. Note that it is expensive to
// calculate these. // calculate these.
func (h *Head) Stats(statsByLabelName string) *Stats { func (h *Head) Stats(statsByLabelName string, limit int) *Stats {
return &Stats{ return &Stats{
NumSeries: h.NumSeries(), NumSeries: h.NumSeries(),
MaxTime: h.MaxTime(), MaxTime: h.MaxTime(),
MinTime: h.MinTime(), MinTime: h.MinTime(),
IndexPostingStats: h.PostingsCardinalityStats(statsByLabelName), IndexPostingStats: h.PostingsCardinalityStats(statsByLabelName, limit),
} }
} }
@ -1634,7 +1641,7 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e
func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) { func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) {
s, created, err := h.series.getOrSet(hash, lset, func() *memSeries { s, created, err := h.series.getOrSet(hash, lset, func() *memSeries {
return newMemSeries(lset, id, labels.StableHash(lset), h.opts.ChunkEndTimeVariance, h.opts.IsolationDisabled) return newMemSeries(lset, id, labels.StableHash(lset), h.opts.ChunkEndTimeVariance, h.opts.IsolationDisabled, h.opts.SamplesPerChunk)
}) })
if err != nil { if err != nil {
return nil, false, err return nil, false, err
@ -1949,6 +1956,7 @@ type memSeries struct {
// to spread chunks writing across time. Doesn't apply to the last chunk of the chunk range. 0 to disable variance. // to spread chunks writing across time. Doesn't apply to the last chunk of the chunk range. 0 to disable variance.
chunkEndTimeVariance float64 chunkEndTimeVariance float64
samplesPerChunk int // Target number of samples per chunk.
nextAt int64 // Timestamp at which to cut the next chunk. nextAt int64 // Timestamp at which to cut the next chunk.
// We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates. // We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates.
@ -1977,13 +1985,14 @@ type memSeriesOOOFields struct {
firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0]. firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0].
} }
func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, chunkEndTimeVariance float64, isolationDisabled bool) *memSeries { func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, chunkEndTimeVariance float64, isolationDisabled bool, samplesPerChunk int) *memSeries {
s := &memSeries{ s := &memSeries{
lset: lset, lset: lset,
ref: id, ref: id,
nextAt: math.MinInt64, nextAt: math.MinInt64,
chunkEndTimeVariance: chunkEndTimeVariance, chunkEndTimeVariance: chunkEndTimeVariance,
shardHash: shardHash, shardHash: shardHash,
samplesPerChunk: samplesPerChunk,
} }
if !isolationDisabled { if !isolationDisabled {
s.txs = newTxRing(4) s.txs = newTxRing(4)

View file

@ -849,6 +849,10 @@ func (a *headAppender) Commit() (err error) {
return errors.Wrap(err, "write to WAL") return errors.Wrap(err, "write to WAL")
} }
if a.head.writeNotified != nil {
a.head.writeNotified.Notify()
}
// No errors logging to WAL, so pass the exemplars along to the in memory storage. // No errors logging to WAL, so pass the exemplars along to the in memory storage.
for _, e := range a.exemplars { for _, e := range a.exemplars {
s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref)) s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref))
@ -1339,11 +1343,6 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
func (s *memSeries) appendPreprocessor( func (s *memSeries) appendPreprocessor(
t int64, e chunkenc.Encoding, chunkDiskMapper chunkDiskMapper, chunkRange int64, t int64, e chunkenc.Encoding, chunkDiskMapper chunkDiskMapper, chunkRange int64,
) (c *memChunk, sampleInOrder, chunkCreated bool) { ) (c *memChunk, sampleInOrder, chunkCreated bool) {
// Based on the Gorilla white paper, this offers a near-optimal compression ratio,
// so anything bigger than this has diminishing returns and increases
// the time range within which we have to decompress all samples.
const samplesPerChunk = 120
c = s.head() c = s.head()
if c == nil { if c == nil {
@ -1380,7 +1379,7 @@ func (s *memSeries) appendPreprocessor(
// for this chunk that will try to make samples equally distributed within // for this chunk that will try to make samples equally distributed within
// the remaining chunks in the current chunk range. // the remaining chunks in the current chunk range.
// At latest it must happen at the timestamp set when the chunk was cut. // At latest it must happen at the timestamp set when the chunk was cut.
if numSamples == samplesPerChunk/4 { if numSamples == s.samplesPerChunk/4 {
maxNextAt := s.nextAt maxNextAt := s.nextAt
s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, maxNextAt) s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, maxNextAt)
@ -1391,7 +1390,7 @@ func (s *memSeries) appendPreprocessor(
// Since we assume that the rate is higher, we're being conservative and cutting at 2*samplesPerChunk // Since we assume that the rate is higher, we're being conservative and cutting at 2*samplesPerChunk
// as we expect more chunks to come. // as we expect more chunks to come.
// Note that next chunk will have its nextAt recalculated for the new rate. // Note that next chunk will have its nextAt recalculated for the new rate.
if t >= s.nextAt || numSamples >= samplesPerChunk*2 { if t >= s.nextAt || numSamples >= s.samplesPerChunk*2 {
c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
chunkCreated = true chunkCreated = true
} }
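
With the constant demoted to a per-series field, the cut policy itself is unchanged: after samplesPerChunk/4 samples the end time is re-estimated to spread chunks evenly across the chunk range, and a cut happens once that deadline passes or, if ingestion outpaces the estimate, at twice the target. A condensed sketch of the final decision:

package example

// shouldCut condenses the cut decision above: cut when the recalculated
// deadline nextAt has passed, or when the chunk already holds twice the
// target sample count despite it (the sample rate went up).
func shouldCut(t, nextAt int64, numSamples, samplesPerChunk int) bool {
	return t >= nextAt || numSamples >= samplesPerChunk*2
}
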

View file

@ -360,7 +360,7 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
} }
s.Unlock() s.Unlock()
return &safeChunk{ return &safeHeadChunk{
Chunk: chk, Chunk: chk,
s: s, s: s,
cid: cid, cid: cid,
@ -656,15 +656,15 @@ func (b boundedIterator) Seek(t int64) chunkenc.ValueType {
return b.Iterator.Seek(t) return b.Iterator.Seek(t)
} }
// safeChunk makes sure that the chunk can be accessed without a race condition // safeHeadChunk makes sure that the chunk can be accessed without a race condition
type safeChunk struct { type safeHeadChunk struct {
chunkenc.Chunk chunkenc.Chunk
s *memSeries s *memSeries
cid chunks.HeadChunkID cid chunks.HeadChunkID
isoState *isolationState isoState *isolationState
} }
func (c *safeChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator { func (c *safeHeadChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator {
c.s.Lock() c.s.Lock()
it := c.s.iterator(c.cid, c.Chunk, c.isoState, reuseIter) it := c.s.iterator(c.cid, c.Chunk, c.isoState, reuseIter)
c.s.Unlock() c.s.Unlock()

View file

@ -286,7 +286,7 @@ func BenchmarkLoadWAL(b *testing.B) {
for k := 0; k < c.batches*c.seriesPerBatch; k++ { for k := 0; k < c.batches*c.seriesPerBatch; k++ {
// Create one mmapped chunk per series, with one sample at the given time. // Create one mmapped chunk per series, with one sample at the given time.
lbls := labels.Labels{} lbls := labels.Labels{}
s := newMemSeries(lbls, chunks.HeadSeriesRef(k)*101, labels.StableHash(lbls), 0, defaultIsolationDisabled) s := newMemSeries(lbls, chunks.HeadSeriesRef(k)*101, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper, c.mmappedChunkT) s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper, c.mmappedChunkT)
s.mmapCurrentHeadChunk(chunkDiskMapper) s.mmapCurrentHeadChunk(chunkDiskMapper)
} }
@ -809,7 +809,7 @@ func TestMemSeries_truncateChunks(t *testing.T) {
} }
lbls := labels.FromStrings("a", "b") lbls := labels.FromStrings("a", "b")
s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled) s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
for i := 0; i < 4000; i += 5 { for i := 0; i < 4000; i += 5 {
ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange)
@ -1341,7 +1341,7 @@ func TestMemSeries_append(t *testing.T) {
const chunkRange = 500 const chunkRange = 500
lbls := labels.Labels{} lbls := labels.Labels{}
s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled) s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
// Add first two samples at the very end of a chunk range and the next two // Add first two samples at the very end of a chunk range and the next two
// on and after it. // on and after it.
@ -1396,7 +1396,7 @@ func TestMemSeries_appendHistogram(t *testing.T) {
chunkRange := int64(1000) chunkRange := int64(1000)
lbls := labels.Labels{} lbls := labels.Labels{}
s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled) s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
histograms := tsdbutil.GenerateTestHistograms(4) histograms := tsdbutil.GenerateTestHistograms(4)
histogramWithOneMoreBucket := histograms[3].Copy() histogramWithOneMoreBucket := histograms[3].Copy()
@ -1453,7 +1453,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) {
chunkRange := DefaultBlockDuration chunkRange := DefaultBlockDuration
lbls := labels.Labels{} lbls := labels.Labels{}
s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled) s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
// At this slow rate, we will fill the chunk in two block durations. // At this slow rate, we will fill the chunk in two block durations.
slowRate := (DefaultBlockDuration * 2) / samplesPerChunk slowRate := (DefaultBlockDuration * 2) / samplesPerChunk
@ -2677,7 +2677,7 @@ func TestIteratorSeekIntoBuffer(t *testing.T) {
const chunkRange = 500 const chunkRange = 500
lbls := labels.Labels{} lbls := labels.Labels{}
s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled) s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
for i := 0; i < 7; i++ { for i := 0; i < 7; i++ {
ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange)

View file

@ -156,10 +156,8 @@ type PostingsStats struct {
} }
// Stats calculates the cardinality statistics from postings. // Stats calculates the cardinality statistics from postings.
func (p *MemPostings) Stats(label string) *PostingsStats { func (p *MemPostings) Stats(label string, limit int) *PostingsStats {
const maxNumOfRecords = 10
var size uint64 var size uint64
p.mtx.RLock() p.mtx.RLock()
metrics := &maxHeap{} metrics := &maxHeap{}
@ -168,10 +166,10 @@ func (p *MemPostings) Stats(label string) *PostingsStats {
labelValuePairs := &maxHeap{} labelValuePairs := &maxHeap{}
numLabelPairs := 0 numLabelPairs := 0
metrics.init(maxNumOfRecords) metrics.init(limit)
labels.init(maxNumOfRecords) labels.init(limit)
labelValueLength.init(maxNumOfRecords) labelValueLength.init(limit)
labelValuePairs.init(maxNumOfRecords) labelValuePairs.init(limit)
for n, e := range p.m { for n, e := range p.m {
if n == "" { if n == "" {
@ -184,8 +182,9 @@ func (p *MemPostings) Stats(label string) *PostingsStats {
if n == label { if n == label {
metrics.push(Stat{Name: name, Count: uint64(len(values))}) metrics.push(Stat{Name: name, Count: uint64(len(values))})
} }
labelValuePairs.push(Stat{Name: n + "=" + name, Count: uint64(len(values))}) seriesCnt := uint64(len(values))
size += uint64(len(name)) labelValuePairs.push(Stat{Name: n + "=" + name, Count: seriesCnt})
size += uint64(len(name)) * seriesCnt
} }
labelValueLength.push(Stat{Name: n, Count: size}) labelValueLength.push(Stat{Name: n, Count: size})
} }

View file

@ -912,10 +912,39 @@ func BenchmarkPostings_Stats(b *testing.B) {
} }
b.ResetTimer() b.ResetTimer()
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
p.Stats("__name__") p.Stats("__name__", 10)
} }
} }
func TestMemPostingsStats(t *testing.T) {
// create a new MemPostings
p := NewMemPostings()
// add some postings to the MemPostings
p.Add(1, labels.FromStrings("label", "value1"))
p.Add(1, labels.FromStrings("label", "value2"))
p.Add(1, labels.FromStrings("label", "value3"))
p.Add(2, labels.FromStrings("label", "value1"))
// call the Stats method to calculate the cardinality statistics
stats := p.Stats("label", 10)
// assert that the expected statistics were calculated
require.Equal(t, uint64(2), stats.CardinalityMetricsStats[0].Count)
require.Equal(t, "value1", stats.CardinalityMetricsStats[0].Name)
require.Equal(t, uint64(3), stats.CardinalityLabelStats[0].Count)
require.Equal(t, "label", stats.CardinalityLabelStats[0].Name)
require.Equal(t, uint64(24), stats.LabelValueStats[0].Count)
require.Equal(t, "label", stats.LabelValueStats[0].Name)
require.Equal(t, uint64(2), stats.LabelValuePairsStats[0].Count)
require.Equal(t, "label=value1", stats.LabelValuePairsStats[0].Name)
require.Equal(t, 3, stats.NumLabelPairs)
}
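
The expected LabelValueStats count of 24 follows from the new per-series weighting in Stats: value1 is referenced by two series while value2 and value3 are referenced by one each, so the total is 2 x len("value1") + len("value2") + len("value3") = 12 + 6 + 6 = 24 bytes.
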
func TestMemPostings_Delete(t *testing.T) { func TestMemPostings_Delete(t *testing.T) {
p := NewMemPostings() p := NewMemPostings()
p.Add(1, labels.FromStrings("lbl1", "a")) p.Add(1, labels.FromStrings("lbl1", "a"))

View file

@ -310,6 +310,22 @@ func postingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postin
// inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher. // inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher.
func inversePostingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postings, error) { func inversePostingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postings, error) {
// Fast-path for MatchNotRegexp matching.
// Inverse of a MatchNotRegexp is MatchRegexp (double negation).
// Fast-path for set matching.
if m.Type == labels.MatchNotRegexp {
setMatches := m.SetMatches()
if len(setMatches) > 0 {
return ix.Postings(m.Name, setMatches...)
}
}
// Fast-path for MatchNotEqual matching.
// Inverse of a MatchNotEqual is MatchEqual (double negation).
if m.Type == labels.MatchNotEqual {
return ix.Postings(m.Name, m.Value)
}
vals, err := ix.LabelValues(m.Name) vals, err := ix.LabelValues(m.Name)
if err != nil { if err != nil {
return nil, err return nil, err
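
Both fast paths exploit double negation: the inverse of a negative matcher is its positive counterpart, which can be answered directly from the postings index instead of scanning every label value. A hedged sketch of the idea (inverseIsDirect is illustrative):

package example

import "github.com/prometheus/prometheus/model/labels"

// inverseIsDirect reports whether the inverse of m can be served straight
// from the postings index, and with which values. All other matcher types
// still require the label-value scan below.
func inverseIsDirect(m *labels.Matcher) (values []string, ok bool) {
	switch m.Type {
	case labels.MatchNotEqual:
		// The inverse of label != "v" is label == "v".
		return []string{m.Value}, true
	case labels.MatchNotRegexp:
		// The inverse of label !~ "a|b" is label =~ "a|b", which the
		// regexp engine may expose as a finite set of matches.
		if sm := m.SetMatches(); len(sm) > 0 {
			return sm, true
		}
	}
	return nil, false
}
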
@ -746,14 +762,35 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
if app, err = newChunk.Appender(); err != nil {
break
}
-if hc, ok := p.currChkMeta.Chunk.(*chunkenc.HistogramChunk); ok {
switch hc := p.currChkMeta.Chunk.(type) {
case *chunkenc.HistogramChunk:
newChunk.(*chunkenc.HistogramChunk).SetCounterResetHeader(hc.GetCounterResetHeader())
case *safeHeadChunk:
if unwrapped, ok := hc.Chunk.(*chunkenc.HistogramChunk); ok {
newChunk.(*chunkenc.HistogramChunk).SetCounterResetHeader(unwrapped.GetCounterResetHeader())
} else {
err = fmt.Errorf("internal error, could not unwrap safeHeadChunk to histogram chunk: %T", hc.Chunk)
}
default:
err = fmt.Errorf("internal error, unknown chunk type %T when expecting histogram", p.currChkMeta.Chunk)
}
if err != nil {
break
}
var h *histogram.Histogram
t, h = p.currDelIter.AtHistogram()
p.curr.MinTime = t
// Detect missing gauge reset hint.
if h.CounterResetHint == histogram.GaugeType && newChunk.(*chunkenc.HistogramChunk).GetCounterResetHeader() != chunkenc.GaugeType {
err = fmt.Errorf("found gauge histogram in non gauge chunk")
break
}
app.AppendHistogram(t, h)
for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
if vt != chunkenc.ValHistogram {
err = fmt.Errorf("found value type %v in histogram chunk", vt)
@ -762,6 +799,20 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
t, h = p.currDelIter.AtHistogram()
// Defend against corrupted chunks.
if h.CounterResetHint == histogram.GaugeType {
pI, nI, bpI, bnI, _, _, okToAppend := app.(*chunkenc.HistogramAppender).AppendableGauge(h)
if !okToAppend {
err = errors.New("unable to append histogram due to unexpected schema change")
break
}
if len(pI)+len(nI)+len(bpI)+len(bnI) > 0 {
err = fmt.Errorf(
"bucket layout has changed unexpectedly: forward %d positive, %d negative, backward %d positive %d negative bucket interjections required",
len(pI), len(nI), len(bpI), len(bnI),
)
break
}
} else {
pI, nI, okToAppend, counterReset := app.(*chunkenc.HistogramAppender).Appendable(h)
if len(pI)+len(nI) > 0 {
err = fmt.Errorf(
@ -778,7 +829,7 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
err = errors.New("unable to append histogram due to unexpected schema change")
break
}
}
app.AppendHistogram(t, h)
}
case chunkenc.ValFloat:
@ -803,14 +854,35 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
if app, err = newChunk.Appender(); err != nil {
break
}
-if hc, ok := p.currChkMeta.Chunk.(*chunkenc.FloatHistogramChunk); ok {
switch hc := p.currChkMeta.Chunk.(type) {
case *chunkenc.FloatHistogramChunk:
newChunk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(hc.GetCounterResetHeader())
case *safeHeadChunk:
if unwrapped, ok := hc.Chunk.(*chunkenc.FloatHistogramChunk); ok {
newChunk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(unwrapped.GetCounterResetHeader())
} else {
err = fmt.Errorf("internal error, could not unwrap safeHeadChunk to float histogram chunk: %T", hc.Chunk)
}
default:
err = fmt.Errorf("internal error, unknown chunk type %T when expecting float histogram", p.currChkMeta.Chunk)
}
if err != nil {
break
}
var h *histogram.FloatHistogram
t, h = p.currDelIter.AtFloatHistogram()
p.curr.MinTime = t
// Detect missing gauge reset hint.
if h.CounterResetHint == histogram.GaugeType && newChunk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader() != chunkenc.GaugeType {
err = fmt.Errorf("found float gauge histogram in non gauge chunk")
break
}
app.AppendFloatHistogram(t, h)
for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
if vt != chunkenc.ValFloatHistogram {
err = fmt.Errorf("found value type %v in histogram chunk", vt)
@ -819,6 +891,20 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
t, h = p.currDelIter.AtFloatHistogram()
// Defend against corrupted chunks.
if h.CounterResetHint == histogram.GaugeType {
pI, nI, bpI, bnI, _, _, okToAppend := app.(*chunkenc.FloatHistogramAppender).AppendableGauge(h)
if !okToAppend {
err = errors.New("unable to append histogram due to unexpected schema change")
break
}
if len(pI)+len(nI)+len(bpI)+len(bnI) > 0 {
err = fmt.Errorf(
"bucket layout has changed unexpectedly: forward %d positive, %d negative, backward %d positive %d negative bucket interjections required",
len(pI), len(nI), len(bpI), len(bnI),
)
break
}
} else {
pI, nI, okToAppend, counterReset := app.(*chunkenc.FloatHistogramAppender).Appendable(h)
if len(pI)+len(nI) > 0 {
err = fmt.Errorf(
@ -835,6 +921,7 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
err = errors.New("unable to append histogram due to unexpected schema change")
break
}
}
app.AppendFloatHistogram(t, h)
}
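The two new "found gauge histogram in non gauge chunk" guards exist because a chunk's counter-reset header decides how its samples are re-interpreted on read: a gauge sample written into a counter-style chunk would silently corrupt reset handling. A minimal sketch of the invariant being enforced, with toy types (not the real chunkenc API):

package main

import (
	"errors"
	"fmt"
)

type resetHeader int

const (
	unknownReset resetHeader = iota
	gaugeReset
)

type chunk struct{ header resetHeader }

// checkGaugeAppend rejects a gauge histogram sample headed for a chunk whose
// header does not declare gauge semantics, mirroring the guard above.
func checkGaugeAppend(c *chunk, sampleIsGauge bool) error {
	if sampleIsGauge && c.header != gaugeReset {
		return errors.New("found gauge histogram in non gauge chunk")
	}
	return nil
}

func main() {
	fmt.Println(checkGaugeAppend(&chunk{header: unknownReset}, true)) // error
	fmt.Println(checkGaugeAppend(&chunk{header: gaugeReset}, true))  // <nil>
}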

View file

@ -116,6 +116,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
iCharSet := labels.MustNewMatcher(labels.MatchRegexp, "i", "1[0-9]")
iAlternate := labels.MustNewMatcher(labels.MatchRegexp, "i", "(1|2|3|4|5|6|20|55)")
iXYZ := labels.MustNewMatcher(labels.MatchRegexp, "i", "X|Y|Z")
iNotXYZ := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "X|Y|Z")
cases := []struct {
name string
matchers []*labels.Matcher
@ -126,6 +127,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
{`n="X",j="foo"`, []*labels.Matcher{nX, jFoo}}, {`n="X",j="foo"`, []*labels.Matcher{nX, jFoo}},
{`j="foo",n="1"`, []*labels.Matcher{jFoo, n1}}, {`j="foo",n="1"`, []*labels.Matcher{jFoo, n1}},
{`n="1",j!="foo"`, []*labels.Matcher{n1, jNotFoo}}, {`n="1",j!="foo"`, []*labels.Matcher{n1, jNotFoo}},
{`n="1",i!="2"`, []*labels.Matcher{n1, iNot2}},
{`n="X",j!="foo"`, []*labels.Matcher{nX, jNotFoo}}, {`n="X",j!="foo"`, []*labels.Matcher{nX, jNotFoo}},
{`i=~"1[0-9]",j=~"foo|bar"`, []*labels.Matcher{iCharSet, jFooBar}}, {`i=~"1[0-9]",j=~"foo|bar"`, []*labels.Matcher{iCharSet, jFooBar}},
{`j=~"foo|bar"`, []*labels.Matcher{jFooBar}}, {`j=~"foo|bar"`, []*labels.Matcher{jFooBar}},
@ -133,6 +135,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
{`j=~"X.+"`, []*labels.Matcher{jXplus}}, {`j=~"X.+"`, []*labels.Matcher{jXplus}},
{`i=~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iAlternate}}, {`i=~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iAlternate}},
{`i=~"X|Y|Z"`, []*labels.Matcher{iXYZ}}, {`i=~"X|Y|Z"`, []*labels.Matcher{iXYZ}},
{`i!~"X|Y|Z"`, []*labels.Matcher{iNotXYZ}},
{`i=~".*"`, []*labels.Matcher{iStar}}, {`i=~".*"`, []*labels.Matcher{iStar}},
{`i=~"1.*"`, []*labels.Matcher{i1Star}}, {`i=~"1.*"`, []*labels.Matcher{i1Star}},
{`i=~".*1"`, []*labels.Matcher{iStar1}}, {`i=~".*1"`, []*labels.Matcher{iStar1}},
@ -148,6 +151,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
{`n="1",i!="",j=~"X.+"`, []*labels.Matcher{n1, iNotEmpty, jXplus}}, {`n="1",i!="",j=~"X.+"`, []*labels.Matcher{n1, iNotEmpty, jXplus}},
{`n="1",i!="",j=~"XXX|YYY"`, []*labels.Matcher{n1, iNotEmpty, jXXXYYY}}, {`n="1",i!="",j=~"XXX|YYY"`, []*labels.Matcher{n1, iNotEmpty, jXXXYYY}},
{`n="1",i=~"X|Y|Z",j="foo"`, []*labels.Matcher{n1, iXYZ, jFoo}}, {`n="1",i=~"X|Y|Z",j="foo"`, []*labels.Matcher{n1, iXYZ, jFoo}},
{`n="1",i!~"X|Y|Z",j="foo"`, []*labels.Matcher{n1, iNotXYZ, jFoo}},
{`n="1",i=~".+",j="foo"`, []*labels.Matcher{n1, iPlus, jFoo}}, {`n="1",i=~".+",j="foo"`, []*labels.Matcher{n1, iPlus, jFoo}},
{`n="1",i=~"1.+",j="foo"`, []*labels.Matcher{n1, i1Plus, jFoo}}, {`n="1",i=~"1.+",j="foo"`, []*labels.Matcher{n1, i1Plus, jFoo}},
{`n="1",i=~".*1.*",j="foo"`, []*labels.Matcher{n1, iStar1Star, jFoo}}, {`n="1",i=~".*1.*",j="foo"`, []*labels.Matcher{n1, iStar1Star, jFoo}},

View file

@ -29,6 +29,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
@ -541,6 +542,46 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) {
}
}
func TestBlockQuerier_TrimmingDoesNotModifyOriginalTombstoneIntervals(t *testing.T) {
c := blockQuerierTestCase{
mint: 2,
maxt: 6,
ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", "a")},
exp: newMockSeriesSet([]storage.Series{
storage.NewListSeries(labels.FromStrings("a", "a"),
[]tsdbutil.Sample{sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
),
storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
[]tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
}),
expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
[]tsdbutil.Sample{sample{3, 4, nil, nil}}, []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
),
storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
[]tsdbutil.Sample{sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
),
}),
}
ir, cr, _, _ := createIdxChkReaders(t, testData)
stones := tombstones.NewMemTombstones()
p, err := ir.Postings("a", "a")
require.NoError(t, err)
refs, err := index.ExpandPostings(p)
require.NoError(t, err)
for _, ref := range refs {
stones.AddInterval(ref, tombstones.Interval{Mint: 1, Maxt: 2})
}
testBlockQuerier(t, c, ir, cr, stones)
for _, ref := range refs {
intervals, err := stones.Get(ref)
require.NoError(t, err)
// Without copy, the intervals could be [math.MinInt64, 2].
require.Equal(t, tombstones.Intervals{{Mint: 1, Maxt: 2}}, intervals)
}
}
var testData = []seriesSamples{
{
lset: map[string]string{"a": "a"},
@ -907,6 +948,202 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil},
},
},
{
name: "one histogram chunk",
chks: [][]tsdbutil.Sample{
{
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil},
sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil},
},
},
expected: []tsdbutil.Sample{
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
}),
},
},
{
name: "one histogram chunk intersect with deletion interval",
chks: [][]tsdbutil.Sample{
{
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil},
sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil},
},
},
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
expected: []tsdbutil.Sample{
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
}),
},
},
{
name: "one float histogram chunk",
chks: [][]tsdbutil.Sample{
{
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
},
},
expected: []tsdbutil.Sample{
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
}),
},
},
{
name: "one float histogram chunk intersect with deletion interval",
chks: [][]tsdbutil.Sample{
{
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
},
},
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
expected: []tsdbutil.Sample{
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
}),
},
},
{
name: "one gauge histogram chunk",
chks: [][]tsdbutil.Sample{
{
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
},
},
expected: []tsdbutil.Sample{
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
}),
},
},
{
name: "one gauge histogram chunk intersect with deletion interval",
chks: [][]tsdbutil.Sample{
{
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
},
},
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
expected: []tsdbutil.Sample{
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
}),
},
},
{
name: "one gauge float histogram",
chks: [][]tsdbutil.Sample{
{
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
},
},
expected: []tsdbutil.Sample{
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
}),
},
},
{
name: "one gauge float histogram chunk intersect with deletion interval",
chks: [][]tsdbutil.Sample{
{
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
},
},
intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
expected: []tsdbutil.Sample{
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
},
expectedChks: []chunks.Meta{
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
}),
},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
@ -1820,6 +2057,19 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "2.5"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "1")},
exp: []labels.Labels{
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "1|2.5")},
exp: []labels.Labels{
labels.FromStrings("n", "2"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a$")},
exp: []labels.Labels{
@ -1909,6 +2159,14 @@ func TestPostingsForMatchers(t *testing.T) {
require.NoError(t, err)
for _, c := range cases {
name := ""
for i, matcher := range c.matchers {
if i > 0 {
name += ","
}
name += matcher.String()
}
t.Run(name, func(t *testing.T) {
exp := map[string]struct{}{}
for _, l := range c.exp {
exp[l.String()] = struct{}{}
@ -1930,6 +2188,7 @@ func TestPostingsForMatchers(t *testing.T) {
if len(exp) != 0 {
t.Errorf("Evaluating %v, missing results %+v", c.matchers, exp)
}
})
}
}
@ -2376,3 +2635,80 @@ func BenchmarkHeadQuerier(b *testing.B) {
require.NoError(b, ss.Err())
}
}
// This is a regression test for the case where gauge histograms were not handled by
// populateWithDelChunkSeriesIterator correctly.
func TestQueryWithDeletedHistograms(t *testing.T) {
testcases := map[string]func(int) (*histogram.Histogram, *histogram.FloatHistogram){
"intCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
return tsdbutil.GenerateTestHistogram(i), nil
},
"intgauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
return tsdbutil.GenerateTestGaugeHistogram(rand.Int() % 1000), nil
},
"floatCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
return nil, tsdbutil.GenerateTestFloatHistogram(i)
},
"floatGauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
return nil, tsdbutil.GenerateTestGaugeFloatHistogram(rand.Int() % 1000)
},
}
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
db := openTestDB(t, nil, nil)
defer func() {
require.NoError(t, db.Close())
}()
db.EnableNativeHistograms()
appender := db.Appender(context.Background())
var (
err error
seriesRef storage.SeriesRef
)
lbs := labels.FromStrings("__name__", "test", "type", name)
for i := 0; i < 100; i++ {
h, fh := tc(i)
seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, fh)
require.NoError(t, err)
}
err = appender.Commit()
require.NoError(t, err)
matcher, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test")
require.NoError(t, err)
// Delete the last 20.
err = db.Delete(80, 100, matcher)
require.NoError(t, err)
chunkQuerier, err := db.ChunkQuerier(context.Background(), 0, 100)
require.NoError(t, err)
css := chunkQuerier.Select(false, nil, matcher)
seriesCount := 0
for css.Next() {
seriesCount++
series := css.At()
sampleCount := 0
it := series.Iterator(nil)
for it.Next() {
chk := it.At()
for cit := chk.Chunk.Iterator(nil); cit.Next() != chunkenc.ValNone; {
sampleCount++
}
}
require.NoError(t, it.Err())
require.Equal(t, 80, sampleCount)
}
require.NoError(t, css.Err())
require.Equal(t, 1, seriesCount)
})
}
}

View file

@ -18,6 +18,7 @@ import (
"fmt" "fmt"
"hash" "hash"
"hash/crc32" "hash/crc32"
"math"
"os" "os"
"path/filepath" "path/filepath"
"sort" "sort"
@ -252,7 +253,14 @@ func NewTestMemTombstones(intervals []Intervals) *MemTombstones {
func (t *MemTombstones) Get(ref storage.SeriesRef) (Intervals, error) { func (t *MemTombstones) Get(ref storage.SeriesRef) (Intervals, error) {
t.mtx.RLock() t.mtx.RLock()
defer t.mtx.RUnlock() defer t.mtx.RUnlock()
return t.intvlGroups[ref], nil intervals, ok := t.intvlGroups[ref]
if !ok {
return nil, nil
}
// Make a copy to avoid race.
res := make(Intervals, len(intervals))
copy(res, intervals)
return res, nil
}
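The copy in Get matters because callers may rewrite the returned slice in place (Intervals.Add below mutates elements) while other goroutines read the map under RLock; TestTombstonesGetWithCopy later in this commit exercises exactly that. A stripped-down sketch of the hazard and the fix, with toy types:

package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu   sync.RWMutex
	data map[int][]int
}

// getUnsafe hands out the internal slice: a caller writing into it races
// with concurrent readers of the same backing array.
func (s *store) getUnsafe(k int) []int { s.mu.RLock(); defer s.mu.RUnlock(); return s.data[k] }

// get returns a private copy, as the patched MemTombstones.Get now does.
func (s *store) get(k int) []int {
	s.mu.RLock()
	defer s.mu.RUnlock()
	res := make([]int, len(s.data[k]))
	copy(res, s.data[k])
	return res
}

func main() {
	s := &store{data: map[int][]int{1: {1, 2}}}
	v := s.get(1)
	v[0] = 99                   // mutates only the copy
	fmt.Println(s.getUnsafe(1)) // [1 2]: internal state untouched
}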
func (t *MemTombstones) DeleteTombstones(refs map[storage.SeriesRef]struct{}) {
@ -349,18 +357,24 @@
func (in Intervals) Add(n Interval) Intervals {
// Find min and max indexes of intervals that overlap with the new interval.
// Intervals are closed [t1, t2] and t is discrete, so if neighbour intervals are 1 step difference
// to the new one, we can merge those together.
-mini := sort.Search(len(in), func(i int) bool { return in[i].Maxt >= n.Mint-1 })
-if mini == len(in) {
-return append(in, n)
-}
-maxi := sort.Search(len(in)-mini, func(i int) bool { return in[mini+i].Mint > n.Maxt+1 })
-if maxi == 0 {
-if mini == 0 {
-return append(Intervals{n}, in...)
-}
-return append(in[:mini], append(Intervals{n}, in[mini:]...)...)
-}
mini := 0
if n.Mint != math.MinInt64 { // Avoid overflow.
mini = sort.Search(len(in), func(i int) bool { return in[i].Maxt >= n.Mint-1 })
if mini == len(in) {
return append(in, n)
}
}
maxi := len(in)
if n.Maxt != math.MaxInt64 { // Avoid overflow.
maxi = sort.Search(len(in)-mini, func(i int) bool { return in[mini+i].Mint > n.Maxt+1 })
if maxi == 0 {
if mini == 0 {
return append(Intervals{n}, in...)
}
return append(in[:mini], append(Intervals{n}, in[mini:]...)...)
}
}
if n.Mint < in[mini].Mint {
in[mini].Mint = n.Mint
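The two "Avoid overflow" guards are needed because the interval bounds are plain int64s; a standalone demonstration of the wraparound that would otherwise point the binary search at the wrong end of the slice:

package main

import (
	"fmt"
	"math"
)

func main() {
	var mint int64 = math.MinInt64
	// Without the guard, the search predicate computes n.Mint-1, which wraps
	// to the far end of the int64 range instead of extending downwards.
	fmt.Println(mint-1 == math.MaxInt64) // true
}

The new test cases below (intervals open-ended at math.MinInt64 and math.MaxInt64) cover both guarded branches.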

View file

@ -81,6 +81,22 @@ func TestDeletingTombstones(t *testing.T) {
require.Empty(t, intervals)
}
func TestTombstonesGetWithCopy(t *testing.T) {
stones := NewMemTombstones()
stones.AddInterval(1, Intervals{{Mint: 1, Maxt: 2}, {Mint: 7, Maxt: 8}, {Mint: 11, Maxt: 12}}...)
intervals0, err := stones.Get(1)
require.NoError(t, err)
require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 7, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals0)
intervals1 := intervals0.Add(Interval{Mint: 4, Maxt: 6})
require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 4, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals0) // Original slice changed.
require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 4, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals1)
intervals2, err := stones.Get(1)
require.NoError(t, err)
require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 7, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals2)
}
func TestTruncateBefore(t *testing.T) {
cases := []struct {
before Intervals
@ -210,6 +226,26 @@ func TestAddingNewIntervals(t *testing.T) {
new: Interval{math.MinInt64, 10},
exp: Intervals{{math.MinInt64, math.MaxInt64}},
},
{
exist: Intervals{{9, 10}},
new: Interval{math.MinInt64, 7},
exp: Intervals{{math.MinInt64, 7}, {9, 10}},
},
{
exist: Intervals{{9, 10}},
new: Interval{12, math.MaxInt64},
exp: Intervals{{9, 10}, {12, math.MaxInt64}},
},
{
exist: Intervals{{9, 10}},
new: Interval{math.MinInt64, 8},
exp: Intervals{{math.MinInt64, 10}},
},
{
exist: Intervals{{9, 10}},
new: Interval{11, math.MaxInt64},
exp: Intervals{{9, math.MaxInt64}},
},
}
for _, c := range cases {

View file

@ -71,9 +71,19 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta {
case chunkenc.ValFloat:
ca.Append(s.Get(i).T(), s.Get(i).F())
case chunkenc.ValHistogram:
-ca.AppendHistogram(s.Get(i).T(), s.Get(i).H())
h := s.Get(i).H()
ca.AppendHistogram(s.Get(i).T(), h)
if i == 0 && h.CounterResetHint == histogram.GaugeType {
hc := c.(*chunkenc.HistogramChunk)
hc.SetCounterResetHeader(chunkenc.GaugeType)
}
case chunkenc.ValFloatHistogram:
-ca.AppendFloatHistogram(s.Get(i).T(), s.Get(i).FH())
fh := s.Get(i).FH()
ca.AppendFloatHistogram(s.Get(i).T(), fh)
if i == 0 && fh.CounterResetHint == histogram.GaugeType {
hc := c.(*chunkenc.FloatHistogramChunk)
hc.SetCounterResetHeader(chunkenc.GaugeType)
}
default:
panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
}

View file

@ -108,3 +108,13 @@ func GenerateTestGaugeFloatHistogram(i int) *histogram.FloatHistogram {
h.CounterResetHint = histogram.GaugeType
return h
}
func SetHistogramNotCounterReset(h *histogram.Histogram) *histogram.Histogram {
h.CounterResetHint = histogram.NotCounterReset
return h
}
func SetFloatHistogramNotCounterReset(h *histogram.FloatHistogram) *histogram.FloatHistogram {
h.CounterResetHint = histogram.NotCounterReset
return h
}

View file

@ -18,7 +18,7 @@ import (
"io" "io"
"math" "math"
"os" "os"
"path" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -34,12 +34,16 @@ import (
)

const (
-readPeriod = 10 * time.Millisecond
checkpointPeriod = 5 * time.Second
segmentCheckPeriod = 100 * time.Millisecond
consumer = "consumer"
)

var (
ErrIgnorable = errors.New("ignore me")
readTimeout = 15 * time.Second
)

// WriteTo is an interface used by the Watcher to send the samples it's read
// from the WAL on to somewhere else. Functions will be called concurrently
// and it is left to the implementer to make sure they are safe.
@ -61,11 +65,17 @@ type WriteTo interface {
SeriesReset(int)
}

// Used to notify the watcher that data has been written so that it can read.
type WriteNotified interface {
Notify()
}
type WatcherMetrics struct {
recordsRead *prometheus.CounterVec
recordDecodeFails *prometheus.CounterVec
samplesSentPreTailing *prometheus.CounterVec
currentSegment *prometheus.GaugeVec
notificationsSkipped *prometheus.CounterVec
}

// Watcher watches the TSDB WAL for a given WriteTo.
@ -88,7 +98,9 @@ type Watcher struct {
recordDecodeFailsMetric prometheus.Counter
samplesSentPreTailing prometheus.Counter
currentSegmentMetric prometheus.Gauge
notificationsSkipped prometheus.Counter

readNotify chan struct{}
quit chan struct{}
done chan struct{}
@ -134,6 +146,15 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
},
[]string{consumer},
),
notificationsSkipped: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "wal_watcher",
Name: "notifications_skipped_total",
Help: "The number of WAL write notifications that the Watcher has skipped due to already being in a WAL read routine.",
},
[]string{consumer},
),
}

if reg != nil {
@ -141,6 +162,7 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
reg.MustRegister(m.recordDecodeFails)
reg.MustRegister(m.samplesSentPreTailing)
reg.MustRegister(m.currentSegment)
reg.MustRegister(m.notificationsSkipped)
}

return m
@ -156,11 +178,12 @@ func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logge
writer: writer,
metrics: metrics,
readerMetrics: readerMetrics,
-walDir: path.Join(dir, "wal"),
walDir: filepath.Join(dir, "wal"),
name: name,
sendExemplars: sendExemplars,
sendHistograms: sendHistograms,
readNotify: make(chan struct{}),
quit: make(chan struct{}),
done: make(chan struct{}),
@ -168,6 +191,17 @@
}
}
} }
func (w *Watcher) Notify() {
select {
case w.readNotify <- struct{}{}:
return
default: // default so we can exit
// we don't need a buffered channel or any buffering since
// for each notification it recv's the watcher will read until EOF
w.notificationsSkipped.Inc()
}
}
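Notify uses the classic non-blocking send: if the watcher is already mid-read, the notification is dropped (and counted) rather than queued, since one wake-up already causes a read to EOF. A self-contained sketch of the pattern:

package main

import "fmt"

// notify performs a non-blocking send: it returns true if the receiver was
// ready and false if the notification was skipped, like Watcher.Notify above.
func notify(ch chan struct{}) bool {
	select {
	case ch <- struct{}{}:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan struct{}) // unbuffered, nobody receiving yet
	fmt.Println(notify(ch))   // false: skipped instead of blocking the writer
}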
func (w *Watcher) setMetrics() {
// Setup the WAL Watchers metrics. We do this here rather than in the
// constructor because of the ordering of creating Queue Managers,
@ -177,6 +211,8 @@ func (w *Watcher) setMetrics() {
w.recordDecodeFailsMetric = w.metrics.recordDecodeFails.WithLabelValues(w.name)
w.samplesSentPreTailing = w.metrics.samplesSentPreTailing.WithLabelValues(w.name)
w.currentSegmentMetric = w.metrics.currentSegment.WithLabelValues(w.name)
w.notificationsSkipped = w.metrics.notificationsSkipped.WithLabelValues(w.name)
}
}
@ -262,7 +298,7 @@ func (w *Watcher) Run() error {
// On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment.
// On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
-if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil {
if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) {
return err
}
@ -330,6 +366,26 @@ func (w *Watcher) segments(dir string) ([]int, error) {
return refs, nil
}
func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, size int64) error {
err := w.readSegment(r, segmentNum, tail)
// Ignore all errors reading to end of segment whilst replaying the WAL.
if !tail {
if err != nil && errors.Cause(err) != io.EOF {
level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
} else if r.Offset() != size {
level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size)
}
return ErrIgnorable
}
// Otherwise, when we are tailing, non-EOFs are fatal.
if errors.Cause(err) != io.EOF {
return err
}
return nil
}
// Use tail true to indicate that the reader is currently on a segment that is
// actively being written to. If false, assume it's a full segment and we're
// replaying it on start to cache the series records.
@ -342,7 +398,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
reader := NewLiveReader(w.logger, w.readerMetrics, segment)
-readTicker := time.NewTicker(readPeriod)
readTicker := time.NewTicker(readTimeout)
defer readTicker.Stop()
checkpointTicker := time.NewTicker(checkpointPeriod)
@ -400,7 +456,6 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
if last <= segmentNum {
continue
}
err = w.readSegment(reader, segmentNum, tail)
// Ignore errors reading to end of segment whilst replaying the WAL.
@ -421,24 +476,23 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
return nil
// we haven't read due to a notification in quite some time, try reading anyways
case <-readTicker.C:
-err = w.readSegment(reader, segmentNum, tail)
-// Ignore all errors reading to end of segment whilst replaying the WAL.
-if !tail {
-switch {
-case err != nil && errors.Cause(err) != io.EOF:
-level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
-case reader.Offset() != size:
-level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
-}
-return nil
-}
-// Otherwise, when we are tailing, non-EOFs are fatal.
-if errors.Cause(err) != io.EOF {
-return err
-}
level.Debug(w.logger).Log("msg", "Watcher is reading the WAL due to timeout, haven't received any write notifications recently", "timeout", readTimeout)
err := w.readAndHandleError(reader, segmentNum, tail, size)
if err != nil {
return err
}
// still want to reset the ticker so we don't read too often
readTicker.Reset(readTimeout)
case <-w.readNotify:
err := w.readAndHandleError(reader, segmentNum, tail, size)
if err != nil {
return err
}
// still want to reset the ticker so we don't read too often
readTicker.Reset(readTimeout)
}
}
}
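The reworked loop reads on write notifications and keeps the ticker only as a fallback, resetting it after every successful read so the timeout tracks inactivity. A condensed sketch of that select shape (toy read function; the real code also handles checkpoints, segment changes and replay errors):

package main

import (
	"fmt"
	"time"
)

func watchLoop(quit, readNotify chan struct{}, readTimeout time.Duration, read func()) {
	readTicker := time.NewTicker(readTimeout)
	defer readTicker.Stop()
	for {
		select {
		case <-quit:
			return
		case <-readTicker.C: // no notification for a while: read anyway
			read()
			readTicker.Reset(readTimeout)
		case <-readNotify: // woken by a WAL write
			read()
			readTicker.Reset(readTimeout)
		}
	}
}

func main() {
	quit, notify := make(chan struct{}), make(chan struct{}, 1)
	go watchLoop(quit, notify, 50*time.Millisecond, func() { fmt.Println("read to EOF") })
	notify <- struct{}{}
	time.Sleep(100 * time.Millisecond)
	close(quit)
}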
@ -691,7 +745,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err
func checkpointNum(dir string) (int, error) {
// Checkpoint dir names are in the format checkpoint.000001
// dir may contain a hidden directory, so only check the base directory
-chunks := strings.Split(path.Base(dir), ".")
chunks := strings.Split(filepath.Base(dir), ".")
if len(chunks) != 2 {
return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
}

View file

@ -104,7 +104,7 @@ func (wtm *writeToMock) SeriesReset(index int) {
}
}

-func (wtm *writeToMock) checkNumLabels() int {
func (wtm *writeToMock) checkNumSeries() int {
wtm.seriesLock.Lock()
defer wtm.seriesLock.Unlock()
return len(wtm.seriesSegmentIndexes)
@ -230,9 +230,9 @@ func TestTailSamples(t *testing.T) {
expectedExemplars := seriesCount * exemplarsCount
expectedHistograms := seriesCount * histogramsCount
retry(t, defaultRetryInterval, defaultRetries, func() bool {
-return wt.checkNumLabels() >= expectedSeries
return wt.checkNumSeries() >= expectedSeries
})
-require.Equal(t, expectedSeries, wt.checkNumLabels(), "did not receive the expected number of series")
require.Equal(t, expectedSeries, wt.checkNumSeries(), "did not receive the expected number of series")
require.Equal(t, expectedSamples, wt.samplesAppended, "did not receive the expected number of samples")
require.Equal(t, expectedExemplars, wt.exemplarsAppended, "did not receive the expected number of exemplars")
require.Equal(t, expectedHistograms, wt.histogramsAppended, "did not receive the expected number of histograms")
@ -290,7 +290,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
}
}
require.NoError(t, w.Log(recs...))
readTimeout = time.Second
_, _, err = Segments(w.Dir())
require.NoError(t, err)
@ -299,11 +299,10 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
go watcher.Start()

expected := seriesCount
-retry(t, defaultRetryInterval, defaultRetries, func() bool {
-return wt.checkNumLabels() >= expected
-})
require.Eventually(t, func() bool {
return wt.checkNumSeries() == expected
}, 20*time.Second, 1*time.Second)
watcher.Stop()
-require.Equal(t, expected, wt.checkNumLabels())
})
}
}
@ -383,16 +382,17 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
_, _, err = Segments(w.Dir())
require.NoError(t, err)
readTimeout = time.Second
wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
go watcher.Start()

expected := seriesCount * 2
-retry(t, defaultRetryInterval, defaultRetries, func() bool {
-return wt.checkNumLabels() >= expected
-})
require.Eventually(t, func() bool {
return wt.checkNumSeries() == expected
}, 10*time.Second, 1*time.Second)
watcher.Stop()
-require.Equal(t, expected, wt.checkNumLabels())
})
}
}
@ -460,10 +460,10 @@ func TestReadCheckpoint(t *testing.T) {
expectedSeries := seriesCount
retry(t, defaultRetryInterval, defaultRetries, func() bool {
-return wt.checkNumLabels() >= expectedSeries
return wt.checkNumSeries() >= expectedSeries
})
watcher.Stop()
-require.Equal(t, expectedSeries, wt.checkNumLabels())
require.Equal(t, expectedSeries, wt.checkNumSeries())
})
}
}
@ -595,6 +595,7 @@ func TestCheckpointSeriesReset(t *testing.T) {
_, _, err = Segments(w.Dir())
require.NoError(t, err)
readTimeout = time.Second
wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
watcher.MaxSegment = -1
@ -602,9 +603,11 @@ func TestCheckpointSeriesReset(t *testing.T) {
expected := seriesCount
retry(t, defaultRetryInterval, defaultRetries, func() bool {
-return wt.checkNumLabels() >= expected
return wt.checkNumSeries() >= expected
})
-require.Equal(t, seriesCount, wt.checkNumLabels())
require.Eventually(t, func() bool {
return wt.checkNumSeries() == seriesCount
}, 10*time.Second, 1*time.Second)
_, err = Checkpoint(log.NewNopLogger(), w, 2, 4, func(x chunks.HeadSeriesRef) bool { return true }, 0)
require.NoError(t, err)
@ -621,7 +624,9 @@ func TestCheckpointSeriesReset(t *testing.T) {
// If you modify the checkpoint and truncate segment #'s run the test to see how
// many series records you end up with and change the last Equals check accordingly
// or modify the Equals to Assert(len(wt.seriesLabels) < seriesCount*10)
-require.Equal(t, tc.segments, wt.checkNumLabels())
require.Eventually(t, func() bool {
return wt.checkNumSeries() == tc.segments
}, 20*time.Second, 1*time.Second)
})
}
}

View file

@ -188,6 +188,8 @@ type WL struct {
compress bool
snappyBuf []byte

WriteNotified WriteNotified

metrics *wlMetrics
}
@ -343,6 +345,10 @@ func (w *WL) Dir() string {
return w.dir
}

func (w *WL) SetWriteNotified(wn WriteNotified) {
w.WriteNotified = wn
}

func (w *WL) run() {
Loop:
for {

View file

@ -171,7 +171,7 @@ type TSDBAdminStats interface {
CleanTombstones() error
Delete(mint, maxt int64, ms ...*labels.Matcher) error
Snapshot(dir string, withHead bool) error
-Stats(statsByLabelName string) (*tsdb.Stats, error)
Stats(statsByLabelName string, limit int) (*tsdb.Stats, error)
WALReplayStatus() (tsdb.WALReplayStatus, error)
}
@ -1480,8 +1480,15 @@ func TSDBStatsFromIndexStats(stats []index.Stat) []TSDBStat {
return result
}

-func (api *API) serveTSDBStatus(*http.Request) apiFuncResult {
-s, err := api.db.Stats(labels.MetricName)
func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult {
limit := 10
if s := r.FormValue("limit"); s != "" {
var err error
if limit, err = strconv.Atoi(s); err != nil || limit < 1 {
return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a positive number")}, nil, nil}
}
}
s, err := api.db.Stats(labels.MetricName, limit)
if err != nil {
return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
}
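With this change the TSDB stats endpoint takes an optional limit query parameter (default 10, must be positive) controlling how many entries each cardinality statistic returns. A hedged usage sketch; /api/v1/status/tsdb is the documented endpoint, the localhost address is an assumption:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Ask a local Prometheus (assumed at :9090) for the top 20 entries per
	// cardinality statistic instead of the default 10.
	resp, err := http.Get("http://localhost:9090/api/v1/status/tsdb?limit=20")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body), "bytes of JSON stats")
}

Per the handler above, a limit of 0 or a non-numeric value is rejected with a bad-data error.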

View file

@ -2620,7 +2620,7 @@ type fakeDB struct {
func (f *fakeDB) CleanTombstones() error { return f.err }
func (f *fakeDB) Delete(int64, int64, ...*labels.Matcher) error { return f.err }
func (f *fakeDB) Snapshot(string, bool) error { return f.err }
-func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) {
func (f *fakeDB) Stats(statsByLabelName string, limit int) (_ *tsdb.Stats, retErr error) {
dbDir, err := os.MkdirTemp("", "tsdb-api-ready")
if err != nil {
return nil, err
@ -2634,7 +2634,7 @@ func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) {
opts := tsdb.DefaultHeadOptions()
opts.ChunkRange = 1000
h, _ := tsdb.NewHead(nil, nil, nil, nil, opts, nil)
-return h.Stats(statsByLabelName), nil
return h.Stats(statsByLabelName, limit), nil
}

func (f *fakeDB) WALReplayStatus() (tsdb.WALReplayStatus, error) {
@ -3206,9 +3206,20 @@ func TestTSDBStatus(t *testing.T) {
{
db: tsdb,
endpoint: tsdbStatusAPI,
errType: errorNone,
},
{
db: tsdb,
endpoint: tsdbStatusAPI,
values: map[string][]string{"limit": {"20"}},
errType: errorNone,
},
{
db: tsdb,
endpoint: tsdbStatusAPI,
values: map[string][]string{"limit": {"0"}},
errType: errorBadData,
},
} {
tc := tc
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {

View file

@ -251,7 +251,7 @@ func (notReadyReadStorage) StartTime() (int64, error) {
return 0, errors.Wrap(tsdb.ErrNotReady, "wrap")
}

-func (notReadyReadStorage) Stats(string) (*tsdb.Stats, error) {
func (notReadyReadStorage) Stats(string, int) (*tsdb.Stats, error) {
return nil, errors.Wrap(tsdb.ErrNotReady, "wrap")
}
@ -378,7 +378,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
body, err := io.ReadAll(res.Body)
require.NoError(t, err)
-p := textparse.NewProtobufParser(body)
p := textparse.NewProtobufParser(body, false)
var actVec promql.Vector
metricFamilies := 0
l := labels.Labels{}

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
-"version": "0.43.0",
"version": "0.44.0",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@ -29,7 +29,7 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
-"@prometheus-io/lezer-promql": "0.43.0",
"@prometheus-io/lezer-promql": "0.44.0",
"lru-cache": "^6.0.0"
},
"devDependencies": {

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
-"version": "0.43.0",
"version": "0.44.0",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",

View file

@ -28,10 +28,10 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
-"version": "0.43.0",
"version": "0.44.0",
"license": "Apache-2.0",
"dependencies": {
-"@prometheus-io/lezer-promql": "0.43.0",
"@prometheus-io/lezer-promql": "0.44.0",
"lru-cache": "^6.0.0"
},
"devDependencies": {
@ -61,7 +61,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
-"version": "0.43.0",
"version": "0.44.0",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.2.2",
@ -20763,7 +20763,7 @@
},
"react-app": {
"name": "@prometheus-io/app",
-"version": "0.43.0",
"version": "0.44.0",
"dependencies": {
"@codemirror/autocomplete": "^6.4.0",
"@codemirror/commands": "^6.2.0",
@ -20781,7 +20781,7 @@
"@lezer/lr": "^1.3.1", "@lezer/lr": "^1.3.1",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.43.0", "@prometheus-io/codemirror-promql": "0.44.0",
"bootstrap": "^4.6.2", "bootstrap": "^4.6.2",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^7.2.0", "downshift": "^7.2.0",
@ -23417,7 +23417,7 @@
"@lezer/lr": "^1.3.1", "@lezer/lr": "^1.3.1",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.43.0", "@prometheus-io/codemirror-promql": "0.44.0",
"@testing-library/react-hooks": "^7.0.2", "@testing-library/react-hooks": "^7.0.2",
"@types/enzyme": "^3.10.12", "@types/enzyme": "^3.10.12",
"@types/flot": "0.0.32", "@types/flot": "0.0.32",
@ -23468,7 +23468,7 @@
"@lezer/common": "^1.0.2", "@lezer/common": "^1.0.2",
"@lezer/highlight": "^1.1.3", "@lezer/highlight": "^1.1.3",
"@lezer/lr": "^1.3.1", "@lezer/lr": "^1.3.1",
"@prometheus-io/lezer-promql": "0.43.0", "@prometheus-io/lezer-promql": "0.44.0",
"@types/lru-cache": "^5.1.1", "@types/lru-cache": "^5.1.1",
"isomorphic-fetch": "^3.0.0", "isomorphic-fetch": "^3.0.0",
"lru-cache": "^6.0.0", "lru-cache": "^6.0.0",

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/app",
-"version": "0.43.0",
"version": "0.44.0",
"private": true,
"dependencies": {
"@codemirror/autocomplete": "^6.4.0",
@ -19,7 +19,7 @@
"@lezer/common": "^1.0.2", "@lezer/common": "^1.0.2",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.43.0", "@prometheus-io/codemirror-promql": "0.44.0",
"bootstrap": "^4.6.2", "bootstrap": "^4.6.2",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^7.2.0", "downshift": "^7.2.0",

View file

@ -52,8 +52,8 @@ type dbAdapter struct {
*tsdb.DB
}

-func (a *dbAdapter) Stats(statsByLabelName string) (*tsdb.Stats, error) {
-return a.Head().Stats(statsByLabelName), nil
func (a *dbAdapter) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) {
return a.Head().Stats(statsByLabelName, limit), nil
}

func (a *dbAdapter) WALReplayStatus() (tsdb.WALReplayStatus, error) {