Mirror of https://github.com/prometheus/prometheus.git (synced 2024-11-14 17:44:06 -08:00)

Merge pull request #505 from grafana/zenador/sync-upstream-22-may-2023: Sync upstream 24 May 2023

This commit is contained in: 8d6690e86a

@@ -14,8 +14,10 @@ build:
     all:
         - netgo
         - builtinassets
+        - stringlabels
     windows:
         - builtinassets
+        - stringlabels
     flags: -a
     ldflags: |
         -X github.com/prometheus/common/version.Version={{.Version}}

CHANGELOG.md (26 lines changed)
@@ -1,5 +1,29 @@
 # Changelog
 
+## 2.44.0 / 2023-05-13
+
+This version is built with Go tag `stringlabels`, to use the smaller data
+structure for Labels that was optional in the previous release. For more
+details about this code change see #10991.
+
+* [CHANGE] Remote-write: Raise default samples per send to 2,000. #12203
+* [FEATURE] Remote-read: Handle native histograms. #12085, #12192
+* [FEATURE] Promtool: Health and readiness check of prometheus server in CLI. #12096
+* [FEATURE] PromQL: Add `query_samples_total` metric, the total number of samples loaded by all queries. #12251
+* [ENHANCEMENT] Storage: Optimise buffer used to iterate through samples. #12326
+* [ENHANCEMENT] Scrape: Reduce memory allocations on target labels. #12084
+* [ENHANCEMENT] PromQL: Use faster heap method for `topk()` / `bottomk()`. #12190
+* [ENHANCEMENT] Rules API: Allow filtering by rule name. #12270
+* [ENHANCEMENT] Native Histograms: Various fixes and improvements. #11687, #12264, #12272
+* [ENHANCEMENT] UI: Search of scraping pools is now case-insensitive. #12207
+* [ENHANCEMENT] TSDB: Add an affirmative log message for successful WAL repair. #12135
+* [BUGFIX] TSDB: Block compaction failed when shutting down. #12179
+* [BUGFIX] TSDB: Out-of-order chunks could be ignored if the write-behind log was deleted. #12127
+
+## 2.43.1 / 2023-05-03
+
+* [BUGFIX] Labels: `Set()` after `Del()` would be ignored, which broke some relabeling rules. #12322
+
 ## 2.43.0 / 2023-03-21
 
 We are working on some performance improvements in Prometheus, which are only
@@ -13,7 +37,7 @@ the gains on their production architecture. We are providing release artefacts
 improvements for testing. #10991
 
 * [FEATURE] Promtool: Add HTTP client configuration to query commands. #11487
-* [FEATURE] Scrape: Add `include_scrape_configs` to include scrape configs from different files. #12019
+* [FEATURE] Scrape: Add `scrape_config_files` to include scrape configs from different files. #12019
 * [FEATURE] HTTP client: Add `no_proxy` to exclude URLs from proxied requests. #12098
 * [FEATURE] HTTP client: Add `proxy_from_environment` to read proxies from env variables. #12098
 * [ENHANCEMENT] API: Add support for setting lookback delta per query via the API. #12088
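Note: the `stringlabels` Labels layout is selected at build time via a Go build tag, so a locally built binary has to opt in explicitly (release artefacts carry the tag as of 2.44.0). A minimal sketch, assuming a checkout of this repository:

```sh
go build -tags stringlabels ./cmd/prometheus
```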

@@ -337,6 +337,9 @@ func main() {
 	serverOnlyFlag(a, "storage.tsdb.head-chunks-write-queue-size", "Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental.").
 		Default("0").IntVar(&cfg.tsdb.HeadChunksWriteQueueSize)
 
+	serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk.").
+		Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk)
+
 	agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
 		Default("data-agent/").StringVar(&cfg.agentStoragePath)
 
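For reference, the new flag is registered as hidden (it will not appear in `--help`), but it can still be set. A hypothetical invocation, with an illustrative value:

```sh
prometheus --storage.tsdb.samples-per-chunk=240
```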

@@ -1050,6 +1053,7 @@ func main() {
 
 					startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)
 					localStorage.Set(db, startTimeMargin)
+					db.SetWriteNotified(remoteStorage)
 					close(dbOpen)
 					<-cancel
 					return nil

@@ -1482,11 +1486,11 @@ func (s *readyStorage) Snapshot(dir string, withHead bool) error {
 }
 
 // Stats implements the api_v1.TSDBAdminStats interface.
-func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) {
+func (s *readyStorage) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) {
 	if x := s.get(); x != nil {
 		switch db := x.(type) {
 		case *tsdb.DB:
-			return db.Head().Stats(statsByLabelName), nil
+			return db.Head().Stats(statsByLabelName, limit), nil
 		case *agent.DB:
 			return nil, agent.ErrUnsupported
 		default:

@@ -1543,6 +1547,7 @@ type tsdbOptions struct {
 	NoLockfile               bool
 	WALCompression           bool
 	HeadChunksWriteQueueSize int
+	SamplesPerChunk          int
 	StripeSize               int
 	MinBlockDuration         model.Duration
 	MaxBlockDuration         model.Duration

@@ -1563,6 +1568,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
 		AllowOverlappingCompaction: true,
 		WALCompression:             opts.WALCompression,
 		HeadChunksWriteQueueSize:   opts.HeadChunksWriteQueueSize,
+		SamplesPerChunk:            opts.SamplesPerChunk,
 		StripeSize:                 opts.StripeSize,
 		MinBlockDuration:           int64(time.Duration(opts.MinBlockDuration) / time.Millisecond),
 		MaxBlockDuration:           int64(time.Duration(opts.MaxBlockDuration) / time.Millisecond),

@@ -146,13 +146,14 @@ var (
 
 	// DefaultScrapeConfig is the default scrape configuration.
 	DefaultScrapeConfig = ScrapeConfig{
-		// ScrapeTimeout and ScrapeInterval default to the
-		// configured globals.
-		MetricsPath:      "/metrics",
-		Scheme:           "http",
-		HonorLabels:      false,
-		HonorTimestamps:  true,
-		HTTPClientConfig: config.DefaultHTTPClientConfig,
+		// ScrapeTimeout and ScrapeInterval default to the configured
+		// globals.
+		ScrapeClassicHistograms: false,
+		MetricsPath:             "/metrics",
+		Scheme:                  "http",
+		HonorLabels:             false,
+		HonorTimestamps:         true,
+		HTTPClientConfig:        config.DefaultHTTPClientConfig,
 	}
 
 	// DefaultAlertmanagerConfig is the default alertmanager configuration.

@@ -467,6 +468,8 @@ type ScrapeConfig struct {
 	ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
 	// The timeout for scraping targets of this config.
 	ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
+	// Whether to scrape a classic histogram that is also exposed as a native histogram.
+	ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
 	// The HTTP resource path on which to fetch metrics from targets.
 	MetricsPath string `yaml:"metrics_path,omitempty"`
 	// The URL scheme with which to fetch metrics from targets.

@@ -489,6 +492,9 @@ type ScrapeConfig struct {
 	// More than this label value length post metric-relabeling will cause the
 	// scrape to fail.
 	LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
+	// More than this many buckets in a native histogram will cause the scrape to
+	// fail.
+	NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"`
 
 	// We cannot do proper Go type embedding below as the parser will then parse
 	// values arbitrarily into the overflow maps of further-down types.

@@ -363,6 +363,7 @@ var expectedConf = &Config{
 				ServiceDiscoveryConfigs: discovery.Configs{
 					&consul.SDConfig{
 						Server:       "localhost:1234",
+						PathPrefix:   "/consul",
 						Token:        "mysecret",
 						Services:     []string{"nginx", "cache", "mysql"},
 						ServiceTags:  []string{"canary", "v1"},

config/testdata/conf.good.yml (vendored, 1 line changed)
@@ -151,6 +151,7 @@ scrape_configs:
     consul_sd_configs:
       - server: "localhost:1234"
         token: mysecret
+        path_prefix: /consul
         services: ["nginx", "cache", "mysql"]
         tags: ["canary", "v1"]
         node_meta:

@@ -111,6 +111,7 @@ func init() {
 // SDConfig is the configuration for Consul service discovery.
 type SDConfig struct {
 	Server       string        `yaml:"server,omitempty"`
+	PathPrefix   string        `yaml:"path_prefix,omitempty"`
 	Token        config.Secret `yaml:"token,omitempty"`
 	Datacenter   string        `yaml:"datacenter,omitempty"`
 	Namespace    string        `yaml:"namespace,omitempty"`

@@ -211,6 +212,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 
 	clientConf := &consul.Config{
 		Address:    conf.Server,
+		PathPrefix: conf.PathPrefix,
 		Scheme:     conf.Scheme,
 		Datacenter: conf.Datacenter,
 		Namespace:  conf.Namespace,

@@ -305,7 +305,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 		}
 
 		if e.withNodeMetadata {
-			target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName)
+			if addr.NodeName != nil {
+				target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName)
+			} else if addr.TargetRef != nil && addr.TargetRef.Kind == "Node" {
+				target = addNodeLabels(target, e.nodeInf, e.logger, &addr.TargetRef.Name)
+			}
 		}
 
 		pod := e.resolvePodRef(addr.TargetRef)
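For context, the `withNodeMetadata` path above (and the equivalent endpointslice change further down) is only taken when node metadata attachment is enabled in the scrape configuration. A minimal sketch, with a hypothetical job name:

```yaml
scrape_configs:
  - job_name: kubernetes-endpoints   # hypothetical
    kubernetes_sd_configs:
      - role: endpoints              # the endpointslice role behaves the same way
        attach_metadata:
          node: true                 # populates __meta_kubernetes_node_* labels
```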

@@ -466,5 +470,6 @@ func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.L
 		nodeLabelset[model.LabelName(nodeLabelPrefix+ln)] = lv(v)
 		nodeLabelset[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue
 	}
+
 	return tg.Merge(nodeLabelset)
 }

@@ -69,6 +69,24 @@ func makeEndpoints() *v1.Endpoints {
 				},
 			},
 		},
+		{
+			Addresses: []v1.EndpointAddress{
+				{
+					IP: "6.7.8.9",
+					TargetRef: &v1.ObjectReference{
+						Kind: "Node",
+						Name: "barbaz",
+					},
+				},
+			},
+			Ports: []v1.EndpointPort{
+				{
+					Name:     "testport",
+					Port:     9002,
+					Protocol: v1.ProtocolTCP,
+				},
+			},
+		},
 		},
 	}
 }

@@ -106,6 +124,14 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":          "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace": "default",

@@ -398,6 +424,14 @@ func TestEndpointsDiscoveryWithService(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":          "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace": "default",

@@ -466,6 +500,14 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":          "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace": "default",

@@ -484,8 +526,10 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
 
 func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 	metadataConfig := AttachMetadataConfig{Node: true}
-	nodeLabels := map[string]string{"az": "us-east1"}
-	node := makeNode("foobar", "", "", nodeLabels, nil)
+	nodeLabels1 := map[string]string{"az": "us-east1"}
+	nodeLabels2 := map[string]string{"az": "us-west2"}
+	node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+	node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
 	svc := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "testendpoints",

@@ -495,7 +539,7 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 			},
 		},
 	}
-	n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node)
+	n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node1, node2)
 
 	k8sDiscoveryTest{
 		discovery: n,

@@ -526,6 +570,17 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":          "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+					"__meta_kubernetes_node_label_az":                "us-west2",
+					"__meta_kubernetes_node_labelpresent_az":         "true",
+					"__meta_kubernetes_node_name":                    "barbaz",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace": "default",

@@ -541,8 +596,10 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 }
 
 func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
-	nodeLabels := map[string]string{"az": "us-east1"}
-	nodes := makeNode("foobar", "", "", nodeLabels, nil)
+	nodeLabels1 := map[string]string{"az": "us-east1"}
+	nodeLabels2 := map[string]string{"az": "us-west2"}
+	node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+	node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
 	metadataConfig := AttachMetadataConfig{Node: true}
 	svc := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{

@@ -553,13 +610,13 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 			},
 		},
 	}
-	n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), nodes, svc)
+	n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), node1, node2, svc)
 
 	k8sDiscoveryTest{
 		discovery: n,
 		afterStart: func() {
-			nodes.Labels["az"] = "eu-central1"
-			c.CoreV1().Nodes().Update(context.Background(), nodes, metav1.UpdateOptions{})
+			node1.Labels["az"] = "eu-central1"
+			c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{})
 		},
 		expectedMaxItems: 2,
 		expectedRes: map[string]*targetgroup.Group{

@@ -572,7 +629,7 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_name":     "testport",
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":         "true",
-					"__meta_kubernetes_node_label_az":          "eu-central1",
+					"__meta_kubernetes_node_label_az":          "us-east1",
 					"__meta_kubernetes_node_labelpresent_az":   "true",
 					"__meta_kubernetes_node_name":              "foobar",
 				},

@@ -588,6 +645,17 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":          "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+					"__meta_kubernetes_node_label_az":                "us-west2",
+					"__meta_kubernetes_node_labelpresent_az":         "true",
+					"__meta_kubernetes_node_name":                    "barbaz",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace": "default",

@@ -699,6 +767,14 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":          "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace": "ns1",

@@ -815,6 +891,14 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":          "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace": "own-ns",

@@ -339,7 +339,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 		}
 
 		if e.withNodeMetadata {
-			target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename())
+			if ep.targetRef() != nil && ep.targetRef().Kind == "Node" {
+				target = addNodeLabels(target, e.nodeInf, e.logger, &ep.targetRef().Name)
+			} else {
+				target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename())
+			}
 		}
 
 		pod := e.resolvePodRef(ep.targetRef())

@@ -90,6 +90,17 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
 				Serving:     boolptr(true),
 				Terminating: boolptr(true),
 			},
+		}, {
+			Addresses: []string{"4.5.6.7"},
+			Conditions: v1.EndpointConditions{
+				Ready:       boolptr(true),
+				Serving:     boolptr(true),
+				Terminating: boolptr(false),
+			},
+			TargetRef: &corev1.ObjectReference{
+				Kind: "Node",
+				Name: "barbaz",
+			},
 		},
 	},
 }

@@ -130,6 +141,17 @@ func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice {
 				Serving:     boolptr(true),
 				Terminating: boolptr(true),
 			},
+		}, {
+			Addresses: []string{"4.5.6.7"},
+			Conditions: v1beta1.EndpointConditions{
+				Ready:       boolptr(true),
+				Serving:     boolptr(true),
+				Terminating: boolptr(false),
+			},
+			TargetRef: &corev1.ObjectReference{
+				Kind: "Node",
+				Name: "barbaz",
+			},
 		},
 	},
 }

@@ -183,6 +205,18 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_name":     "testport",
 					"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":                            "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol":               "http",
+					"__meta_kubernetes_endpointslice_port_name":                       "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":                   "TCP",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",

@@ -233,6 +267,17 @@ func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_name":     "testport",
 					"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":                            "9000",
+					"__meta_kubernetes_endpointslice_port_name":                       "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":                   "TCP",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",

@@ -419,6 +464,18 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_name":     "testport",
 					"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":                            "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol":               "http",
+					"__meta_kubernetes_endpointslice_port_name":                       "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":                   "TCP",
+				},
 			},
 			Labels: map[model.LabelName]model.LabelValue{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",

@@ -503,6 +560,18 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_name":     "testport",
 					"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":                            "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol":               "http",
+					"__meta_kubernetes_endpointslice_port_name":                       "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":                   "TCP",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",

@@ -576,6 +645,18 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_name":     "testport",
 					"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":                            "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol":               "http",
+					"__meta_kubernetes_endpointslice_port_name":                       "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":                   "TCP",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",

@@ -644,6 +725,18 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_name":     "testport",
 					"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":                            "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol":               "http",
+					"__meta_kubernetes_endpointslice_port_name":                       "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":                   "TCP",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",

@@ -728,6 +821,18 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
 					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":                            "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol":               "http",
+					"__meta_kubernetes_endpointslice_port_name":                       "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":                   "TCP",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",

@@ -747,7 +852,8 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
 
 func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 	metadataConfig := AttachMetadataConfig{Node: true}
-	nodeLabels := map[string]string{"az": "us-east1"}
+	nodeLabels1 := map[string]string{"az": "us-east1"}
+	nodeLabels2 := map[string]string{"az": "us-west2"}
 	svc := &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "testendpoints",

@@ -757,7 +863,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 			},
 		},
 	}
-	objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels, nil), svc}
+	objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels1, nil), makeNode("barbaz", "", "", nodeLabels2, nil), svc}
 	n, _ := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
 
 	k8sDiscoveryTest{

@@ -804,6 +910,21 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_name":     "testport",
 					"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":                            "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol":               "http",
+					"__meta_kubernetes_endpointslice_port_name":                       "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":                   "TCP",
+					"__meta_kubernetes_node_label_az":                                 "us-west2",
+					"__meta_kubernetes_node_labelpresent_az":                          "true",
+					"__meta_kubernetes_node_name":                                     "barbaz",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",

@@ -821,7 +942,8 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 
 func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 	metadataConfig := AttachMetadataConfig{Node: true}
-	nodeLabels := map[string]string{"az": "us-east1"}
+	nodeLabels1 := map[string]string{"az": "us-east1"}
+	nodeLabels2 := map[string]string{"az": "us-west2"}
 	svc := &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "testendpoints",

@@ -831,16 +953,17 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 			},
 		},
 	}
-	node := makeNode("foobar", "", "", nodeLabels, nil)
-	objs := []runtime.Object{makeEndpointSliceV1(), node, svc}
+	node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+	node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
+	objs := []runtime.Object{makeEndpointSliceV1(), node1, node2, svc}
 	n, c := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
 
 	k8sDiscoveryTest{
 		discovery:        n,
 		expectedMaxItems: 2,
 		afterStart: func() {
-			node.Labels["az"] = "us-central1"
-			c.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{})
+			node1.Labels["az"] = "us-central1"
+			c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{})
 		},
 		expectedRes: map[string]*targetgroup.Group{
 			"endpointslice/default/testendpoints": {

@@ -859,7 +982,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 					"__meta_kubernetes_endpointslice_port_name":         "testport",
 					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
-					"__meta_kubernetes_node_label_az":                   "us-central1",
+					"__meta_kubernetes_node_label_az":                   "us-east1",
 					"__meta_kubernetes_node_labelpresent_az":            "true",
 					"__meta_kubernetes_node_name":                       "foobar",
 				},

@@ -883,6 +1006,21 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_name":     "testport",
 					"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":                            "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol":               "http",
+					"__meta_kubernetes_endpointslice_port_name":                       "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":                   "TCP",
+					"__meta_kubernetes_node_label_az":                                 "us-west2",
+					"__meta_kubernetes_node_labelpresent_az":                          "true",
+					"__meta_kubernetes_node_name":                                     "barbaz",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",

@@ -1007,6 +1145,18 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
 					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":                            "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol":               "http",
+					"__meta_kubernetes_endpointslice_port_name":                       "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":                   "TCP",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",

@@ -1139,6 +1289,18 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
 					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":                            "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol":               "http",
+					"__meta_kubernetes_endpointslice_port_name":                       "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":                   "TCP",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",

@@ -761,15 +761,21 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
 	indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
 		e, ok := obj.(*apiv1.Endpoints)
 		if !ok {
-			return nil, fmt.Errorf("object is not a pod")
+			return nil, fmt.Errorf("object is not endpoints")
 		}
 		var nodes []string
 		for _, target := range e.Subsets {
 			for _, addr := range target.Addresses {
-				if addr.NodeName == nil {
-					continue
+				if addr.TargetRef != nil {
+					switch addr.TargetRef.Kind {
+					case "Pod":
+						if addr.NodeName != nil {
+							nodes = append(nodes, *addr.NodeName)
+						}
+					case "Node":
+						nodes = append(nodes, addr.TargetRef.Name)
+					}
 				}
-				nodes = append(nodes, *addr.NodeName)
 			}
 		}
 		return nodes, nil

@@ -789,17 +795,29 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
 		switch e := obj.(type) {
 		case *disv1.EndpointSlice:
 			for _, target := range e.Endpoints {
-				if target.NodeName == nil {
-					continue
+				if target.TargetRef != nil {
+					switch target.TargetRef.Kind {
+					case "Pod":
+						if target.NodeName != nil {
+							nodes = append(nodes, *target.NodeName)
+						}
+					case "Node":
+						nodes = append(nodes, target.TargetRef.Name)
+					}
 				}
-				nodes = append(nodes, *target.NodeName)
 			}
 		case *disv1beta1.EndpointSlice:
 			for _, target := range e.Endpoints {
-				if target.NodeName == nil {
-					continue
+				if target.TargetRef != nil {
+					switch target.TargetRef.Kind {
+					case "Pod":
+						if target.NodeName != nil {
+							nodes = append(nodes, *target.NodeName)
+						}
+					case "Node":
+						nodes = append(nodes, target.TargetRef.Name)
+					}
 				}
-				nodes = append(nodes, *target.NodeName)
 			}
 		default:
 			return nil, fmt.Errorf("object is not an endpointslice")

@@ -134,6 +134,10 @@ job_name: <job_name>
 # Per-scrape timeout when scraping this job.
 [ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]
 
+# Whether to scrape a classic histogram that is also exposed as a native
+# histogram (has no effect without --enable-feature=native-histograms).
+[ scrape_classic_histograms: <boolean> | default = false ]
+
 # The HTTP resource path on which to fetch metrics from targets.
 [ metrics_path: <path> | default = /metrics ]
 
@@ -376,6 +380,11 @@ metric_relabel_configs:
 # 0 means no limit. This is an experimental feature, this behaviour could
 # change in the future.
 [ target_limit: <int> | default = 0 ]
+
+# Limit on total number of positive and negative buckets allowed in a single
+# native histogram. If this is exceeded, the entire scrape will be treated as
+# failed. 0 means no limit.
+[ native_histogram_bucket_limit: <int> | default = 0 ]
 ```
 
 Where `<job_name>` must be unique across all scrape configurations.
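Taken together with the `scrape_classic_histograms` hunk above, a minimal scrape config exercising both new settings might look like this (job name and limit value are illustrative):

```yaml
scrape_configs:
  - job_name: app                       # hypothetical
    scrape_classic_histograms: true     # needs --enable-feature=native-histograms
    native_histogram_bucket_limit: 160  # fail the scrape above 160 buckets
```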

@@ -576,6 +585,8 @@ The following meta labels are available on targets during [relabeling](#relabel_
 # The information to access the Consul API. It is to be defined
 # as the Consul documentation requires.
 [ server: <host> | default = "localhost:8500" ]
+# Prefix for URIs for when consul is behind an API gateway (reverse proxy).
+[ path_prefix: <string> ]
 [ token: <secret> ]
 [ datacenter: <string> ]
 # Namespaces are only supported in Consul Enterprise.
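A minimal `consul_sd_configs` entry using the new option, mirroring the test fixture above:

```yaml
consul_sd_configs:
  - server: "localhost:1234"
    path_prefix: /consul   # Consul reachable behind a reverse proxy at /consul
    token: mysecret
    services: ["nginx", "cache", "mysql"]
```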

@@ -264,4 +264,4 @@ process ID.
 While Prometheus does have recovery mechanisms in the case that there is an
 abrupt process failure it is recommend to use the `SIGTERM` signal to cleanly
 shutdown a Prometheus instance. If you're running on Linux this can be performed
-by using `kill -s SIGHUP <PID>`, replacing `<PID>` with your Prometheus process ID.
+by using `kill -s SIGTERM <PID>`, replacing `<PID>` with your Prometheus process ID.
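A sketch of the corrected shutdown sequence on Linux (the `pgrep` usage is illustrative; `SIGHUP` is the reload signal, which is what the old text confused it with):

```sh
kill -s SIGTERM "$(pgrep prometheus)"   # clean shutdown
# kill -s SIGHUP <PID> would instead trigger a configuration reload
```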

@@ -1074,6 +1074,10 @@ The following endpoint returns various cardinality statistics about the Promethe
 ```
 GET /api/v1/status/tsdb
 ```
+
+URL query parameters:
+- `limit=<number>`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned.
+
 The `data` section of the query result consists of
 - **headStats**: This provides the following data about the head block of the TSDB:
   - **numSeries**: The number of series.
   - **chunkCount**: The number of chunks.
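A hypothetical call against a local server, using the new parameter:

```sh
curl 'http://localhost:9090/api/v1/status/tsdb?limit=5'
# returns at most 5 items per statistic instead of the default 10
```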

@@ -589,8 +589,9 @@ over time and return an instant vector with per-series aggregation results:
 Note that all values in the specified interval have the same weight in the
 aggregation even if the values are not equally spaced throughout the interval.
 
-`count_over_time`, `last_over_time`, and `present_over_time` handle native
-histograms as expected. All other functions ignore histogram samples.
+`avg_over_time`, `sum_over_time`, `count_over_time`, `last_over_time`, and
+`present_over_time` handle native histograms as expected. All other functions
+ignore histogram samples.
 
 ## Trigonometric Functions
 
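With this change, averaging a range of native-histogram samples becomes meaningful; a sketch with a hypothetical metric name:

```promql
avg_over_time(http_request_latency_seconds[10m])
```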

@@ -318,19 +318,23 @@ histograms is still very limited.
 Logical/set binary operators work as expected even if histogram samples are
 involved. They only check for the existence of a vector element and don't
 change their behavior depending on the sample type of an element (float or
-histogram).
+histogram). The `count` aggregation operator works similarly.
 
-The binary `+` operator between two native histograms and the `sum` aggregation
-operator to aggregate native histograms are fully supported. Even if the
-histograms involved have different bucket layouts, the buckets are
-automatically converted appropriately so that the operation can be
+The binary `+` and `-` operators between two native histograms and the `sum`
+and `avg` aggregation operators to aggregate native histograms are fully
+supported. Even if the histograms involved have different bucket layouts, the
+buckets are automatically converted appropriately so that the operation can be
 performed. (With the currently supported bucket schemas, that's always
-possible.) If either operator has to sum up a mix of histogram samples and
+possible.) If either operator has to aggregate a mix of histogram samples and
 float samples, the corresponding vector element is removed from the output
 vector entirely.
 
-All other operators do not behave in a meaningful way. They either treat the
-histogram sample as if it were a float sample of value 0, or (in case of
-arithmetic operations between a scalar and a vector) they leave the histogram
-sample unchanged. This behavior will change to a meaningful one before native
-histograms are a stable feature.
+The binary `*` operator works between a native histogram and a float in any
+order, while the binary `/` operator can be used between a native histogram
+and a float in that exact order.
+
+All other operators (and unmentioned cases for the above operators) do not
+behave in a meaningful way. They either treat the histogram sample as if it
+were a float sample of value 0, or (in case of arithmetic operations between a
+scalar and a vector) they leave the histogram sample unchanged. This behavior
+will change to a meaningful one before native histograms are a stable feature.
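Two sketches of the newly meaningful arithmetic, with a hypothetical native-histogram metric; `*` works in either order, `/` only with the histogram on the left:

```promql
rate(http_request_latency_seconds[5m]) * 2    # or: 2 * rate(...)
rate(http_request_latency_seconds[5m]) / 60   # histogram must be the left operand
```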

@@ -18,12 +18,13 @@ Things considered stable for 2.x:
 * Configuration file format (minus the service discovery remote read/write, see below)
 * Rule/alert file format
 * Console template syntax and semantics
+* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/).
 
 Things considered unstable for 2.x:
 
 * Any feature listed as experimental or subject to change, including:
   * The [`holt_winters` PromQL function](https://github.com/prometheus/prometheus/issues/2458)
-  * Remote read, remote write and the remote read endpoint
+  * Remote write receiving, remote read and the remote read endpoint
 * Server-side HTTPS and basic authentication
 * Service discovery integrations, with the exception of `static_configs` and `file_sd_configs`
 * Go APIs of packages that are part of the server

@@ -8,7 +8,7 @@ require (
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/influxdata/influxdb v1.11.0
-	github.com/prometheus/client_golang v1.14.0
+	github.com/prometheus/client_golang v1.15.0
 	github.com/prometheus/common v0.42.0
 	github.com/prometheus/prometheus v0.43.0
 	github.com/stretchr/testify v1.8.2

@@ -29,7 +29,7 @@ require (
 	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
-	github.com/kr/pretty v0.3.0 // indirect
+	github.com/kr/pretty v0.3.1 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 	github.com/pkg/errors v0.9.1 // indirect

@@ -51,7 +51,7 @@ require (
 	golang.org/x/text v0.8.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.29.0 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )

@@ -141,8 +141,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=

@@ -181,8 +180,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM=
+github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=

@@ -204,7 +203,6 @@ github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJf
 github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
 github.com/prometheus/prometheus v0.43.0 h1:18iCSfrbAHbXvYFvR38U1Pt4uZmU9SmDcCpCrBKUiGg=
 github.com/prometheus/prometheus v0.43.0/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2GcqDlJKbF2sA6FM=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=

@@ -341,14 +339,12 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0=
-google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

go.mod (6 lines changed)
@@ -13,7 +13,7 @@ require (
 	github.com/cespare/xxhash/v2 v2.2.0
 	github.com/dennwc/varint v1.0.0
 	github.com/dgraph-io/ristretto v0.1.1
-	github.com/digitalocean/godo v1.98.0
+	github.com/digitalocean/godo v1.99.0
 	github.com/docker/docker v23.0.4+incompatible
 	github.com/edsrzf/mmap-go v1.1.0
 	github.com/envoyproxy/go-control-plane v0.11.0

@@ -31,7 +31,7 @@ require (
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0
 	github.com/hashicorp/consul/api v1.20.0
 	github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197
-	github.com/hetznercloud/hcloud-go v1.42.0
+	github.com/hetznercloud/hcloud-go v1.45.1
 	github.com/ionos-cloud/sdk-go/v6 v6.1.6
 	github.com/json-iterator/go v1.1.12
 	github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b

@@ -43,7 +43,7 @@ require (
 	github.com/ovh/go-ovh v1.4.1
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/alertmanager v0.25.0
-	github.com/prometheus/client_golang v1.15.0
+	github.com/prometheus/client_golang v1.15.1
 	github.com/prometheus/client_model v0.3.0
 	github.com/prometheus/common v0.42.0
 	github.com/prometheus/common/assets v0.2.0

go.sum (12 lines changed)
@@ -155,8 +155,8 @@ github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkz
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ=
-github.com/digitalocean/godo v1.98.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
+github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E=
+github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=

@@ -464,8 +464,8 @@ github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197/go.mod h1:2TCr
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A=
-github.com/hetznercloud/hcloud-go v1.42.0/go.mod h1:YADL8AbmQYH0Eo+1lkuyoc8LutT0UeMvaKP47nNUb+Y=
+github.com/hetznercloud/hcloud-go v1.45.1 h1:nl0OOklFfQT5J6AaNIOhl5Ruh3fhmGmhvZEqHbibVuk=
+github.com/hetznercloud/hcloud-go v1.45.1/go.mod h1:aAUGxSfSnB8/lVXHNEDxtCT1jykaul8kqjD7f5KQXF8=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=

@@ -655,8 +655,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
 github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM=
-github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
+github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
+github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=

@@ -159,12 +159,12 @@ func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
 	}
 }
 
-// Scale scales the FloatHistogram by the provided factor, i.e. it scales all
+// Mul multiplies the FloatHistogram by the provided factor, i.e. it scales all
 // bucket counts including the zero bucket and the count and the sum of
 // observations. The bucket layout stays the same. This method changes the
 // receiving histogram directly (rather than acting on a copy). It returns a
 // pointer to the receiving histogram for convenience.
-func (h *FloatHistogram) Scale(factor float64) *FloatHistogram {
+func (h *FloatHistogram) Mul(factor float64) *FloatHistogram {
 	h.ZeroCount *= factor
 	h.Count *= factor
 	h.Sum *= factor

@@ -177,6 +177,21 @@ func (h *FloatHistogram) Scale(factor float64) *FloatHistogram {
 	return h
 }
 
+// Div works like Scale but divides instead of multiplies.
+// When dividing by 0, everything will be set to Inf.
+func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
+	h.ZeroCount /= scalar
+	h.Count /= scalar
+	h.Sum /= scalar
+	for i := range h.PositiveBuckets {
+		h.PositiveBuckets[i] /= scalar
+	}
+	for i := range h.NegativeBuckets {
+		h.NegativeBuckets[i] /= scalar
+	}
+	return h
+}
+
 // Add adds the provided other histogram to the receiving histogram. Count, Sum,
 // and buckets from the other histogram are added to the corresponding
 // components of the receiving histogram. Buckets in the other histogram that do
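A minimal usage sketch of the renamed and new methods (values are illustrative; both methods mutate the receiver, so take a `Copy()` first if the original is still needed):

```go
import "github.com/prometheus/prometheus/model/histogram"

func example() {
	h := &histogram.FloatHistogram{ZeroCount: 11, Count: 30, Sum: 23}

	doubled := h.Copy().Mul(2) // Count: 60, Sum: 46, ZeroCount: 22
	halved := h.Copy().Div(2)  // Count: 15, Sum: 11.5, ZeroCount: 5.5
	_, _ = doubled, halved
}
```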
@@ -15,12 +15,13 @@ package histogram

 import (
 	"fmt"
+	"math"
 	"testing"

 	"github.com/stretchr/testify/require"
 )

-func TestFloatHistogramScale(t *testing.T) {
+func TestFloatHistogramMul(t *testing.T) {
 	cases := []struct {
 		name string
 		in *FloatHistogram

@@ -33,6 +34,30 @@ func TestFloatHistogramScale(t *testing.T) {
 			3.1415,
 			&FloatHistogram{},
 		},
+		{
+			"zero multiplier",
+			&FloatHistogram{
+				ZeroThreshold: 0.01,
+				ZeroCount: 5.5,
+				Count: 3493.3,
+				Sum: 2349209.324,
+				PositiveSpans: []Span{{-2, 1}, {2, 3}},
+				PositiveBuckets: []float64{1, 3.3, 4.2, 0.1},
+				NegativeSpans: []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
+			},
+			0,
+			&FloatHistogram{
+				ZeroThreshold: 0.01,
+				ZeroCount: 0,
+				Count: 0,
+				Sum: 0,
+				PositiveSpans: []Span{{-2, 1}, {2, 3}},
+				PositiveBuckets: []float64{0, 0, 0, 0},
+				NegativeSpans: []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{0, 0, 0, 0},
+			},
+		},
 		{
 			"no-op",
 			&FloatHistogram{

@@ -81,17 +106,137 @@ func TestFloatHistogramScale(t *testing.T) {
 				NegativeBuckets: []float64{6.2, 6, 1.234e5 * 2, 2000},
 			},
 		},
+		{
+			"triple",
+			&FloatHistogram{
+				ZeroThreshold: 0.01,
+				ZeroCount: 11,
+				Count: 30,
+				Sum: 23,
+				PositiveSpans: []Span{{-2, 2}, {1, 3}},
+				PositiveBuckets: []float64{1, 0, 3, 4, 7},
+				NegativeSpans: []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3, 1, 5, 6},
+			},
+			3,
+			&FloatHistogram{
+				ZeroThreshold: 0.01,
+				ZeroCount: 33,
+				Count: 90,
+				Sum: 69,
+				PositiveSpans: []Span{{-2, 2}, {1, 3}},
+				PositiveBuckets: []float64{3, 0, 9, 12, 21},
+				NegativeSpans: []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{9, 3, 15, 18},
+			},
+		},
 	}

 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
-			require.Equal(t, c.expected, c.in.Scale(c.scale))
+			require.Equal(t, c.expected, c.in.Mul(c.scale))
 			// Has it also happened in-place?
 			require.Equal(t, c.expected, c.in)
 		})
 	}
 }

+func TestFloatHistogramDiv(t *testing.T) {
+	cases := []struct {
+		name string
+		fh *FloatHistogram
+		s float64
+		expected *FloatHistogram
+	}{
+		{
+			"zero value",
+			&FloatHistogram{},
+			3.1415,
+			&FloatHistogram{},
+		},
+		{
+			"zero divisor",
+			&FloatHistogram{
+				ZeroThreshold: 0.01,
+				ZeroCount: 5.5,
+				Count: 3493.3,
+				Sum: 2349209.324,
+				PositiveSpans: []Span{{-2, 1}, {2, 3}},
+				PositiveBuckets: []float64{1, 3.3, 4.2, 0.1},
+				NegativeSpans: []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
+			},
+			0,
+			&FloatHistogram{
+				ZeroThreshold: 0.01,
+				ZeroCount: math.Inf(1),
+				Count: math.Inf(1),
+				Sum: math.Inf(1),
+				PositiveSpans: []Span{{-2, 1}, {2, 3}},
+				PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1), math.Inf(1)},
+				NegativeSpans: []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1), math.Inf(1)},
+			},
+		},
+		{
+			"no-op",
+			&FloatHistogram{
+				ZeroThreshold: 0.01,
+				ZeroCount: 5.5,
+				Count: 3493.3,
+				Sum: 2349209.324,
+				PositiveSpans: []Span{{-2, 1}, {2, 3}},
+				PositiveBuckets: []float64{1, 3.3, 4.2, 0.1},
+				NegativeSpans: []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
+			},
+			1,
+			&FloatHistogram{
+				ZeroThreshold: 0.01,
+				ZeroCount: 5.5,
+				Count: 3493.3,
+				Sum: 2349209.324,
+				PositiveSpans: []Span{{-2, 1}, {2, 3}},
+				PositiveBuckets: []float64{1, 3.3, 4.2, 0.1},
+				NegativeSpans: []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
+			},
+		},
+		{
+			"half",
+			&FloatHistogram{
+				ZeroThreshold: 0.01,
+				ZeroCount: 11,
+				Count: 30,
+				Sum: 23,
+				PositiveSpans: []Span{{-2, 2}, {1, 3}},
+				PositiveBuckets: []float64{1, 0, 3, 4, 7},
+				NegativeSpans: []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3, 1, 5, 6},
+			},
+			2,
+			&FloatHistogram{
+				ZeroThreshold: 0.01,
+				ZeroCount: 5.5,
+				Count: 15,
+				Sum: 11.5,
+				PositiveSpans: []Span{{-2, 2}, {1, 3}},
+				PositiveBuckets: []float64{0.5, 0, 1.5, 2, 3.5},
+				NegativeSpans: []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{1.5, 0.5, 2.5, 3},
+			},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			require.Equal(t, c.expected, c.fh.Div(c.s))
+			// Has it also happened in-place?
+			require.Equal(t, c.expected, c.fh)
+		})
+	}
+}
+
 func TestFloatHistogramDetectReset(t *testing.T) {
 	cases := []struct {
 		name string
@@ -533,16 +533,15 @@ func (b *Builder) Set(n, v string) *Builder {
 }

 func (b *Builder) Get(n string) string {
-	for _, d := range b.del {
-		if d == n {
-			return ""
-		}
-	}
+	// Del() removes entries from .add but Set() does not remove from .del, so check .add first.
 	for _, a := range b.add {
 		if a.Name == n {
 			return a.Value
 		}
 	}
+	if slices.Contains(b.del, n) {
+		return ""
+	}
 	return b.base.Get(n)
 }
@@ -593,14 +593,15 @@ func (b *Builder) Set(n, v string) *Builder {
 }

 func (b *Builder) Get(n string) string {
-	if slices.Contains(b.del, n) {
-		return ""
-	}
+	// Del() removes entries from .add but Set() does not remove from .del, so check .add first.
 	for _, a := range b.add {
 		if a.Name == n {
 			return a.Value
 		}
 	}
+	if slices.Contains(b.del, n) {
+		return ""
+	}
 	return b.base.Get(n)
 }
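Both copies of Get now consult .add before .del. A minimal sketch of the lookup order this fixes, mirroring the Set()-after-Del() regression noted for 2.43.1 (package path taken from this repository):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	b := labels.NewBuilder(labels.FromStrings("aaa", "111"))
	b.Del("bbb")
	b.Set("bbb", "222") // the later Set must win over the earlier Del

	// With .del checked first this returned ""; with .add checked first it is "222".
	fmt.Println(b.Get("bbb"))
}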
@@ -607,6 +607,13 @@ func TestBuilder(t *testing.T) {
 			require.Equal(t, tcase.want.BytesWithoutLabels(nil, "aaa", "bbb"), b.Labels().Bytes(nil))
 		})
 	}
+	t.Run("set_after_del", func(t *testing.T) {
+		b := NewBuilder(FromStrings("aaa", "111"))
+		b.Del("bbb")
+		b.Set("bbb", "222")
+		require.Equal(t, FromStrings("aaa", "111", "bbb", "222"), b.Labels())
+		require.Equal(t, "222", b.Get("bbb"))
+	})
 }

 func TestScratchBuilder(t *testing.T) {
@@ -397,6 +397,34 @@ func TestRelabel(t *testing.T) {
 				"foo": "bar",
 			}),
 		},
+		{ // From https://github.com/prometheus/prometheus/issues/12283
+			input: labels.FromMap(map[string]string{
+				"__meta_kubernetes_pod_container_port_name":         "foo",
+				"__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091",
+			}),
+			relabel: []*Config{
+				{
+					Regex:  MustNewRegexp("^__meta_kubernetes_pod_container_port_name$"),
+					Action: LabelDrop,
+				},
+				{
+					SourceLabels: model.LabelNames{"__meta_kubernetes_pod_annotation_XXX_metrics_port"},
+					Regex:        MustNewRegexp("(.+)"),
+					Action:       Replace,
+					Replacement:  "metrics",
+					TargetLabel:  "__meta_kubernetes_pod_container_port_name",
+				},
+				{
+					SourceLabels: model.LabelNames{"__meta_kubernetes_pod_container_port_name"},
+					Regex:        MustNewRegexp("^metrics$"),
+					Action:       Keep,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091",
+				"__meta_kubernetes_pod_container_port_name":         "metrics",
+			}),
+		},
 		{
 			input: labels.FromMap(map[string]string{
 				"a": "foo",
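Read as a pipeline, the three rules above drop the original port-name label, recreate it from the annotation, then keep only matching targets. A sketch of applying such a chain in code; the relabel.Process signature (returning the resulting labels plus a keep flag) is assumed from this Prometheus version:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	lbls := labels.FromMap(map[string]string{
		"__meta_kubernetes_pod_container_port_name":         "foo",
		"__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091",
	})
	cfgs := []*relabel.Config{
		{Regex: relabel.MustNewRegexp("^__meta_kubernetes_pod_container_port_name$"), Action: relabel.LabelDrop},
		{
			SourceLabels: model.LabelNames{"__meta_kubernetes_pod_annotation_XXX_metrics_port"},
			Regex:        relabel.MustNewRegexp("(.+)"),
			Action:       relabel.Replace,
			Replacement:  "metrics",
			TargetLabel:  "__meta_kubernetes_pod_container_port_name",
		},
	}
	res, keep := relabel.Process(lbls, cfgs...)
	fmt.Println(keep, res.Get("__meta_kubernetes_pod_container_port_name")) // true metrics
}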
@@ -71,7 +71,7 @@ type Parser interface {
 //
 // This function always returns a valid parser, but might additionally
 // return an error if the content type cannot be parsed.
-func New(b []byte, contentType string) (Parser, error) {
+func New(b []byte, contentType string, parseClassicHistograms bool) (Parser, error) {
 	if contentType == "" {
 		return NewPromParser(b), nil
 	}

@@ -84,7 +84,7 @@ func New(b []byte, contentType string) (Parser, error) {
 	case "application/openmetrics-text":
 		return NewOpenMetricsParser(b), nil
 	case "application/vnd.google.protobuf":
-		return NewProtobufParser(b), nil
+		return NewProtobufParser(b, parseClassicHistograms), nil
 	default:
 		return NewPromParser(b), nil
 	}

@@ -100,7 +100,7 @@ const (
 	EntrySeries  Entry = 2 // A series with a simple float64 as value.
 	EntryComment Entry = 3
 	EntryUnit    Entry = 4
-	EntryHistogram Entry = 5 // A series with a sparse histogram as a value.
+	EntryHistogram Entry = 5 // A series with a native histogram as a value.
 )

 // MetricType represents metric type values.
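The extra boolean threads through to the protobuf parser; a sketch of the new call shape at a scrape-like call site (payload handling elided):

package main

import (
	"errors"
	"io"

	"github.com/prometheus/prometheus/model/textparse"
)

func parseAll(body []byte, contentType string) error {
	// Passing true asks the protobuf parser to additionally expose the
	// classic (bucketed) series for histograms that also carry native buckets.
	p, err := textparse.New(body, contentType, true)
	if err != nil {
		return err
	}
	for {
		_, err := p.Next()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
	}
}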
@@ -91,7 +91,7 @@ func TestNewParser(t *testing.T) {
 			tt := tt // Copy to local variable before going parallel.
 			t.Parallel()

-			p, err := New([]byte{}, tt.contentType)
+			p, err := New([]byte{}, tt.contentType, false)
 			tt.validateParser(t, p)
 			if tt.err == "" {
 				require.NoError(t, err)
@@ -52,8 +52,10 @@ type ProtobufParser struct {
 	// fieldPos is the position within a Summary or (legacy) Histogram. -2
 	// is the count. -1 is the sum. Otherwise it is the index within
 	// quantiles/buckets.
-	fieldPos   int
-	fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed.
+	fieldPos    int
+	fieldsDone  bool // true if no more fields of a Summary or (legacy) Histogram to be processed.
+	redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram.

 	// state is marked by the entry we are processing. EntryInvalid implies
 	// that we have to decode the next MetricFamily.
 	state Entry

@@ -62,17 +64,22 @@ type ProtobufParser struct {

 	mf *dto.MetricFamily

+	// Whether to also parse a classic histogram that is also present as a
+	// native histogram.
+	parseClassicHistograms bool
+
 	// The following are just shenanigans to satisfy the Parser interface.
 	metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric.
 }

 // NewProtobufParser returns a parser for the payload in the byte slice.
-func NewProtobufParser(b []byte) Parser {
+func NewProtobufParser(b []byte, parseClassicHistograms bool) Parser {
 	return &ProtobufParser{
-		in:          b,
-		state:       EntryInvalid,
-		mf:          &dto.MetricFamily{},
-		metricBytes: &bytes.Buffer{},
+		in:                     b,
+		state:                  EntryInvalid,
+		mf:                     &dto.MetricFamily{},
+		metricBytes:            &bytes.Buffer{},
+		parseClassicHistograms: parseClassicHistograms,
 	}
 }

@@ -106,19 +113,28 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
 			v = s.GetQuantile()[p.fieldPos].GetValue()
 		}
 	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
-		// This should only happen for a legacy histogram.
+		// This should only happen for a classic histogram.
 		h := m.GetHistogram()
 		switch p.fieldPos {
 		case -2:
-			v = float64(h.GetSampleCount())
+			v = h.GetSampleCountFloat()
+			if v == 0 {
+				v = float64(h.GetSampleCount())
+			}
 		case -1:
 			v = h.GetSampleSum()
 		default:
 			bb := h.GetBucket()
 			if p.fieldPos >= len(bb) {
-				v = float64(h.GetSampleCount())
+				v = h.GetSampleCountFloat()
+				if v == 0 {
+					v = float64(h.GetSampleCount())
+				}
 			} else {
-				v = float64(bb[p.fieldPos].GetCumulativeCount())
+				v = bb[p.fieldPos].GetCumulativeCountFloat()
+				if v == 0 {
+					v = float64(bb[p.fieldPos].GetCumulativeCount())
+				}
 			}
 		}
 	default:

@@ -149,6 +165,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his
 		ts = m.GetTimestampMs()
 		h  = m.GetHistogram()
 	)
+	if p.parseClassicHistograms && len(h.GetBucket()) > 0 {
+		p.redoClassic = true
+	}
 	if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 {
 		// It is a float histogram.
 		fh := histogram.FloatHistogram{

@@ -376,6 +395,12 @@ func (p *ProtobufParser) Next() (Entry, error) {
 			return EntryInvalid, err
 		}
 	case EntryHistogram, EntrySeries:
+		if p.redoClassic {
+			p.redoClassic = false
+			p.state = EntrySeries
+			p.fieldPos = -3
+			p.fieldsDone = false
+		}
 		t := p.mf.GetType()
 		if p.state == EntrySeries && !p.fieldsDone &&
 			(t == dto.MetricType_SUMMARY ||

@@ -432,7 +457,7 @@ func (p *ProtobufParser) updateMetricBytes() error {
 // state.
 func (p *ProtobufParser) getMagicName() string {
 	t := p.mf.GetType()
-	if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_SUMMARY) {
+	if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) {
 		return p.mf.GetName()
 	}
 	if p.fieldPos == -2 {
(A file diff was suppressed here because it is too large.)

promql/engine.go (149 lines changed):
@@ -1850,14 +1850,14 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no
 	}
 	case chunkenc.ValFloat:
 		t, v = it.At()
-	case chunkenc.ValHistogram, chunkenc.ValFloatHistogram:
+	case chunkenc.ValFloatHistogram:
 		t, h = it.AtFloatHistogram()
 	default:
 		panic(fmt.Errorf("unknown value type %v", valueType))
 	}
 	if valueType == chunkenc.ValNone || t > refTime {
 		var ok bool
-		t, v, _, h, ok = it.PeekPrev()
+		t, v, h, ok = it.PeekPrev()
 		if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) {
 			return 0, 0, nil, false
 		}

@@ -2263,14 +2263,11 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
 			insertedSigs[insertSig] = struct{}{}
 		}

-		if (hl != nil && hr != nil) || (hl == nil && hr == nil) {
-			// Both lhs and rhs are of same type.
-			enh.Out = append(enh.Out, Sample{
-				Metric: metric,
-				F:      floatValue,
-				H:      histogramValue,
-			})
-		}
+		enh.Out = append(enh.Out, Sample{
+			Metric: metric,
+			F:      floatValue,
+			H:      histogramValue,
+		})
 	}
 	return enh.Out
 }

@@ -2337,28 +2334,33 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V
 // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar.
 func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector {
 	for _, lhsSample := range lhs {
-		lv, rv := lhsSample.F, rhs.V
+		lf, rf := lhsSample.F, rhs.V
+		var rh *histogram.FloatHistogram
+		lh := lhsSample.H
 		// lhs always contains the Vector. If the original position was different
 		// swap for calculating the value.
 		if swap {
-			lv, rv = rv, lv
+			lf, rf = rf, lf
+			lh, rh = rh, lh
 		}
-		value, _, keep := vectorElemBinop(op, lv, rv, nil, nil)
+		float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh)
 		// Catch cases where the scalar is the LHS in a scalar-vector comparison operation.
 		// We want to always keep the vector element value as the output value, even if it's on the RHS.
 		if op.IsComparisonOperator() && swap {
-			value = rv
+			float = rf
+			histogram = rh
 		}
 		if returnBool {
 			if keep {
-				value = 1.0
+				float = 1.0
 			} else {
-				value = 0.0
+				float = 0.0
 			}
 			keep = true
 		}
 		if keep {
-			lhsSample.F = value
+			lhsSample.F = float
+			lhsSample.H = histogram
 			if shouldDropMetricName(op) || returnBool {
 				lhsSample.Metric = enh.DropMetricName(lhsSample.Metric)
 			}

@@ -2413,16 +2415,33 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
 			// The histogram being added must have the larger schema
 			// code (i.e. the higher resolution).
 			if hrhs.Schema >= hlhs.Schema {
-				return 0, hlhs.Copy().Add(hrhs), true
+				return 0, hlhs.Copy().Add(hrhs).Compact(0), true
 			}
-			return 0, hrhs.Copy().Add(hlhs), true
+			return 0, hrhs.Copy().Add(hlhs).Compact(0), true
 		}
 		return lhs + rhs, nil, true
 	case parser.SUB:
+		if hlhs != nil && hrhs != nil {
+			// The histogram being subtracted must have the larger schema
+			// code (i.e. the higher resolution).
+			if hrhs.Schema >= hlhs.Schema {
+				return 0, hlhs.Copy().Sub(hrhs).Compact(0), true
+			}
+			return 0, hrhs.Copy().Mul(-1).Add(hlhs).Compact(0), true
+		}
 		return lhs - rhs, nil, true
 	case parser.MUL:
+		if hlhs != nil && hrhs == nil {
+			return 0, hlhs.Copy().Mul(rhs), true
+		}
+		if hlhs == nil && hrhs != nil {
+			return 0, hrhs.Copy().Mul(lhs), true
+		}
 		return lhs * rhs, nil, true
 	case parser.DIV:
+		if hlhs != nil && hrhs == nil {
+			return 0, hlhs.Copy().Div(rhs), true
+		}
 		return lhs / rhs, nil, true
 	case parser.POW:
 		return math.Pow(lhs, rhs), nil, true

@@ -2452,7 +2471,8 @@ type groupedAggregation struct {
 	labels         labels.Labels
 	floatValue     float64
 	histogramValue *histogram.FloatHistogram
-	mean           float64
+	floatMean      float64
+	histogramMean  *histogram.FloatHistogram
 	groupCount     int
 	heap           vectorByValueHeap
 	reverseHeap    vectorByReverseValueHeap

@@ -2536,7 +2556,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 			newAgg := &groupedAggregation{
 				labels:     m,
 				floatValue: s.F,
-				mean:       s.F,
+				floatMean:  s.F,
 				groupCount: 1,
 			}
 			switch {

@@ -2545,6 +2565,11 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 			case op == parser.SUM:
 				newAgg.histogramValue = s.H.Copy()
 				newAgg.hasHistogram = true
+			case op == parser.AVG:
+				newAgg.histogramMean = s.H.Copy()
+				newAgg.hasHistogram = true
 			case op == parser.STDVAR || op == parser.STDDEV:
 				newAgg.groupCount = 0
 			}

 			result[groupingKey] = newAgg

@@ -2589,9 +2614,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 				if s.H.Schema >= group.histogramValue.Schema {
 					group.histogramValue.Add(s.H)
 				} else {
-					h := s.H.Copy()
-					h.Add(group.histogramValue)
-					group.histogramValue = h
+					group.histogramValue = s.H.Copy().Add(group.histogramValue)
 				}
 			}
 			// Otherwise the aggregation contained floats

@@ -2604,25 +2627,46 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without

 		case parser.AVG:
 			group.groupCount++
-			if math.IsInf(group.mean, 0) {
-				if math.IsInf(s.F, 0) && (group.mean > 0) == (s.F > 0) {
-					// The `mean` and `s.V` values are `Inf` of the same sign. They
-					// can't be subtracted, but the value of `mean` is correct
-					// already.
-					break
-				}
-				if !math.IsInf(s.F, 0) && !math.IsNaN(s.F) {
-					// At this stage, the mean is an infinite. If the added
-					// value is neither an Inf or a Nan, we can keep that mean
-					// value.
-					// This is required because our calculation below removes
-					// the mean value, which would look like Inf += x - Inf and
-					// end up as a NaN.
-					break
-				}
-			}
-			// Divide each side of the `-` by `group.groupCount` to avoid float64 overflows.
-			group.mean += s.F/float64(group.groupCount) - group.mean/float64(group.groupCount)
+			if s.H != nil {
+				group.hasHistogram = true
+				if group.histogramMean != nil {
+					left := s.H.Copy().Div(float64(group.groupCount))
+					right := group.histogramMean.Copy().Div(float64(group.groupCount))
+					// The histogram being added/subtracted must have
+					// an equal or larger schema.
+					if s.H.Schema >= group.histogramMean.Schema {
+						toAdd := right.Mul(-1).Add(left)
+						group.histogramMean.Add(toAdd)
+					} else {
+						toAdd := left.Sub(right)
+						group.histogramMean = toAdd.Add(group.histogramMean)
+					}
+				}
+				// Otherwise the aggregation contained floats
+				// previously and will be invalid anyway. No
+				// point in copying the histogram in that case.
+			} else {
+				group.hasFloat = true
+				if math.IsInf(group.floatMean, 0) {
+					if math.IsInf(s.F, 0) && (group.floatMean > 0) == (s.F > 0) {
+						// The `floatMean` and `s.F` values are `Inf` of the same sign. They
+						// can't be subtracted, but the value of `floatMean` is correct
+						// already.
+						break
+					}
+					if !math.IsInf(s.F, 0) && !math.IsNaN(s.F) {
+						// At this stage, the mean is an infinite. If the added
+						// value is neither an Inf or a Nan, we can keep that mean
+						// value.
+						// This is required because our calculation below removes
+						// the mean value, which would look like Inf += x - Inf and
+						// end up as a NaN.
+						break
+					}
+				}
+				// Divide each side of the `-` by `group.groupCount` to avoid float64 overflows.
+				group.floatMean += s.F/float64(group.groupCount) - group.floatMean/float64(group.groupCount)
+			}

 		case parser.GROUP:
 			// Do nothing. Required to avoid the panic in `default:` below.

@@ -2641,10 +2685,12 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 			group.groupCount++

 		case parser.STDVAR, parser.STDDEV:
-			group.groupCount++
-			delta := s.F - group.mean
-			group.mean += delta / float64(group.groupCount)
-			group.floatValue += delta * (s.F - group.mean)
+			if s.H == nil { // Ignore native histograms.
+				group.groupCount++
+				delta := s.F - group.floatMean
+				group.floatMean += delta / float64(group.groupCount)
+				group.floatValue += delta * (s.F - group.floatMean)
+			}

 		case parser.TOPK:
 			// We build a heap of up to k elements, with the smallest element at heap[0].

@@ -2696,7 +2742,16 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 	for _, aggr := range orderedResult {
 		switch op {
 		case parser.AVG:
-			aggr.floatValue = aggr.mean
+			if aggr.hasFloat && aggr.hasHistogram {
+				// We cannot aggregate histogram sample with a float64 sample.
+				// TODO(zenador): Issue warning when plumbing is in place.
+				continue
+			}
+			if aggr.hasHistogram {
+				aggr.histogramValue = aggr.histogramMean.Compact(0)
+			} else {
+				aggr.floatValue = aggr.floatMean
+			}

 		case parser.COUNT, parser.COUNT_VALUES:
 			aggr.floatValue = float64(aggr.groupCount)

@@ -2739,8 +2794,12 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 		case parser.SUM:
 			if aggr.hasFloat && aggr.hasHistogram {
 				// We cannot aggregate histogram sample with a float64 sample.
+				// TODO(zenador): Issue warning when plumbing is in place.
 				continue
 			}
+			if aggr.hasHistogram {
+				aggr.histogramValue.Compact(0)
+			}
 		default:
 			// For other aggregations, we already have the right value.
 		}
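One detail worth a worked example: the AVG path keeps a running mean as `mean += s.F/n - mean/n` instead of accumulating a sum, so intermediate values never exceed the magnitude of the data. A minimal sketch:

package main

import "fmt"

func main() {
	samples := []float64{1.6e308, 1.6e308}

	// Naive sum overflows: 1.6e308 + 1.6e308 = +Inf, so sum/2 would be +Inf.
	// The incremental form divides each side first and stays finite.
	mean := 0.0
	for i, x := range samples {
		n := float64(i + 1)
		mean += x/n - mean/n
	}
	fmt.Println(mean) // 1.6e308
}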
@@ -3986,21 +3986,23 @@ func TestNativeHistogram_HistogramFraction(t *testing.T) {
 		}
 	}

-func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) {
+func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
 	// TODO(codesome): Integrate histograms into the PromQL testing framework
 	// and write more tests there.
 	cases := []struct {
-		histograms []histogram.Histogram
-		expected   histogram.FloatHistogram
+		histograms  []histogram.Histogram
+		expected    histogram.FloatHistogram
+		expectedAvg histogram.FloatHistogram
 	}{
 		{
 			histograms: []histogram.Histogram{
 				{
-					Schema: 0,
-					Count: 21,
-					Sum: 1234.5,
-					ZeroThreshold: 0.001,
-					ZeroCount: 4,
+					CounterResetHint: histogram.GaugeType,
+					Schema: 0,
+					Count: 21,
+					Sum: 1234.5,
+					ZeroThreshold: 0.001,
+					ZeroCount: 4,
 					PositiveSpans: []histogram.Span{
 						{Offset: 0, Length: 2},
 						{Offset: 1, Length: 2},

@@ -4012,6 +4014,182 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) {
 					},
 					NegativeBuckets: []int64{2, 2, -3, 8},
 				},
 				{
 					CounterResetHint: histogram.GaugeType,
 					Schema: 0,
 					Count: 36,
 					Sum: 2345.6,
 					ZeroThreshold: 0.001,
 					ZeroCount: 5,
 					PositiveSpans: []histogram.Span{
 						{Offset: 0, Length: 4},
 						{Offset: 0, Length: 0},
 						{Offset: 0, Length: 3},
 					},
 					PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
 					NegativeSpans: []histogram.Span{
 						{Offset: 1, Length: 4},
 						{Offset: 2, Length: 0},
 						{Offset: 2, Length: 3},
 					},
 					NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
 				},
 				{
 					CounterResetHint: histogram.GaugeType,
 					Schema: 0,
 					Count: 36,
 					Sum: 1111.1,
 					ZeroThreshold: 0.001,
 					ZeroCount: 5,
 					PositiveSpans: []histogram.Span{
 						{Offset: 0, Length: 4},
 						{Offset: 0, Length: 0},
 						{Offset: 0, Length: 3},
 					},
 					PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
 					NegativeSpans: []histogram.Span{
 						{Offset: 1, Length: 4},
 						{Offset: 2, Length: 0},
 						{Offset: 2, Length: 3},
 					},
 					NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
 				},
 				{
 					CounterResetHint: histogram.GaugeType,
 					Schema: 1, // Everything is 0 just to make the count 4 so avg has nicer numbers.
 				},
 			},
 			expected: histogram.FloatHistogram{
 				CounterResetHint: histogram.GaugeType,
 				Schema: 0,
 				ZeroThreshold: 0.001,
 				ZeroCount: 14,
 				Count: 93,
 				Sum: 4691.2,
 				PositiveSpans: []histogram.Span{
 					{Offset: 0, Length: 7},
 				},
 				PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2},
 				NegativeSpans: []histogram.Span{
 					{Offset: 0, Length: 6},
 					{Offset: 3, Length: 3},
 				},
 				NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4},
 			},
 			expectedAvg: histogram.FloatHistogram{
 				CounterResetHint: histogram.GaugeType,
 				Schema: 0,
 				ZeroThreshold: 0.001,
 				ZeroCount: 3.5,
 				Count: 23.25,
 				Sum: 1172.8,
 				PositiveSpans: []histogram.Span{
 					{Offset: 0, Length: 7},
 				},
 				PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5},
 				NegativeSpans: []histogram.Span{
 					{Offset: 0, Length: 6},
 					{Offset: 3, Length: 3},
 				},
 				NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1},
 			},
 		},
 	}

 	idx0 := int64(0)
 	for _, c := range cases {
 		for _, floatHisto := range []bool{true, false} {
 			t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
 				test, err := NewTest(t, "")
 				require.NoError(t, err)
 				t.Cleanup(test.Close)

 				seriesName := "sparse_histogram_series"
 				seriesNameOverTime := "sparse_histogram_series_over_time"

 				engine := test.QueryEngine()

 				ts := idx0 * int64(10*time.Minute/time.Millisecond)
 				app := test.Storage().Appender(context.TODO())
 				for idx1, h := range c.histograms {
 					lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
 					// Since we mutate h later, we need to create a copy here.
 					if floatHisto {
 						_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
 					} else {
 						_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
 					}
 					require.NoError(t, err)

 					lbls = labels.FromStrings("__name__", seriesNameOverTime)
 					newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond)
 					// Since we mutate h later, we need to create a copy here.
 					if floatHisto {
 						_, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat())
 					} else {
 						_, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil)
 					}
 					require.NoError(t, err)
 				}
 				require.NoError(t, app.Commit())

 				queryAndCheck := func(queryString string, ts int64, exp Vector) {
 					qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
 					require.NoError(t, err)

 					res := qry.Exec(test.Context())
 					require.NoError(t, res.Err)

 					vector, err := res.Vector()
 					require.NoError(t, err)

 					require.Equal(t, exp, vector)
 				}

 				// sum().
 				queryString := fmt.Sprintf("sum(%s)", seriesName)
 				queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})

 				// + operator.
 				queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName)
 				for idx := 1; idx < len(c.histograms); idx++ {
 					queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx)
 				}
 				queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})

 				// count().
 				queryString = fmt.Sprintf("count(%s)", seriesName)
 				queryAndCheck(queryString, ts, []Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}})

 				// avg().
 				queryString = fmt.Sprintf("avg(%s)", seriesName)
 				queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})

 				offset := int64(len(c.histograms) - 1)
 				newTs := ts + offset*int64(time.Minute/time.Millisecond)

 				// sum_over_time().
 				queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset)
 				queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}})

 				// avg_over_time().
 				queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset)
 				queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
 			})
 			idx0++
 		}
 	}
 }

 func TestNativeHistogram_SubOperator(t *testing.T) {
 	// TODO(codesome): Integrate histograms into the PromQL testing framework
 	// and write more tests there.
 	cases := []struct {
 		histograms []histogram.Histogram
 		expected   histogram.FloatHistogram
 	}{
 		{
 			histograms: []histogram.Histogram{
 				{
 					Schema: 0,
 					Count: 36,

@@ -4031,10 +4209,117 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) {
 					},
 					NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
 				},
 				{
 					Schema: 0,
 					Count: 11,
 					Sum: 1234.5,
 					ZeroThreshold: 0.001,
 					ZeroCount: 3,
 					PositiveSpans: []histogram.Span{
 						{Offset: 1, Length: 2},
 					},
 					PositiveBuckets: []int64{2, -1},
 					NegativeSpans: []histogram.Span{
 						{Offset: 2, Length: 2},
 					},
 					NegativeBuckets: []int64{3, -1},
 				},
 			},
 			expected: histogram.FloatHistogram{
 				Schema: 0,
 				Count: 25,
 				Sum: 1111.1,
 				ZeroThreshold: 0.001,
 				ZeroCount: 2,
 				PositiveSpans: []histogram.Span{
 					{Offset: 0, Length: 2},
 					{Offset: 1, Length: 4},
 				},
 				PositiveBuckets: []float64{1, 1, 2, 1, 1, 1},
 				NegativeSpans: []histogram.Span{
 					{Offset: 1, Length: 2},
 					{Offset: 1, Length: 1},
 					{Offset: 4, Length: 3},
 				},
 				NegativeBuckets: []float64{1, 1, 7, 5, 5, 2},
 			},
 		},
 		{
 			histograms: []histogram.Histogram{
 				{
 					Schema: 0,
 					Count: 36,
-					Sum: 1111.1,
+					Sum: 2345.6,
 					ZeroThreshold: 0.001,
 					ZeroCount: 5,
 					PositiveSpans: []histogram.Span{
 						{Offset: 0, Length: 4},
 						{Offset: 0, Length: 0},
 						{Offset: 0, Length: 3},
 					},
 					PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
 					NegativeSpans: []histogram.Span{
 						{Offset: 1, Length: 4},
 						{Offset: 2, Length: 0},
 						{Offset: 2, Length: 3},
 					},
 					NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
 				},
 				{
 					Schema: 1,
 					Count: 11,
 					Sum: 1234.5,
 					ZeroThreshold: 0.001,
 					ZeroCount: 3,
 					PositiveSpans: []histogram.Span{
 						{Offset: 1, Length: 2},
 					},
 					PositiveBuckets: []int64{2, -1},
 					NegativeSpans: []histogram.Span{
 						{Offset: 2, Length: 2},
 					},
 					NegativeBuckets: []int64{3, -1},
 				},
 			},
 			expected: histogram.FloatHistogram{
 				Schema: 0,
 				Count: 25,
 				Sum: 1111.1,
 				ZeroThreshold: 0.001,
 				ZeroCount: 2,
 				PositiveSpans: []histogram.Span{
 					{Offset: 0, Length: 1},
 					{Offset: 1, Length: 5},
 				},
 				PositiveBuckets: []float64{1, 1, 2, 1, 1, 1},
 				NegativeSpans: []histogram.Span{
 					{Offset: 1, Length: 4},
 					{Offset: 4, Length: 3},
 				},
 				NegativeBuckets: []float64{-2, 2, 2, 7, 5, 5, 2},
 			},
 		},
 		{
 			histograms: []histogram.Histogram{
 				{
 					Schema: 1,
 					Count: 11,
 					Sum: 1234.5,
 					ZeroThreshold: 0.001,
 					ZeroCount: 3,
 					PositiveSpans: []histogram.Span{
 						{Offset: 1, Length: 2},
 					},
 					PositiveBuckets: []int64{2, -1},
 					NegativeSpans: []histogram.Span{
 						{Offset: 2, Length: 2},
 					},
 					NegativeBuckets: []int64{3, -1},
 				},
 				{
 					Schema: 0,
 					Count: 36,
 					Sum: 2345.6,
 					ZeroThreshold: 0.001,
 					ZeroCount: 5,
 					PositiveSpans: []histogram.Span{

@@ -4053,21 +4338,20 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) {
 				},
 			expected: histogram.FloatHistogram{
 				Schema: 0,
+				Count: -25,
+				Sum: -1111.1,
 				ZeroThreshold: 0.001,
-				ZeroCount: 14,
-				Count: 93,
-				Sum: 4691.2,
+				ZeroCount: -2,
 				PositiveSpans: []histogram.Span{
-					{Offset: 0, Length: 3},
-					{Offset: 0, Length: 4},
+					{Offset: 0, Length: 1},
+					{Offset: 1, Length: 5},
 				},
-				PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2},
+				PositiveBuckets: []float64{-1, -1, -2, -1, -1, -1},
 				NegativeSpans: []histogram.Span{
-					{Offset: 0, Length: 4},
-					{Offset: 0, Length: 2},
-					{Offset: 3, Length: 3},
+					{Offset: 1, Length: 4},
+					{Offset: 4, Length: 3},
 				},
-				NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4},
+				NegativeBuckets: []float64{2, -2, -2, -7, -5, -5, -2},
 			},
 		},
 	}

@@ -4111,20 +4395,177 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) {
 				require.Equal(t, exp, vector)
 			}

-				// sum().
-				queryString := fmt.Sprintf("sum(%s)", seriesName)
-				queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
-
-				// + operator.
-				queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName)
+				// - operator.
+				queryString := fmt.Sprintf(`%s{idx="0"}`, seriesName)
 				for idx := 1; idx < len(c.histograms); idx++ {
-					queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx)
+					queryString += fmt.Sprintf(` - ignoring(idx) %s{idx="%d"}`, seriesName, idx)
 				}
 				queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
 			})
 			idx0++
 		}
 	}
 }

-				// count().
-				queryString = fmt.Sprintf("count(%s)", seriesName)
-				queryAndCheck(queryString, []Sample{{T: ts, F: 3, Metric: labels.EmptyLabels()}})
+func TestNativeHistogram_MulDivOperator(t *testing.T) {
+	// TODO(codesome): Integrate histograms into the PromQL testing framework
+	// and write more tests there.
+	originalHistogram := histogram.Histogram{
+		Schema: 0,
+		Count: 21,
+		Sum: 33,
+		ZeroThreshold: 0.001,
+		ZeroCount: 3,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 3},
+		},
+		PositiveBuckets: []int64{3, 0, 0},
+		NegativeSpans: []histogram.Span{
+			{Offset: 0, Length: 3},
+		},
+		NegativeBuckets: []int64{3, 0, 0},
+	}
+
+	cases := []struct {
+		scalar      float64
+		histogram   histogram.Histogram
+		expectedMul histogram.FloatHistogram
+		expectedDiv histogram.FloatHistogram
+	}{
+		{
+			scalar: 3,
+			histogram: originalHistogram,
+			expectedMul: histogram.FloatHistogram{
+				Schema: 0,
+				Count: 63,
+				Sum: 99,
+				ZeroThreshold: 0.001,
+				ZeroCount: 9,
+				PositiveSpans: []histogram.Span{
+					{Offset: 0, Length: 3},
+				},
+				PositiveBuckets: []float64{9, 9, 9},
+				NegativeSpans: []histogram.Span{
+					{Offset: 0, Length: 3},
+				},
+				NegativeBuckets: []float64{9, 9, 9},
+			},
+			expectedDiv: histogram.FloatHistogram{
+				Schema: 0,
+				Count: 7,
+				Sum: 11,
+				ZeroThreshold: 0.001,
+				ZeroCount: 1,
+				PositiveSpans: []histogram.Span{
+					{Offset: 0, Length: 3},
+				},
+				PositiveBuckets: []float64{1, 1, 1},
+				NegativeSpans: []histogram.Span{
+					{Offset: 0, Length: 3},
+				},
+				NegativeBuckets: []float64{1, 1, 1},
+			},
+		},
+		{
+			scalar: 0,
+			histogram: originalHistogram,
+			expectedMul: histogram.FloatHistogram{
+				Schema: 0,
+				Count: 0,
+				Sum: 0,
+				ZeroThreshold: 0.001,
+				ZeroCount: 0,
+				PositiveSpans: []histogram.Span{
+					{Offset: 0, Length: 3},
+				},
+				PositiveBuckets: []float64{0, 0, 0},
+				NegativeSpans: []histogram.Span{
+					{Offset: 0, Length: 3},
+				},
+				NegativeBuckets: []float64{0, 0, 0},
+			},
+			expectedDiv: histogram.FloatHistogram{
+				Schema: 0,
+				Count: math.Inf(1),
+				Sum: math.Inf(1),
+				ZeroThreshold: 0.001,
+				ZeroCount: math.Inf(1),
+				PositiveSpans: []histogram.Span{
+					{Offset: 0, Length: 3},
+				},
+				PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)},
+				NegativeSpans: []histogram.Span{
+					{Offset: 0, Length: 3},
+				},
+				NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)},
+			},
+		},
+	}
+
+	idx0 := int64(0)
+	for _, c := range cases {
+		for _, floatHisto := range []bool{true, false} {
+			t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
+				test, err := NewTest(t, "")
+				require.NoError(t, err)
+				t.Cleanup(test.Close)
+
+				seriesName := "sparse_histogram_series"
+				floatSeriesName := "float_series"
+
+				engine := test.QueryEngine()
+
+				ts := idx0 * int64(10*time.Minute/time.Millisecond)
+				app := test.Storage().Appender(context.TODO())
+				h := c.histogram
+				lbls := labels.FromStrings("__name__", seriesName)
+				// Since we mutate h later, we need to create a copy here.
+				if floatHisto {
+					_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
+				} else {
+					_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
+				}
+				require.NoError(t, err)
+				_, err = app.Append(0, labels.FromStrings("__name__", floatSeriesName), ts, c.scalar)
+				require.NoError(t, err)
+				require.NoError(t, app.Commit())
+
+				queryAndCheck := func(queryString string, exp Vector) {
+					qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
+					require.NoError(t, err)
+
+					res := qry.Exec(test.Context())
+					require.NoError(t, res.Err)
+
+					vector, err := res.Vector()
+					require.NoError(t, err)
+
+					require.Equal(t, exp, vector)
+				}
+
+				// histogram * scalar.
+				queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar)
+				queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
+
+				// scalar * histogram.
+				queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName)
+				queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
+
+				// histogram * float.
+				queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName)
+				queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
+
+				// float * histogram.
+				queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName)
+				queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
+
+				// histogram / scalar.
+				queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar)
+				queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
+
+				// histogram / float.
+				queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName)
+				queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
+			})
+			idx0++
+		}
@@ -162,7 +162,7 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 	if resultHistogram == nil {
 		resultFloat *= factor
 	} else {
-		resultHistogram.Scale(factor)
+		resultHistogram.Mul(factor)
 	}

 	return append(enh.Out, Sample{F: resultFloat, H: resultHistogram})

@@ -443,15 +443,40 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series)
 	return append(enh.Out, Sample{F: aggrFn(el)})
 }

+func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector {
+	el := vals[0].(Matrix)[0]
+
+	return append(enh.Out, Sample{H: aggrFn(el)})
+}
+
 // === avg_over_time(Matrix parser.ValueTypeMatrix) Vector ===
 func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	if len(vals[0].(Matrix)[0].Floats) == 0 {
-		// TODO(beorn7): The passed values only contain
-		// histograms. avg_over_time ignores histograms for now. If
-		// there are only histograms, we have to return without adding
-		// anything to enh.Out.
+	if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 {
+		// TODO(zenador): Add warning for mixed floats and histograms.
 		return enh.Out
 	}
+	if len(vals[0].(Matrix)[0].Floats) == 0 {
+		// The passed values only contain histograms.
+		return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
+			count := 1
+			mean := s.Histograms[0].H.Copy()
+			for _, h := range s.Histograms[1:] {
+				count++
+				left := h.H.Copy().Div(float64(count))
+				right := mean.Copy().Div(float64(count))
+				// The histogram being added/subtracted must have
+				// an equal or larger schema.
+				if h.H.Schema >= mean.Schema {
+					toAdd := right.Mul(-1).Add(left)
+					mean.Add(toAdd)
+				} else {
+					toAdd := left.Sub(right)
+					mean = toAdd.Add(mean)
+				}
+			}
+			return mean
+		})
+	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
 		var mean, count, c float64
 		for _, f := range s.Floats {

@@ -558,13 +583,26 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode

 // === sum_over_time(Matrix parser.ValueTypeMatrix) Vector ===
 func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
-	if len(vals[0].(Matrix)[0].Floats) == 0 {
-		// TODO(beorn7): The passed values only contain
-		// histograms. sum_over_time ignores histograms for now. If
-		// there are only histograms, we have to return without adding
-		// anything to enh.Out.
+	if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 {
+		// TODO(zenador): Add warning for mixed floats and histograms.
 		return enh.Out
 	}
+	if len(vals[0].(Matrix)[0].Floats) == 0 {
+		// The passed values only contain histograms.
+		return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
+			sum := s.Histograms[0].H.Copy()
+			for _, h := range s.Histograms[1:] {
+				// The histogram being added must have
+				// an equal or larger schema.
+				if h.H.Schema >= sum.Schema {
+					sum.Add(h.H)
+				} else {
+					sum = h.H.Copy().Add(sum)
+				}
+			}
+			return sum
+		})
+	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
 		var sum, c float64
 		for _, f := range s.Floats {
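The histogram branch of funcAvgOverTime applies the same incremental idea to whole histograms: with left = h/count and right = mean/count, the update is mean += left - right, expressed through Mul(-1).Add or Sub depending on which operand has the finer schema. A sketch on two bucket-free histograms with equal schemas (field values illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	hs := []*histogram.FloatHistogram{
		{Count: 10, Sum: 100},
		{Count: 30, Sum: 300},
	}
	mean := hs[0].Copy()
	for i, h := range hs[1:] {
		n := float64(i + 2)
		left := h.Copy().Div(n)
		right := mean.Copy().Div(n)
		mean.Add(right.Mul(-1).Add(left)) // mean += h/n - mean/n
	}
	fmt.Println(mean.Count, mean.Sum) // 20 200
}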
@@ -58,7 +58,7 @@ const (
 )

 func fuzzParseMetricWithContentType(in []byte, contentType string) int {
-	p, warning := textparse.New(in, contentType)
+	p, warning := textparse.New(in, contentType, false)
 	if warning != nil {
 		// An invalid content type is being passed, which should not happen
 		// in this context.
scrape/clientprotobuf.go (new file, 54 lines):

@@ -0,0 +1,54 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scrape
+
+import (
+	"bytes"
+	"encoding/binary"
+
+	"github.com/gogo/protobuf/proto"
+
+	// Intentionally using client model to simulate client in tests.
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Write a MetricFamily into a protobuf.
+// This function is intended for testing scraping by providing protobuf serialized input.
+func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) {
+	buffer := &bytes.Buffer{}
+	err := AddMetricFamilyToProtobuf(buffer, metricFamily)
+	if err != nil {
+		return nil, err
+	}
+	return buffer.Bytes(), nil
+}
+
+// Append a MetricFamily protobuf representation to a buffer.
+// This function is intended for testing scraping by providing protobuf serialized input.
+func AddMetricFamilyToProtobuf(buffer *bytes.Buffer, metricFamily *dto.MetricFamily) error {
+	protoBuf, err := proto.Marshal(metricFamily)
+	if err != nil {
+		return err
+	}
+
+	varintBuf := make([]byte, binary.MaxVarintLen32)
+	varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf)))
+
+	_, err = buffer.Write(varintBuf[:varintLength])
+	if err != nil {
+		return err
+	}
+	_, err = buffer.Write(protoBuf)
+	return err
+}
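A sketch of how a test could feed these helpers (the dto field values are illustrative; the framing matches what the protobuf scrape path expects):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus/prometheus/scrape"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("test_counter_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(42)}},
		},
	}
	body, err := scrape.MetricFamilyToProtobuf(mf)
	if err != nil {
		panic(err)
	}
	// body = uvarint(len(message)) ++ message, the delimited format used for
	// Content-Type application/vnd.google.protobuf exposition.
	fmt.Println(len(body) > 0)
}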
169
scrape/scrape.go
169
scrape/scrape.go
|
@ -191,6 +191,12 @@ var (
|
|||
},
|
||||
[]string{"scrape_job"},
|
||||
)
|
||||
targetScrapeNativeHistogramBucketLimit = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total",
|
||||
Help: "Total number of scrapes that hit the native histogram bucket limit and were rejected.",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -216,6 +222,7 @@ func init() {
|
|||
targetScrapeExemplarOutOfOrder,
|
||||
targetScrapePoolExceededLabelLimits,
|
||||
targetSyncFailed,
|
||||
targetScrapeNativeHistogramBucketLimit,
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -253,16 +260,18 @@ type labelLimits struct {
|
|||
}
|
||||
|
||||
type scrapeLoopOptions struct {
|
||||
target *Target
|
||||
scraper scraper
|
||||
sampleLimit int
|
||||
labelLimits *labelLimits
|
||||
honorLabels bool
|
||||
honorTimestamps bool
|
||||
interval time.Duration
|
||||
timeout time.Duration
|
||||
mrc []*relabel.Config
|
||||
cache *scrapeCache
|
||||
target *Target
|
||||
scraper scraper
|
||||
sampleLimit int
|
||||
bucketLimit int
|
||||
labelLimits *labelLimits
|
||||
honorLabels bool
|
||||
honorTimestamps bool
|
||||
interval time.Duration
|
||||
timeout time.Duration
|
||||
scrapeClassicHistograms bool
|
||||
mrc []*relabel.Config
|
||||
cache *scrapeCache
|
||||
}
|
||||
|
||||
const maxAheadTime = 10 * time.Minute
|
||||
|
@ -319,9 +328,11 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
|
|||
jitterSeed,
|
||||
opts.honorTimestamps,
|
||||
opts.sampleLimit,
|
||||
opts.bucketLimit,
|
||||
opts.labelLimits,
|
||||
opts.interval,
|
||||
opts.timeout,
|
||||
opts.scrapeClassicHistograms,
|
||||
options.ExtraMetrics,
|
||||
options.EnableMetadataStorage,
|
||||
opts.target,
|
||||
|
@ -412,6 +423,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
|
|||
timeout = time.Duration(sp.config.ScrapeTimeout)
|
||||
bodySizeLimit = int64(sp.config.BodySizeLimit)
|
||||
sampleLimit = int(sp.config.SampleLimit)
|
||||
bucketLimit = int(sp.config.NativeHistogramBucketLimit)
|
||||
labelLimits = &labelLimits{
|
||||
labelLimit: int(sp.config.LabelLimit),
|
||||
labelNameLengthLimit: int(sp.config.LabelNameLengthLimit),
|
||||
|
@ -446,6 +458,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
|
|||
target: t,
|
||||
scraper: s,
|
||||
sampleLimit: sampleLimit,
|
||||
bucketLimit: bucketLimit,
|
||||
labelLimits: labelLimits,
|
||||
honorLabels: honorLabels,
|
||||
honorTimestamps: honorTimestamps,
|
||||
|
@ -530,14 +543,16 @@ func (sp *scrapePool) sync(targets []*Target) {
|
|||
timeout = time.Duration(sp.config.ScrapeTimeout)
|
||||
bodySizeLimit = int64(sp.config.BodySizeLimit)
|
||||
sampleLimit = int(sp.config.SampleLimit)
|
||||
bucketLimit = int(sp.config.NativeHistogramBucketLimit)
|
||||
labelLimits = &labelLimits{
|
||||
labelLimit: int(sp.config.LabelLimit),
|
||||
labelNameLengthLimit: int(sp.config.LabelNameLengthLimit),
|
||||
labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
|
||||
}
|
||||
honorLabels = sp.config.HonorLabels
|
||||
honorTimestamps = sp.config.HonorTimestamps
|
||||
mrc = sp.config.MetricRelabelConfigs
|
||||
honorLabels = sp.config.HonorLabels
|
||||
honorTimestamps = sp.config.HonorTimestamps
|
||||
mrc = sp.config.MetricRelabelConfigs
|
||||
scrapeClassicHistograms = sp.config.ScrapeClassicHistograms
|
||||
)
|
||||
|
||||
sp.targetMtx.Lock()
|
||||
|
@ -556,15 +571,17 @@ func (sp *scrapePool) sync(targets []*Target) {
|
|||
}
|
||||
s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader}
|
||||
l := sp.newLoop(scrapeLoopOptions{
|
||||
target: t,
|
||||
scraper: s,
|
||||
sampleLimit: sampleLimit,
|
||||
labelLimits: labelLimits,
|
||||
honorLabels: honorLabels,
|
||||
honorTimestamps: honorTimestamps,
|
||||
mrc: mrc,
|
||||
interval: interval,
|
||||
timeout: timeout,
|
||||
target: t,
|
||||
scraper: s,
|
||||
sampleLimit: sampleLimit,
|
||||
bucketLimit: bucketLimit,
|
||||
labelLimits: labelLimits,
|
||||
honorLabels: honorLabels,
|
||||
honorTimestamps: honorTimestamps,
|
||||
mrc: mrc,
|
||||
interval: interval,
|
||||
timeout: timeout,
|
||||
scrapeClassicHistograms: scrapeClassicHistograms,
|
||||
})
|
||||
if err != nil {
|
||||
l.setForcedError(err)
|
||||
|
@ -731,17 +748,24 @@ func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels
|
|||
}
|
||||
|
||||
// appender returns an appender for ingested samples from the target.
|
||||
func appender(app storage.Appender, limit int) storage.Appender {
|
||||
func appender(app storage.Appender, sampleLimit, bucketLimit int) storage.Appender {
|
||||
app = &timeLimitAppender{
|
||||
Appender: app,
|
||||
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
|
||||
}
|
||||
|
||||
// The limit is applied after metrics are potentially dropped via relabeling.
|
||||
if limit > 0 {
|
||||
// The sampleLimit is applied after metrics are potentially dropped via relabeling.
|
||||
if sampleLimit > 0 {
|
||||
app = &limitAppender{
|
||||
Appender: app,
|
||||
limit: limit,
|
||||
limit: sampleLimit,
|
||||
}
|
||||
}
|
||||
|
||||
if bucketLimit > 0 {
|
||||
app = &bucketLimitAppender{
|
||||
Appender: app,
|
||||
limit: bucketLimit,
|
||||
}
|
||||
}
|
||||
return app
|
||||
|
@ -862,19 +886,21 @@ type cacheEntry struct {
|
|||
}
|
||||
|
||||
type scrapeLoop struct {
|
||||
scraper scraper
|
||||
l log.Logger
|
||||
cache *scrapeCache
|
||||
lastScrapeSize int
|
||||
buffers *pool.Pool
|
||||
jitterSeed uint64
|
||||
honorTimestamps bool
|
||||
forcedErr error
|
||||
forcedErrMtx sync.Mutex
|
||||
sampleLimit int
|
||||
labelLimits *labelLimits
|
||||
interval time.Duration
|
||||
timeout time.Duration
|
||||
scraper scraper
|
||||
l log.Logger
|
||||
cache *scrapeCache
|
||||
lastScrapeSize int
|
||||
buffers *pool.Pool
|
||||
jitterSeed uint64
|
||||
honorTimestamps bool
|
||||
forcedErr error
|
||||
forcedErrMtx sync.Mutex
|
||||
sampleLimit int
|
||||
bucketLimit int
|
||||
labelLimits *labelLimits
|
||||
interval time.Duration
|
||||
timeout time.Duration
|
||||
scrapeClassicHistograms bool
|
||||
|
||||
appender func(ctx context.Context) storage.Appender
|
||||
sampleMutator labelsMutator
|
||||
|
@ -1152,9 +1178,11 @@ func newScrapeLoop(ctx context.Context,
|
|||
jitterSeed uint64,
|
||||
honorTimestamps bool,
|
||||
sampleLimit int,
|
||||
bucketLimit int,
|
||||
labelLimits *labelLimits,
|
||||
 	interval time.Duration,
 	timeout time.Duration,
+	scrapeClassicHistograms bool,
 	reportExtraMetrics bool,
 	appendMetadataToWAL bool,
 	target *Target,

@@ -1182,24 +1210,26 @@ func newScrapeLoop(ctx context.Context,
 	}

 	sl := &scrapeLoop{
-		scraper:             sc,
-		buffers:             buffers,
-		cache:               cache,
-		appender:            appender,
-		sampleMutator:       sampleMutator,
-		reportSampleMutator: reportSampleMutator,
-		stopped:             make(chan struct{}),
-		jitterSeed:          jitterSeed,
-		l:                   l,
-		parentCtx:           ctx,
-		appenderCtx:         appenderCtx,
-		honorTimestamps:     honorTimestamps,
-		sampleLimit:         sampleLimit,
-		labelLimits:         labelLimits,
-		interval:            interval,
-		timeout:             timeout,
-		reportExtraMetrics:  reportExtraMetrics,
-		appendMetadataToWAL: appendMetadataToWAL,
+		scraper:                 sc,
+		buffers:                 buffers,
+		cache:                   cache,
+		appender:                appender,
+		sampleMutator:           sampleMutator,
+		reportSampleMutator:     reportSampleMutator,
+		stopped:                 make(chan struct{}),
+		jitterSeed:              jitterSeed,
+		l:                       l,
+		parentCtx:               ctx,
+		appenderCtx:             appenderCtx,
+		honorTimestamps:         honorTimestamps,
+		sampleLimit:             sampleLimit,
+		bucketLimit:             bucketLimit,
+		labelLimits:             labelLimits,
+		interval:                interval,
+		timeout:                 timeout,
+		scrapeClassicHistograms: scrapeClassicHistograms,
+		reportExtraMetrics:      reportExtraMetrics,
+		appendMetadataToWAL:     appendMetadataToWAL,
 	}
 	sl.ctx, sl.cancel = context.WithCancel(ctx)

@@ -1469,7 +1499,7 @@ type appendErrors struct {
 }

 func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
-	p, err := textparse.New(b, contentType)
+	p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms)
 	if err != nil {
 		level.Debug(sl.l).Log(
 			"msg", "Invalid content type on scrape, using prometheus parser as fallback.",

@@ -1482,6 +1512,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
 		defTime         = timestamp.FromTime(ts)
 		appErrs         = appendErrors{}
 		sampleLimitErr  error
+		bucketLimitErr  error
 		e               exemplar.Exemplar // escapes to heap so hoisted out of loop
 		meta            metadata.Metadata
 		metadataChanged bool

@@ -1510,7 +1541,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
 	}

 	// Take an appender with limits.
-	app = appender(app, sl.sampleLimit)
+	app = appender(app, sl.sampleLimit, sl.bucketLimit)

 	defer func() {
 		if err != nil {

@@ -1631,7 +1662,7 @@ loop:
 			} else {
 				ref, err = app.Append(ref, lset, t, val)
 			}
-			sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &appErrs)
+			sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
 			if err != nil {
 				if err != storage.ErrNotFound {
 					level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)

@@ -1645,7 +1676,7 @@ loop:
 				sl.cache.trackStaleness(hash, lset)
 			}
 			sl.cache.addRef(met, ref, lset, hash)
-			if sampleAdded && sampleLimitErr == nil {
+			if sampleAdded && sampleLimitErr == nil && bucketLimitErr == nil {
 				seriesAdded++
 			}
 		}

@@ -1681,6 +1712,13 @@ loop:
 		// We only want to increment this once per scrape, so this is Inc'd outside the loop.
 		targetScrapeSampleLimit.Inc()
 	}
+	if bucketLimitErr != nil {
+		if err == nil {
+			err = bucketLimitErr // If sample limit is hit, that error takes precedence.
+		}
+		// We only want to increment this once per scrape, so this is Inc'd outside the loop.
+		targetScrapeNativeHistogramBucketLimit.Inc()
+	}
 	if appErrs.numOutOfOrder > 0 {
 		level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder)
 	}

@@ -1710,8 +1748,8 @@ loop:
 }

 // Adds samples to the appender, checking the error, and then returns the # of samples added,
-// whether the caller should continue to process more samples, and any sample limit errors.
-func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr *error, appErrs *appendErrors) (bool, error) {
+// whether the caller should continue to process more samples, and any sample or bucket limit errors.
+func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
 	switch errors.Cause(err) {
 	case nil:
 		if tp == nil && ce != nil {

@@ -1740,6 +1778,11 @@ func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err e
 		// total number of samples scraped.
 		*sampleLimitErr = err
 		return false, nil
+	case errBucketLimit:
+		// Keep on parsing output if we hit the limit, so we report the correct
+		// total number of samples scraped.
+		*bucketLimitErr = err
+		return false, nil
 	default:
 		return false, err
 	}
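Note how both limits are latched into per-scrape error variables instead of aborting the parse loop: `total` still counts every scraped sample, and each limit metric is incremented at most once per scrape, with the sample limit taking precedence when both fire. A minimal standalone sketch of that latch-and-surface pattern (all names here are illustrative, not the scrape package's API):

package main

import (
	"errors"
	"fmt"
)

var (
	errSampleLimit = errors.New("sample limit exceeded")
	errBucketLimit = errors.New("histogram bucket limit exceeded")
)

// processAll records limit errors instead of returning early, so the total
// is still counted, then surfaces the sample-limit error ahead of the
// bucket-limit error, mirroring the precedence in the hunk above.
func processAll(results []error) (total int, err error) {
	var sampleLimitErr, bucketLimitErr error
	for _, e := range results {
		total++ // counted even when a limit was hit
		switch e {
		case errSampleLimit:
			sampleLimitErr = e
		case errBucketLimit:
			bucketLimitErr = e
		}
	}
	if sampleLimitErr != nil {
		err = sampleLimitErr
	}
	if bucketLimitErr != nil && err == nil {
		err = bucketLimitErr // sample limit, if hit, takes precedence
	}
	return total, err
}

func main() {
	total, err := processAll([]error{nil, errBucketLimit, errSampleLimit})
	fmt.Println(total, err) // 3 sample limit exceeded
}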
@@ -30,6 +30,7 @@ import (

 	"github.com/go-kit/log"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"

@@ -489,7 +490,7 @@ func TestScrapePoolAppender(t *testing.T) {
 	appl, ok := loop.(*scrapeLoop)
 	require.True(t, ok, "Expected scrapeLoop but got %T", loop)

-	wrapped := appender(appl.appender(context.Background()), 0)
+	wrapped := appender(appl.appender(context.Background()), 0, 0)

 	tl, ok := wrapped.(*timeLimitAppender)
 	require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped)

@@ -505,7 +506,7 @@ func TestScrapePoolAppender(t *testing.T) {
 	appl, ok = loop.(*scrapeLoop)
 	require.True(t, ok, "Expected scrapeLoop but got %T", loop)

-	wrapped = appender(appl.appender(context.Background()), sampleLimit)
+	wrapped = appender(appl.appender(context.Background()), sampleLimit, 0)

 	sl, ok := wrapped.(*limitAppender)
 	require.True(t, ok, "Expected limitAppender but got %T", wrapped)

@@ -515,6 +516,20 @@ func TestScrapePoolAppender(t *testing.T) {

 	_, ok = tl.Appender.(nopAppender)
 	require.True(t, ok, "Expected base appender but got %T", tl.Appender)
+
+	wrapped = appender(appl.appender(context.Background()), sampleLimit, 100)
+
+	bl, ok := wrapped.(*bucketLimitAppender)
+	require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped)
+
+	sl, ok = bl.Appender.(*limitAppender)
+	require.True(t, ok, "Expected limitAppender but got %T", bl)
+
+	tl, ok = sl.Appender.(*timeLimitAppender)
+	require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender)
+
+	_, ok = tl.Appender.(nopAppender)
+	require.True(t, ok, "Expected base appender but got %T", tl.Appender)
 }
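The new assertions pin the nesting order the `appender()` helper produces: bucket limit outermost, then sample limit, then the time limit around the base appender. A hedged sketch of that decorator-style construction, consistent with what the test asserts (the helper body and `maxAppendTime` are assumptions, not the package's actual code):

// appenderSketch shows the wrapping order the test above asserts: the time
// limit is always applied, while the sample and bucket limits are only
// layered on when positive.
func appenderSketch(app storage.Appender, sampleLimit, bucketLimit int) storage.Appender {
	app = &timeLimitAppender{Appender: app, maxTime: maxAppendTime} // maxAppendTime: hypothetical bound
	if sampleLimit > 0 {
		app = &limitAppender{Appender: app, limit: sampleLimit}
	}
	if bucketLimit > 0 {
		app = &bucketLimitAppender{Appender: app, limit: bucketLimit}
	}
	return app
}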

 func TestScrapePoolRaces(t *testing.T) {

@@ -612,12 +627,13 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
 		nopMutator,
 		nil, nil, 0,
 		true,
-		0,
+		0, 0,
 		nil,
 		1,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -684,12 +700,13 @@ func TestScrapeLoopStop(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		10*time.Millisecond,
 		time.Hour,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -760,12 +777,13 @@ func TestScrapeLoopRun(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		time.Second,
 		time.Hour,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -815,12 +833,13 @@ func TestScrapeLoopRun(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		time.Second,
 		100*time.Millisecond,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -874,12 +893,13 @@ func TestScrapeLoopForcedErr(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		time.Second,
 		time.Hour,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -932,12 +952,13 @@ func TestScrapeLoopMetadata(t *testing.T) {
 		cache,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -989,12 +1010,13 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -1049,12 +1071,13 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -1127,12 +1150,13 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		10*time.Millisecond,
 		time.Hour,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -1190,12 +1214,13 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		10*time.Millisecond,
 		time.Hour,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -1256,12 +1281,13 @@ func TestScrapeLoopCache(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		10*time.Millisecond,
 		time.Hour,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -1339,12 +1365,13 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		10*time.Millisecond,
 		time.Hour,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -1453,12 +1480,13 @@ func TestScrapeLoopAppend(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -1548,7 +1576,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
 			return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
 		},
 		nil,
-		func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, nil, 0, 0, false, false, nil, false,
+		func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, 0, nil, 0, 0, false, false, false, nil, false,
 	)
 	slApp := sl.appender(context.Background())
 	_, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))

@@ -1579,12 +1607,13 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -1592,7 +1621,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
 	fakeRef := storage.SeriesRef(1)
 	expValue := float64(1)
 	metric := []byte(`metric{n="1"} 1`)
-	p, warning := textparse.New(metric, "")
+	p, warning := textparse.New(metric, "", false)
 	require.NoError(t, warning)

 	var lset labels.Labels

@@ -1637,12 +1666,13 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
 		nil,
 		0,
 		true,
-		app.limit,
+		app.limit, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)
@@ -1697,6 +1727,105 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
 	require.Equal(t, 0, seriesAdded)
 }

+func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
+	resApp := &collectResultAppender{}
+	app := &bucketLimitAppender{Appender: resApp, limit: 2}
+
+	sl := newScrapeLoop(context.Background(),
+		nil, nil, nil,
+		func(l labels.Labels) labels.Labels {
+			if l.Has("deleteme") {
+				return labels.EmptyLabels()
+			}
+			return l
+		},
+		nopMutator,
+		func(ctx context.Context) storage.Appender { return app },
+		nil,
+		0,
+		true,
+		app.limit, 0,
+		nil,
+		0,
+		0,
+		false,
+		false,
+		false,
+		nil,
+		false,
+	)
+
+	metric := dto.Metric{}
+	err := targetScrapeNativeHistogramBucketLimit.Write(&metric)
+	require.NoError(t, err)
+	beforeMetricValue := metric.GetCounter().GetValue()
+
+	nativeHistogram := prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace:                      "testing",
+			Name:                           "example_native_histogram",
+			Help:                           "This is used for testing",
+			ConstLabels:                    map[string]string{"some": "value"},
+			NativeHistogramBucketFactor:    1.1, // 10% increase from bucket to bucket
+			NativeHistogramMaxBucketNumber: 100, // intentionally higher than the limit we'll use in the scraper
+		},
+		[]string{"size"},
+	)
+	registry := prometheus.NewRegistry()
+	registry.Register(nativeHistogram)
+	nativeHistogram.WithLabelValues("S").Observe(1.0)
+	nativeHistogram.WithLabelValues("M").Observe(1.0)
+	nativeHistogram.WithLabelValues("L").Observe(1.0)
+	nativeHistogram.WithLabelValues("M").Observe(10.0)
+	nativeHistogram.WithLabelValues("L").Observe(10.0) // in different bucket since > 1*1.1
+
+	gathered, err := registry.Gather()
+	require.NoError(t, err)
+	require.NotEmpty(t, gathered)
+
+	histogramMetricFamily := gathered[0]
+	msg, err := MetricFamilyToProtobuf(histogramMetricFamily)
+	require.NoError(t, err)
+
+	now := time.Now()
+	total, added, seriesAdded, err := sl.append(app, msg, "application/vnd.google.protobuf", now)
+	require.NoError(t, err)
+	require.Equal(t, 3, total)
+	require.Equal(t, 3, added)
+	require.Equal(t, 3, seriesAdded)
+
+	err = targetScrapeNativeHistogramBucketLimit.Write(&metric)
+	require.NoError(t, err)
+	metricValue := metric.GetCounter().GetValue()
+	require.Equal(t, beforeMetricValue, metricValue)
+	beforeMetricValue = metricValue
+
+	nativeHistogram.WithLabelValues("L").Observe(100.0) // in different bucket since > 10*1.1
+
+	gathered, err = registry.Gather()
+	require.NoError(t, err)
+	require.NotEmpty(t, gathered)
+
+	histogramMetricFamily = gathered[0]
+	msg, err = MetricFamilyToProtobuf(histogramMetricFamily)
+	require.NoError(t, err)
+
+	now = time.Now()
+	total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now)
+	if err != errBucketLimit {
+		t.Fatalf("Did not see expected histogram bucket limit error: %s", err)
+	}
+	require.NoError(t, app.Rollback())
+	require.Equal(t, 3, total)
+	require.Equal(t, 3, added)
+	require.Equal(t, 0, seriesAdded)
+
+	err = targetScrapeNativeHistogramBucketLimit.Write(&metric)
+	require.NoError(t, err)
+	metricValue = metric.GetCounter().GetValue()
+	require.Equal(t, beforeMetricValue+1, metricValue)
+}
+
 func TestScrapeLoop_ChangingMetricString(t *testing.T) {
 	// This is a regression test for the scrape loop cache not properly maintaining
 	// IDs when the string representation of a metric changes across a scrape. Thus
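Why the "L" series trips the limit in the bucket-limit test above: a requested NativeHistogramBucketFactor of 1.1 should map to schema 3 (actual growth factor 2^(1/8) ≈ 1.09, the largest standard factor not exceeding 1.1), so 1.0, 10.0 and 100.0 each land in a distinct exponential bucket — three populated buckets against a limit of 2. A small standalone illustration; the index formula follows the native-histogram upper-bound rule, and the schema choice here is our assumption:

package main

import (
	"fmt"
	"math"
)

// bucketIndex returns the native-histogram bucket index of v for a given
// schema, using key = ceil(log2(v) * 2^schema); bucket i covers
// (base^(i-1), base^i] with base = 2^(2^-schema).
func bucketIndex(v float64, schema int) int {
	return int(math.Ceil(math.Log2(v) * math.Exp2(float64(schema))))
}

func main() {
	for _, v := range []float64{1.0, 10.0, 100.0} {
		fmt.Printf("value %6.1f -> bucket %d\n", v, bucketIndex(v, 3))
	}
	// Three distinct buckets, so a bucketLimitAppender with limit 2
	// rejects the scrape once 100.0 has been observed.
}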
@@ -1714,12 +1843,13 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -1762,12 +1892,13 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -1813,12 +1944,13 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -1924,12 +2056,13 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -1989,12 +2122,13 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -2041,12 +2175,13 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		10*time.Millisecond,
 		time.Hour,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -2077,12 +2212,13 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		10*time.Millisecond,
 		time.Hour,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -2126,12 +2262,13 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -2171,12 +2308,13 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -2443,12 +2581,13 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
 		func(ctx context.Context) storage.Appender { return capp },
 		nil, 0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -2484,12 +2623,13 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
 		func(ctx context.Context) storage.Appender { return capp },
 		nil, 0,
 		false,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -2524,12 +2664,13 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -2582,12 +2723,13 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -2845,12 +2987,13 @@ func TestScrapeAddFast(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -2908,7 +3051,7 @@ func TestReuseCacheRace(*testing.T) {
 func TestCheckAddError(t *testing.T) {
 	var appErrs appendErrors
 	sl := scrapeLoop{l: log.NewNopLogger()}
-	sl.checkAddError(nil, nil, nil, storage.ErrOutOfOrderSample, nil, &appErrs)
+	sl.checkAddError(nil, nil, nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs)
 	require.Equal(t, 1, appErrs.numOutOfOrder)
 }

@@ -2931,12 +3074,13 @@ func TestScrapeReportSingleAppender(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		nil,
 		10*time.Millisecond,
 		time.Hour,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)

@@ -3133,12 +3277,13 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
 		nil,
 		0,
 		true,
-		0,
+		0, 0,
 		&test.labelLimits,
 		0,
 		0,
+		false,
 		false,
 		false,
 		nil,
 		false,
 	)
@@ -27,6 +27,7 @@ import (

 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/prometheus/prometheus/model/textparse"

@@ -313,7 +314,10 @@ func (ts Targets) Len() int { return len(ts) }
 func (ts Targets) Less(i, j int) bool { return ts[i].URL().String() < ts[j].URL().String() }
 func (ts Targets) Swap(i, j int)      { ts[i], ts[j] = ts[j], ts[i] }

-var errSampleLimit = errors.New("sample limit exceeded")
+var (
+	errSampleLimit = errors.New("sample limit exceeded")
+	errBucketLimit = errors.New("histogram bucket limit exceeded")
+)

 // limitAppender limits the number of total appended samples in a batch.
 type limitAppender struct {

@@ -355,6 +359,31 @@ func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels,
 	return ref, nil
 }

+// bucketLimitAppender limits the number of total appended samples in a batch.
+type bucketLimitAppender struct {
+	storage.Appender
+
+	limit int
+}
+
+func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
+	if h != nil {
+		if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit {
+			return 0, errBucketLimit
+		}
+	}
+	if fh != nil {
+		if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit {
+			return 0, errBucketLimit
+		}
+	}
+	ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh)
+	if err != nil {
+		return 0, err
+	}
+	return ref, nil
+}
+
 // PopulateLabels builds a label set from the given label set and scrape configuration.
 // It returns a label set before relabeling was applied as the second return value.
 // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling.
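The limit check above counts populated buckets across both signs and rejects the histogram before attempting the append, so the batch is left untouched. A sketch of using the wrapper (assumes the unexported identifiers above plus a `base` appender, a label set `lset`, timestamp `ts` and histogram `h` in scope):

// Wrap any storage.Appender with a per-histogram bucket cap.
app := &bucketLimitAppender{Appender: base, limit: 100}
if _, err := app.AppendHistogram(0, lset, ts, h, nil); err == errBucketLimit {
	// h had more than 100 populated buckets; nothing was appended.
}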
@@ -31,6 +31,7 @@ import (

 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 )

@@ -488,3 +489,63 @@ scrape_configs:
 		})
 	}
 }
+
+func TestBucketLimitAppender(t *testing.T) {
+	example := histogram.Histogram{
+		Schema:        0,
+		Count:         21,
+		Sum:           33,
+		ZeroThreshold: 0.001,
+		ZeroCount:     3,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 3},
+		},
+		PositiveBuckets: []int64{3, 0, 0},
+		NegativeSpans: []histogram.Span{
+			{Offset: 0, Length: 3},
+		},
+		NegativeBuckets: []int64{3, 0, 0},
+	}
+
+	cases := []struct {
+		h           histogram.Histogram
+		limit       int
+		expectError bool
+	}{
+		{
+			h:           example,
+			limit:       3,
+			expectError: true,
+		},
+		{
+			h:           example,
+			limit:       10,
+			expectError: false,
+		},
+	}
+
+	resApp := &collectResultAppender{}
+
+	for _, c := range cases {
+		for _, floatHisto := range []bool{true, false} {
+			t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
+				app := &bucketLimitAppender{Appender: resApp, limit: c.limit}
+				ts := int64(10 * time.Minute / time.Millisecond)
+				h := c.h
+				lbls := labels.FromStrings("__name__", "sparse_histogram_series")
+				var err error
+				if floatHisto {
+					_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
+				} else {
+					_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
+				}
+				if c.expectError {
+					require.Error(t, err)
+				} else {
+					require.NoError(t, err)
+				}
+				require.NoError(t, app.Commit())
+			})
+		}
+	}
+}
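The two limits in the test are chosen around the example's populated-bucket count: three positive plus three negative buckets, so `len(PositiveBuckets)+len(NegativeBuckets)` is 6 — above a limit of 3, below a limit of 10. Span lengths play no role in the check, only the populated buckets do (for integer histograms those slices hold delta-encoded counts, but only their length matters here). A trivial standalone check of that arithmetic:

package main

import "fmt"

func main() {
	positive := []int64{3, 0, 0} // delta-encoded populated buckets
	negative := []int64{3, 0, 0}
	total := len(positive) + len(negative)
	fmt.Println(total, total > 3, total > 10) // 6 true false: fails limit 3, passes limit 10
}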
@@ -242,15 +242,16 @@ func (s fhSample) Type() chunkenc.ValueType {
 type sampleRing struct {
 	delta int64

-	// Lookback buffers. We use buf for mixed samples, but one of the three
+	// Lookback buffers. We use iBuf for mixed samples, but one of the three
 	// concrete ones for homogenous samples. (Only one of the four bufs is
 	// allowed to be populated!) This avoids the overhead of the interface
 	// wrapper for the happy (and by far most common) case of homogenous
 	// samples.
-	buf   []tsdbutil.Sample
-	fBuf  []fSample
-	hBuf  []hSample
-	fhBuf []fhSample
+	iBuf     []tsdbutil.Sample
+	fBuf     []fSample
+	hBuf     []hSample
+	fhBuf    []fhSample
+	bufInUse bufType

 	i int // Position of most recent element in ring buffer.
 	f int // Position of first element in ring buffer.

@@ -259,6 +260,16 @@ type sampleRing struct {
 	it sampleRingIterator
 }

+type bufType int
+
+const (
+	noBuf bufType = iota // Nothing yet stored in sampleRing.
+	iBuf
+	fBuf
+	hBuf
+	fhBuf
+)
+
 // newSampleRing creates a new sampleRing. If you do not know the prefereed
 // value type yet, use a size of 0 (in which case the provided typ doesn't
 // matter). On the first add, a buffer of size 16 will be allocated with the

@@ -278,7 +289,7 @@ func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing {
 	case chunkenc.ValFloatHistogram:
 		r.fhBuf = make([]fhSample, size)
 	default:
-		r.buf = make([]tsdbutil.Sample, size)
+		r.iBuf = make([]tsdbutil.Sample, size)
 	}
 	return r
 }

@@ -287,6 +298,7 @@ func (r *sampleRing) reset() {
 	r.l = 0
 	r.i = -1
 	r.f = 0
+	r.bufInUse = noBuf
 }

 // Returns the current iterator. Invalidates previously returned iterators.

@@ -310,18 +322,18 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType {
 	if it.i >= it.r.l {
 		return chunkenc.ValNone
 	}
-	switch {
-	case len(it.r.fBuf) > 0:
+	switch it.r.bufInUse {
+	case fBuf:
 		s := it.r.atF(it.i)
 		it.t = s.t
 		it.f = s.f
 		return chunkenc.ValFloat
-	case len(it.r.hBuf) > 0:
+	case hBuf:
 		s := it.r.atH(it.i)
 		it.t = s.t
 		it.h = s.h
 		return chunkenc.ValHistogram
-	case len(it.r.fhBuf) > 0:
+	case fhBuf:
 		s := it.r.atFH(it.i)
 		it.t = s.t
 		it.fh = s.fh

@@ -372,8 +384,8 @@ func (it *sampleRingIterator) AtT() int64 {
 }

 func (r *sampleRing) at(i int) tsdbutil.Sample {
-	j := (r.f + i) % len(r.buf)
-	return r.buf[j]
+	j := (r.f + i) % len(r.iBuf)
+	return r.iBuf[j]
 }

 func (r *sampleRing) atF(i int) fSample {

@@ -397,91 +409,113 @@ func (r *sampleRing) atFH(i int) fhSample {
 // from this package (fSample, hSample, fhSample), call one of the specialized
 // methods addF, addH, or addFH for better performance.
 func (r *sampleRing) add(s tsdbutil.Sample) {
-	if len(r.buf) == 0 {
+	if r.bufInUse == noBuf {
+		// First sample.
+		switch s := s.(type) {
+		case fSample:
+			r.bufInUse = fBuf
+			r.fBuf = addF(s, r.fBuf, r)
+		case hSample:
+			r.bufInUse = hBuf
+			r.hBuf = addH(s, r.hBuf, r)
+		case fhSample:
+			r.bufInUse = fhBuf
+			r.fhBuf = addFH(s, r.fhBuf, r)
+		}
+		return
+	}
+	if r.bufInUse != iBuf {
 		// Nothing added to the interface buf yet. Let's check if we can
 		// stay specialized.
 		switch s := s.(type) {
 		case fSample:
-			if len(r.hBuf)+len(r.fhBuf) == 0 {
+			if r.bufInUse == fBuf {
 				r.fBuf = addF(s, r.fBuf, r)
 				return
 			}
 		case hSample:
-			if len(r.fBuf)+len(r.fhBuf) == 0 {
+			if r.bufInUse == hBuf {
 				r.hBuf = addH(s, r.hBuf, r)
 				return
 			}
 		case fhSample:
-			if len(r.fBuf)+len(r.hBuf) == 0 {
+			if r.bufInUse == fhBuf {
 				r.fhBuf = addFH(s, r.fhBuf, r)
 				return
 			}
 		}
 		// The new sample isn't a fit for the already existing
 		// ones. Copy the latter into the interface buffer where needed.
-		switch {
-		case len(r.fBuf) > 0:
+		switch r.bufInUse {
+		case fBuf:
 			for _, s := range r.fBuf {
-				r.buf = append(r.buf, s)
+				r.iBuf = append(r.iBuf, s)
 			}
 			r.fBuf = nil
-		case len(r.hBuf) > 0:
+		case hBuf:
 			for _, s := range r.hBuf {
-				r.buf = append(r.buf, s)
+				r.iBuf = append(r.iBuf, s)
 			}
 			r.hBuf = nil
-		case len(r.fhBuf) > 0:
+		case fhBuf:
 			for _, s := range r.fhBuf {
-				r.buf = append(r.buf, s)
+				r.iBuf = append(r.iBuf, s)
 			}
 			r.fhBuf = nil
 		}
+		r.bufInUse = iBuf
 	}
-	r.buf = addSample(s, r.buf, r)
+	r.iBuf = addSample(s, r.iBuf, r)
 }

 // addF is a version of the add method specialized for fSample.
 func (r *sampleRing) addF(s fSample) {
-	switch {
-	case len(r.buf) > 0:
-		// Already have interface samples. Add to the interface buf.
-		r.buf = addSample(s, r.buf, r)
-	case len(r.hBuf)+len(r.fhBuf) > 0:
+	switch r.bufInUse {
+	case fBuf: // Add to existing fSamples.
+		r.fBuf = addF(s, r.fBuf, r)
+	case noBuf: // Add first sample.
+		r.fBuf = addF(s, r.fBuf, r)
+		r.bufInUse = fBuf
+	case iBuf: // Already have interface samples. Add to the interface buf.
+		r.iBuf = addSample(s, r.iBuf, r)
+	default:
+		// Already have specialized samples that are not fSamples.
 		// Need to call the checked add method for conversion.
 		r.add(s)
-	default:
-		r.fBuf = addF(s, r.fBuf, r)
 	}
 }

 // addH is a version of the add method specialized for hSample.
 func (r *sampleRing) addH(s hSample) {
-	switch {
-	case len(r.buf) > 0:
-		// Already have interface samples. Add to the interface buf.
-		r.buf = addSample(s, r.buf, r)
-	case len(r.fBuf)+len(r.fhBuf) > 0:
-		// Already have samples that are not hSamples.
+	switch r.bufInUse {
+	case hBuf: // Add to existing hSamples.
+		r.hBuf = addH(s, r.hBuf, r)
+	case noBuf: // Add first sample.
+		r.hBuf = addH(s, r.hBuf, r)
+		r.bufInUse = hBuf
+	case iBuf: // Already have interface samples. Add to the interface buf.
+		r.iBuf = addSample(s, r.iBuf, r)
+	default:
+		// Already have specialized samples that are not hSamples.
 		// Need to call the checked add method for conversion.
 		r.add(s)
-	default:
-		r.hBuf = addH(s, r.hBuf, r)
 	}
 }

 // addFH is a version of the add method specialized for fhSample.
 func (r *sampleRing) addFH(s fhSample) {
-	switch {
-	case len(r.buf) > 0:
-		// Already have interface samples. Add to the interface buf.
-		r.buf = addSample(s, r.buf, r)
-	case len(r.fBuf)+len(r.hBuf) > 0:
-		// Already have samples that are not fhSamples.
+	switch r.bufInUse {
+	case fhBuf: // Add to existing fhSamples.
+		r.fhBuf = addFH(s, r.fhBuf, r)
+	case noBuf: // Add first sample.
+		r.fhBuf = addFH(s, r.fhBuf, r)
+		r.bufInUse = fhBuf
+	case iBuf: // Already have interface samples. Add to the interface buf.
+		r.iBuf = addSample(s, r.iBuf, r)
+	default:
+		// Already have specialized samples that are not fhSamples.
 		// Need to call the checked add method for conversion.
 		r.add(s)
-	default:
-		r.fhBuf = addFH(s, r.fhBuf, r)
 	}
 }

@@ -701,15 +735,15 @@ func (r *sampleRing) reduceDelta(delta int64) bool {
 		return true
 	}

-	switch {
-	case len(r.fBuf) > 0:
+	switch r.bufInUse {
+	case fBuf:
 		genericReduceDelta(r.fBuf, r)
-	case len(r.hBuf) > 0:
+	case hBuf:
 		genericReduceDelta(r.hBuf, r)
-	case len(r.fhBuf) > 0:
+	case fhBuf:
 		genericReduceDelta(r.fhBuf, r)
 	default:
-		genericReduceDelta(r.buf, r)
+		genericReduceDelta(r.iBuf, r)
 	}
 	return true
 }

@@ -733,12 +767,12 @@ func (r *sampleRing) nthLast(n int) (tsdbutil.Sample, bool) {
 		return fSample{}, false
 	}
 	i := r.l - n
-	switch {
-	case len(r.fBuf) > 0:
+	switch r.bufInUse {
+	case fBuf:
 		return r.atF(i), true
-	case len(r.hBuf) > 0:
+	case hBuf:
 		return r.atH(i), true
-	case len(r.fhBuf) > 0:
+	case fhBuf:
 		return r.atFH(i), true
 	default:
 		return r.at(i), true

@@ -751,15 +785,15 @@ func (r *sampleRing) samples() []tsdbutil.Sample {
 	k := r.f + r.l
 	var j int

-	switch {
-	case len(r.buf) > 0:
-		if k > len(r.buf) {
-			k = len(r.buf)
+	switch r.bufInUse {
+	case iBuf:
+		if k > len(r.iBuf) {
+			k = len(r.iBuf)
 			j = r.l - k + r.f
 		}
-		n := copy(res, r.buf[r.f:k])
-		copy(res[n:], r.buf[:j])
-	case len(r.fBuf) > 0:
+		n := copy(res, r.iBuf[r.f:k])
+		copy(res[n:], r.iBuf[:j])
+	case fBuf:
 		if k > len(r.fBuf) {
 			k = len(r.fBuf)
 			j = r.l - k + r.f

@@ -770,7 +804,7 @@ func (r *sampleRing) samples() []tsdbutil.Sample {
 		for i, s := range resF {
 			res[i] = s
 		}
-	case len(r.hBuf) > 0:
+	case hBuf:
 		if k > len(r.hBuf) {
 			k = len(r.hBuf)
 			j = r.l - k + r.f

@@ -781,7 +815,7 @@ func (r *sampleRing) samples() []tsdbutil.Sample {
 		for i, s := range resH {
 			res[i] = s
 		}
-	case len(r.fhBuf) > 0:
+	case fhBuf:
 		if k > len(r.fhBuf) {
 			k = len(r.fhBuf)
 			j = r.l - k + r.f
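The `bufInUse` field replaces the old "which slice is non-empty" probes with an explicit state: it starts at noBuf, moves to exactly one specialized state on the first add, and can only escalate to iBuf (never back, short of a reset) once a sample of a different kind arrives. A minimal standalone model of those transitions, mirroring the switch logic above (names reused for readability, this is not the package's code):

package main

import "fmt"

type bufType int

const (
	noBuf bufType = iota
	iBuf
	fBuf
	hBuf
	fhBuf
)

// next models the sampleRing state machine: the first sample picks the
// matching specialized buffer; a sample of a different kind escalates to
// the interface buffer, which is terminal until reset.
func next(state, sampleKind bufType) bufType {
	switch {
	case state == noBuf:
		return sampleKind
	case state == sampleKind || state == iBuf:
		return state
	default:
		return iBuf
	}
}

func main() {
	state := noBuf
	for _, k := range []bufType{fBuf, fBuf, hBuf, fBuf} {
		state = next(state, k)
	}
	fmt.Println(state == iBuf) // true: mixing floats and histograms falls back
}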
@@ -21,6 +21,9 @@ import (
 )

 // MemoizedSeriesIterator wraps an iterator with a buffer to look back the previous element.
+//
+// This iterator regards integer histograms as float histograms; calls to Seek() will never return chunkenc.Histogram.
+// This iterator deliberately does not implement chunkenc.Iterator.
 type MemoizedSeriesIterator struct {
 	it    chunkenc.Iterator
 	delta int64

@@ -31,12 +34,7 @@ type MemoizedSeriesIterator struct {
 	// Keep track of the previously returned value.
 	prevTime           int64
 	prevValue          float64
-	prevHistogram      *histogram.Histogram
 	prevFloatHistogram *histogram.FloatHistogram
-	// TODO(beorn7): MemoizedSeriesIterator is currently only used by the
-	// PromQL engine, which only works with FloatHistograms. For better
-	// performance, we could change MemoizedSeriesIterator to also only
-	// handle FloatHistograms.
 }

 // NewMemoizedEmptyIterator is like NewMemoizedIterator but it's initialised with an empty iterator.

@@ -66,11 +64,11 @@ func (b *MemoizedSeriesIterator) Reset(it chunkenc.Iterator) {

 // PeekPrev returns the previous element of the iterator. If there is none buffered,
 // ok is false.
-func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, ok bool) {
+func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, fh *histogram.FloatHistogram, ok bool) {
 	if b.prevTime == math.MinInt64 {
-		return 0, 0, nil, nil, false
+		return 0, 0, nil, false
 	}
-	return b.prevTime, b.prevValue, b.prevHistogram, b.prevFloatHistogram, true
+	return b.prevTime, b.prevValue, b.prevFloatHistogram, true
 }

 // Seek advances the iterator to the element at time t or greater.

@@ -83,8 +81,11 @@ func (b *MemoizedSeriesIterator) Seek(t int64) chunkenc.ValueType {
 		b.prevTime = math.MinInt64

 		b.valueType = b.it.Seek(t0)
-		if b.valueType == chunkenc.ValNone {
+		switch b.valueType {
+		case chunkenc.ValNone:
 			return chunkenc.ValNone
+		case chunkenc.ValHistogram:
+			b.valueType = chunkenc.ValFloatHistogram
 		}
 		b.lastTime = b.it.AtT()
 	}

@@ -100,7 +101,8 @@ func (b *MemoizedSeriesIterator) Seek(t int64) chunkenc.ValueType {
 	return chunkenc.ValNone
 }

-// Next advances the iterator to the next element.
+// Next advances the iterator to the next element. Note that this does not check whether the element being buffered is
+// within the time range of the current element and the duration of delta before.
 func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType {
 	// Keep track of the previous element.
 	switch b.valueType {

@@ -108,15 +110,9 @@ func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType {
 		return chunkenc.ValNone
 	case chunkenc.ValFloat:
 		b.prevTime, b.prevValue = b.it.At()
-		b.prevHistogram = nil
 		b.prevFloatHistogram = nil
-	case chunkenc.ValHistogram:
+	case chunkenc.ValHistogram, chunkenc.ValFloatHistogram:
 		b.prevValue = 0
-		b.prevTime, b.prevHistogram = b.it.AtHistogram()
-		_, b.prevFloatHistogram = b.it.AtFloatHistogram()
-	case chunkenc.ValFloatHistogram:
-		b.prevValue = 0
-		b.prevHistogram = nil
 		b.prevTime, b.prevFloatHistogram = b.it.AtFloatHistogram()
 	}

@@ -124,6 +120,9 @@ func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType {
 	if b.valueType != chunkenc.ValNone {
 		b.lastTime = b.it.AtT()
 	}
+	if b.valueType == chunkenc.ValHistogram {
+		b.valueType = chunkenc.ValFloatHistogram
+	}
 	return b.valueType
 }

@@ -132,21 +131,11 @@ func (b *MemoizedSeriesIterator) At() (int64, float64) {
 	return b.it.At()
 }

-// AtHistogram returns the current histogram element of the iterator.
-func (b *MemoizedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
-	return b.it.AtHistogram()
-}
-
 // AtFloatHistogram returns the current float-histogram element of the iterator.
 func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
 	return b.it.AtFloatHistogram()
 }

 // AtT returns the current timestamp of the iterator.
 func (b *MemoizedSeriesIterator) AtT() int64 {
 	return b.it.AtT()
 }

 // Err returns the last encountered error.
 func (b *MemoizedSeriesIterator) Err() error {
 	return b.it.Err()
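After this change the iterator normalises integer histograms to float histograms, so a caller only ever branches two ways on PeekPrev. A sketch of a consuming loop (assumes `it` is a *MemoizedSeriesIterator over some series, with `fmt` and `chunkenc` imported):

for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
	// vt is chunkenc.ValFloat or chunkenc.ValFloatHistogram here;
	// chunkenc.ValHistogram is never surfaced after this change.
	if t, v, fh, ok := it.PeekPrev(); ok {
		if fh != nil {
			fmt.Println("previous sample:", t, fh) // (float) histogram
		} else {
			fmt.Println("previous sample:", t, v) // plain float
		}
	}
}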
@@ -18,23 +18,34 @@ import (

 	"github.com/stretchr/testify/require"

+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 )

 func TestMemoizedSeriesIterator(t *testing.T) {
-	// TODO(beorn7): Include histograms in testing.
 	var it *MemoizedSeriesIterator

-	sampleEq := func(ets int64, ev float64) {
-		ts, v := it.At()
-		require.Equal(t, ets, ts, "timestamp mismatch")
-		require.Equal(t, ev, v, "value mismatch")
+	sampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram) {
+		if efh == nil {
+			ts, v := it.At()
+			require.Equal(t, ets, ts, "timestamp mismatch")
+			require.Equal(t, ev, v, "value mismatch")
+		} else {
+			ts, fh := it.AtFloatHistogram()
+			require.Equal(t, ets, ts, "timestamp mismatch")
+			require.Equal(t, efh, fh, "histogram mismatch")
+		}
 	}
-	prevSampleEq := func(ets int64, ev float64, eok bool) {
-		ts, v, _, _, ok := it.PeekPrev()
+	prevSampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram, eok bool) {
+		ts, v, fh, ok := it.PeekPrev()
 		require.Equal(t, eok, ok, "exist mismatch")
 		require.Equal(t, ets, ts, "timestamp mismatch")
-		require.Equal(t, ev, v, "value mismatch")
+		if efh == nil {
+			require.Equal(t, ev, v, "value mismatch")
+		} else {
+			require.Equal(t, efh, fh, "histogram mismatch")
+		}
 	}

 	it = NewMemoizedIterator(NewListSeriesIterator(samples{

@@ -46,31 +57,50 @@ func TestMemoizedSeriesIterator(t *testing.T) {
 		fSample{t: 99, f: 8},
 		fSample{t: 100, f: 9},
 		fSample{t: 101, f: 10},
+		hSample{t: 102, h: tsdbutil.GenerateTestHistogram(0)},
+		hSample{t: 103, h: tsdbutil.GenerateTestHistogram(1)},
+		fhSample{t: 104, fh: tsdbutil.GenerateTestFloatHistogram(2)},
+		fhSample{t: 199, fh: tsdbutil.GenerateTestFloatHistogram(3)},
+		hSample{t: 200, h: tsdbutil.GenerateTestHistogram(4)},
+		fhSample{t: 299, fh: tsdbutil.GenerateTestFloatHistogram(5)},
+		fSample{t: 300, f: 11},
+		hSample{t: 399, h: tsdbutil.GenerateTestHistogram(6)},
+		fSample{t: 400, f: 12},
 	}), 2)

 	require.Equal(t, it.Seek(-123), chunkenc.ValFloat, "seek failed")
-	sampleEq(1, 2)
-	prevSampleEq(0, 0, false)
-
-	require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed")
-	sampleEq(2, 3)
-	prevSampleEq(1, 2, true)
-
-	require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed")
-	require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed")
-	require.Equal(t, it.Next(), chunkenc.ValFloat, "next failed")
-	sampleEq(5, 6)
-	prevSampleEq(4, 5, true)
+	sampleEq(1, 2, nil)
+	prevSampleEq(0, 0, nil, false)

 	require.Equal(t, it.Seek(5), chunkenc.ValFloat, "seek failed")
-	sampleEq(5, 6)
-	prevSampleEq(4, 5, true)
+	sampleEq(5, 6, nil)
+	prevSampleEq(4, 5, nil, true)

-	require.Equal(t, it.Seek(101), chunkenc.ValFloat, "seek failed")
-	sampleEq(101, 10)
-	prevSampleEq(100, 9, true)
+	// Seek to a histogram sample with a previous float sample.
+	require.Equal(t, it.Seek(102), chunkenc.ValFloatHistogram, "seek failed")
+	sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0))
+	prevSampleEq(101, 10, nil, true)
+
+	// Attempt to seek backwards (no-op).
+	require.Equal(t, it.Seek(50), chunkenc.ValFloatHistogram, "seek failed")
+	sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0))
+	prevSampleEq(101, 10, nil, true)
+
+	// Seek to a float histogram sample with a previous histogram sample.
+	require.Equal(t, it.Seek(104), chunkenc.ValFloatHistogram, "seek failed")
+	sampleEq(104, 0, tsdbutil.GenerateTestFloatHistogram(2))
+	prevSampleEq(103, 0, tsdbutil.GenerateTestFloatHistogram(1), true)
+
+	// Seek to a float sample with a previous float histogram sample.
+	require.Equal(t, chunkenc.ValFloat, it.Seek(300), "seek failed")
+	sampleEq(300, 11, nil)
+	prevSampleEq(299, 0, tsdbutil.GenerateTestFloatHistogram(5), true)
+
+	// Seek to a float sample with a previous histogram sample.
+	require.Equal(t, chunkenc.ValFloat, it.Seek(400), "seek failed")
+	sampleEq(400, 12, nil)
+	prevSampleEq(399, 0, tsdbutil.GenerateTestFloatHistogram(6), true)

 	require.Equal(t, it.Next(), chunkenc.ValNone, "next succeeded unexpectedly")
 	require.Equal(t, it.Seek(1024), chunkenc.ValNone, "seek succeeded unexpectedly")
 }
@@ -76,6 +76,13 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal
 	return s
 }

+func (s *Storage) Notify() {
+	for _, q := range s.rws.queues {
+		// These should all be non blocking
+		q.watcher.Notify()
+	}
+}
+
 // ApplyConfig updates the state as the new config requires.
 func (s *Storage) ApplyConfig(conf *config.Config) error {
 	s.mtx.Lock()
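The fan-out above is safe only because each per-queue notification is non-blocking, as the comment notes. A common way to get that property (a sketch only — not the actual WAL watcher implementation) is a one-buffered channel drained by the watcher's run loop, with a select/default on the send:

// a sketch of a non-blocking Notify, assuming a watcher whose run loop
// drains a 1-buffered channel
type watcher struct {
	notify chan struct{} // make(chan struct{}, 1)
}

func (w *watcher) Notify() {
	select {
	case w.notify <- struct{}{}:
	default: // a notification is already pending; dropping is fine
	}
}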
@@ -297,9 +297,11 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
 	seriesIter := s.Series.Iterator(nil)
 	lastType := chunkenc.ValNone
 	for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() {
+		chunkCreated := false
 		if typ != lastType || i >= seriesToChunkEncoderSplit {
 			// Create a new chunk if the sample type changed or too many samples in the current one.
 			chks = appendChunk(chks, mint, maxt, chk)
+			chunkCreated = true
 			chk, err = chunkenc.NewEmptyChunk(typ.ChunkEncoding())
 			if err != nil {
 				return errChunksIterator{err: err}

@@ -330,6 +332,7 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
 			if ok, counterReset := app.AppendHistogram(t, h); !ok {
 				chks = appendChunk(chks, mint, maxt, chk)
 				histChunk := chunkenc.NewHistogramChunk()
+				chunkCreated = true
 				if counterReset {
 					histChunk.SetCounterResetHeader(chunkenc.CounterReset)
 				}

@@ -346,11 +349,15 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
 					panic("unexpected error while appending histogram")
 				}
 			}
+			if chunkCreated && h.CounterResetHint == histogram.GaugeType {
+				chk.(*chunkenc.HistogramChunk).SetCounterResetHeader(chunkenc.GaugeType)
+			}
 		case chunkenc.ValFloatHistogram:
 			t, fh = seriesIter.AtFloatHistogram()
 			if ok, counterReset := app.AppendFloatHistogram(t, fh); !ok {
 				chks = appendChunk(chks, mint, maxt, chk)
 				floatHistChunk := chunkenc.NewFloatHistogramChunk()
+				chunkCreated = true
 				if counterReset {
 					floatHistChunk.SetCounterResetHeader(chunkenc.CounterReset)
 				}

@@ -366,6 +373,9 @@ func (s *seriesToChunkEncoder) Iterator(it chunks.Iterator) chunks.Iterator {
 					panic("unexpected error while float appending histogram")
 				}
 			}
+			if chunkCreated && fh.CounterResetHint == histogram.GaugeType {
+				chk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(chunkenc.GaugeType)
+			}
 		default:
 			return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())}
 		}
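The two added guards share one rule: a chunk freshly cut for a gauge histogram always gets the GaugeType header (a gauge can go down without that implying a counter reset), while counter histograms keep UnknownCounterReset unless the failed append proved a reset. A condensed restatement as a hedged helper (hypothetical name, assuming the chunkenc and histogram packages as imported above):

// headerForNewChunk summarises the header decision this change implements
// for a newly created histogram chunk.
func headerForNewChunk(counterReset bool, hint histogram.CounterResetHint) chunkenc.CounterResetHeader {
	if hint == histogram.GaugeType {
		return chunkenc.GaugeType
	}
	if counterReset {
		return chunkenc.CounterReset
	}
	return chunkenc.UnknownCounterReset
}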
@@ -126,14 +126,13 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
 }

 type histogramTest struct {
-	samples              []tsdbutil.Sample
-	expectedChunks       int
-	expectedCounterReset bool
+	samples                     []tsdbutil.Sample
+	expectedCounterResetHeaders []chunkenc.CounterResetHeader
 }

 func TestHistogramSeriesToChunks(t *testing.T) {
 	h1 := &histogram.Histogram{
-		Count:         3,
+		Count:         7,
 		ZeroCount:     2,
 		ZeroThreshold: 0.001,
 		Sum:           100,

@@ -158,7 +157,7 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 	}
 	// Implicit counter reset by reduction in buckets, not appendable.
 	h2down := &histogram.Histogram{
-		Count:         8,
+		Count:         10,
 		ZeroCount:     2,
 		ZeroThreshold: 0.001,
 		Sum:           100,

@@ -171,7 +170,7 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 	}

 	fh1 := &histogram.FloatHistogram{
-		Count:         4,
+		Count:         6,
 		ZeroCount:     2,
 		ZeroThreshold: 0.001,
 		Sum:           100,

@@ -183,7 +182,7 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 	}
 	// Appendable to fh1.
 	fh2 := &histogram.FloatHistogram{
-		Count:         15,
+		Count:         17,
 		ZeroCount:     2,
 		ZeroThreshold: 0.001,
 		Sum:           100,

@@ -196,7 +195,7 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 	}
 	// Implicit counter reset by reduction in buckets, not appendable.
 	fh2down := &histogram.FloatHistogram{
-		Count:         13,
+		Count:         15,
 		ZeroCount:     2,
 		ZeroThreshold: 0.001,
 		Sum:           100,

@@ -208,6 +207,60 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 		PositiveBuckets: []float64{2, 2, 7, 2},
 	}

+	// Gauge histogram.
+	gh1 := &histogram.Histogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            7,
+		ZeroCount:        2,
+		ZeroThreshold:    0.001,
+		Sum:              100,
+		Schema:           0,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+		},
+		PositiveBuckets: []int64{2, 1}, // Abs: 2, 3
+	}
+	gh2 := &histogram.Histogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            12,
+		ZeroCount:        2,
+		ZeroThreshold:    0.001,
+		Sum:              100,
+		Schema:           0,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []int64{2, 1, -2, 3}, // Abs: 2, 3, 1, 4
+	}
+
+	// Float gauge histogram.
+	gfh1 := &histogram.FloatHistogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            6,
+		ZeroCount:        2,
+		ZeroThreshold:    0.001,
+		Sum:              100,
+		Schema:           0,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+		},
+		PositiveBuckets: []float64{3, 1},
+	}
+	gfh2 := &histogram.FloatHistogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            17,
+		ZeroCount:        2,
+		ZeroThreshold:    0.001,
+		Sum:              100,
+		Schema:           0,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []float64{4, 2, 7, 2},
+	}
+
 	staleHistogram := &histogram.Histogram{
 		Sum: math.Float64frombits(value.StaleNaN),
 	}

@@ -220,74 +273,70 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 			samples: []tsdbutil.Sample{
 				hSample{t: 1, h: h1},
 			},
-			expectedChunks: 1,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two histograms encoded to a single chunk": {
 			samples: []tsdbutil.Sample{
 				hSample{t: 1, h: h1},
 				hSample{t: 2, h: h2},
 			},
-			expectedChunks: 1,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two histograms encoded to two chunks": {
 			samples: []tsdbutil.Sample{
 				hSample{t: 1, h: h2},
 				hSample{t: 2, h: h1},
 			},
-			expectedChunks:       2,
-			expectedCounterReset: true,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
 		},
 		"histogram and stale sample encoded to two chunks": {
 			samples: []tsdbutil.Sample{
 				hSample{t: 1, h: staleHistogram},
 				hSample{t: 2, h: h1},
 			},
-			expectedChunks: 2,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"histogram and reduction in bucket encoded to two chunks": {
 			samples: []tsdbutil.Sample{
 				hSample{t: 1, h: h1},
 				hSample{t: 2, h: h2down},
 			},
-			expectedChunks:       2,
-			expectedCounterReset: true,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
 		},
 		// Float histograms.
 		"single float histogram to single chunk": {
 			samples: []tsdbutil.Sample{
 				fhSample{t: 1, fh: fh1},
 			},
-			expectedChunks: 1,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two float histograms encoded to a single chunk": {
 			samples: []tsdbutil.Sample{
 				fhSample{t: 1, fh: fh1},
 				fhSample{t: 2, fh: fh2},
 			},
-			expectedChunks: 1,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two float histograms encoded to two chunks": {
 			samples: []tsdbutil.Sample{
 				fhSample{t: 1, fh: fh2},
 				fhSample{t: 2, fh: fh1},
 			},
-			expectedChunks:       2,
-			expectedCounterReset: true,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
 		},
 		"float histogram and stale sample encoded to two chunks": {
 			samples: []tsdbutil.Sample{
 				fhSample{t: 1, fh: staleFloatHistogram},
 				fhSample{t: 2, fh: fh1},
 			},
-			expectedChunks: 2,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"float histogram and reduction in bucket encoded to two chunks": {
 			samples: []tsdbutil.Sample{
 				fhSample{t: 1, fh: fh1},
 				fhSample{t: 2, fh: fh2down},
 			},
-			expectedChunks:       2,
-			expectedCounterReset: true,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
 		},
 		// Mixed.
 		"histogram and float histogram encoded to two chunks": {

@@ -295,21 +344,61 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				hSample{t: 1, h: h1},
 				fhSample{t: 2, fh: fh2},
 			},
-			expectedChunks: 2,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"float histogram and histogram encoded to two chunks": {
 			samples: []tsdbutil.Sample{
 				fhSample{t: 1, fh: fh1},
 				hSample{t: 2, h: h2},
 			},
-			expectedChunks: 2,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"histogram and stale float histogram encoded to two chunks": {
 			samples: []tsdbutil.Sample{
 				hSample{t: 1, h: h1},
 				fhSample{t: 2, fh: staleFloatHistogram},
 			},
-			expectedChunks: 2,
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
+		"single gauge histogram encoded to one chunk": {
+			samples: []tsdbutil.Sample{
+				hSample{t: 1, h: gh1},
+			},
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
+		},
+		"two gauge histograms encoded to one chunk when counter increases": {
+			samples: []tsdbutil.Sample{
+				hSample{t: 1, h: gh1},
+				hSample{t: 2, h: gh2},
+			},
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
+		},
+		"two gauge histograms encoded to one chunk when counter decreases": {
+			samples: []tsdbutil.Sample{
+				hSample{t: 1, h: gh2},
+				hSample{t: 2, h: gh1},
+			},
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
+		},
+		"single gauge float histogram encoded to one chunk": {
+			samples: []tsdbutil.Sample{
+				fhSample{t: 1, fh: gfh1},
+			},
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
+		},
+		"two float gauge histograms encoded to one chunk when counter increases": {
+			samples: []tsdbutil.Sample{
+				fhSample{t: 1, fh: gfh1},
+				fhSample{t: 2, fh: gfh2},
+			},
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
+		},
+		"two float gauge histograms encoded to one chunk when counter decreases": {
+			samples: []tsdbutil.Sample{
+				fhSample{t: 1, fh: gfh2},
+				fhSample{t: 2, fh: gfh1},
+			},
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
+		},
 	}

@@ -322,13 +411,24 @@ func TestHistogramSeriesToChunks(t *testing.T) {

 func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
 	lbs := labels.FromStrings("__name__", "up", "instance", "localhost:8080")
-	series := NewListSeries(lbs, test.samples)
+	copiedSamples := []tsdbutil.Sample{}
+	for _, s := range test.samples {
+		switch cs := s.(type) {
+		case hSample:
+			copiedSamples = append(copiedSamples, hSample{t: cs.t, h: cs.h.Copy()})
+		case fhSample:
+			copiedSamples = append(copiedSamples, fhSample{t: cs.t, fh: cs.fh.Copy()})
+		default:
+			t.Error("internal error, unexpected type")
+		}
+	}
+	series := NewListSeries(lbs, copiedSamples)
 	encoder := NewSeriesToChunkEncoder(series)
 	require.EqualValues(t, lbs, encoder.Labels())

 	chks, err := ExpandChunks(encoder.Iterator(nil))
 	require.NoError(t, err)
-	require.Equal(t, test.expectedChunks, len(chks))
+	require.Equal(t, len(test.expectedCounterResetHeaders), len(chks))

 	// Decode all encoded samples and assert they are equal to the original ones.
 	encodedSamples := expandHistogramSamples(chks)

@@ -339,8 +439,10 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
 		case hSample:
 			encodedSample, ok := encodedSamples[i].(hSample)
 			require.True(t, ok, "expect histogram", fmt.Sprintf("at idx %d", i))
-			// Ignore counter reset here, will check on chunk level.
-			encodedSample.h.CounterResetHint = histogram.UnknownCounterReset
+			// Ignore counter reset if not gauge here, will check on chunk level.
+			if expectedSample.h.CounterResetHint != histogram.GaugeType {
+				encodedSample.h.CounterResetHint = histogram.UnknownCounterReset
+			}
 			if value.IsStaleNaN(expectedSample.h.Sum) {
 				require.True(t, value.IsStaleNaN(encodedSample.h.Sum), fmt.Sprintf("at idx %d", i))
 				continue

@@ -349,8 +451,10 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
 		case fhSample:
 			encodedSample, ok := encodedSamples[i].(fhSample)
 			require.True(t, ok, "expect float histogram", fmt.Sprintf("at idx %d", i))
-			// Ignore counter reset here, will check on chunk level.
-			encodedSample.fh.CounterResetHint = histogram.UnknownCounterReset
+			// Ignore counter reset if not gauge here, will check on chunk level.
+			if expectedSample.fh.CounterResetHint != histogram.GaugeType {
+				encodedSample.fh.CounterResetHint = histogram.UnknownCounterReset
+			}
 			if value.IsStaleNaN(expectedSample.fh.Sum) {
 				require.True(t, value.IsStaleNaN(encodedSample.fh.Sum), fmt.Sprintf("at idx %d", i))
 				continue

@@ -361,15 +465,8 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
 		}
 	}

-	// If a counter reset hint is expected, it can only be found in the second chunk.
-	// Otherwise, we assert an unknown counter reset hint in all chunks.
-	if test.expectedCounterReset {
-		require.Equal(t, chunkenc.UnknownCounterReset, getCounterResetHint(chks[0]))
-		require.Equal(t, chunkenc.CounterReset, getCounterResetHint(chks[1]))
-	} else {
-		for _, chk := range chks {
-			require.Equal(t, chunkenc.UnknownCounterReset, getCounterResetHint(chk))
-		}
+	for i, expectedCounterResetHint := range test.expectedCounterResetHeaders {
+		require.Equal(t, expectedCounterResetHint, getCounterResetHint(chks[i]), fmt.Sprintf("chunk at index %d", i))
 	}
@ -665,7 +665,7 @@ func (db *DB) truncate(mint int64) error {
        }

        seg, ok := db.deleted[id]
        return ok && seg >= first
        return ok && seg > last
    }

    db.metrics.checkpointCreationTotal.Inc()

@ -687,7 +687,7 @@ func (db *DB) truncate(mint int64) error {
    // The checkpoint is written and segments before it are truncated, so we
    // no longer need to track deleted series that were being kept around.
    for ref, segment := range db.deleted {
        if segment < first {
        if segment <= last {
            delete(db.deleted, ref)
        }
    }
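A reading aid for the two hunks above (not part of the upstream patch): `first` and `last` are the WAL segment bounds being checkpointed, and `db.deleted` maps a deleted series to the last segment that still contains its samples. The corrected predicates keep a series only while data for it survives past `last`, and garbage-collect the tracking entry as soon as the checkpoint covers it. A minimal self-contained sketch of the keep rule:

    package main

    import "fmt"

    // keep mirrors the corrected checkpoint predicate: carry a deleted series
    // only while its newest referencing segment lies beyond `last`.
    func keep(deleted map[uint64]int, id uint64, last int) bool {
        seg, ok := deleted[id]
        return ok && seg > last
    }

    func main() {
        deleted := map[uint64]int{7: 3} // series 7 last referenced in segment 3
        fmt.Println(keep(deleted, 7, 2)) // true: segment 3 is not yet truncated
        fmt.Println(keep(deleted, 7, 3)) // false: fully covered by the checkpoint
    }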
@ -358,9 +358,9 @@ func counterResetInAnyFloatBucket(oldBuckets []xorValue, newBuckets []float64, o

        if oldIdx <= newIdx {
            // Moving ahead old bucket and span by 1 index.
            if oldInsideSpanIdx == oldSpans[oldSpanSliceIdx].Length-1 {
            if oldInsideSpanIdx+1 >= oldSpans[oldSpanSliceIdx].Length {
                // Current span is over.
                oldSpanSliceIdx++
                oldSpanSliceIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldSpans)
                oldInsideSpanIdx = 0
                if oldSpanSliceIdx >= len(oldSpans) {
                    // All old spans are over.

@ -377,9 +377,9 @@ func counterResetInAnyFloatBucket(oldBuckets []xorValue, newBuckets []float64, o

        if oldIdx > newIdx {
            // Moving ahead new bucket and span by 1 index.
            if newInsideSpanIdx == newSpans[newSpanSliceIdx].Length-1 {
            if newInsideSpanIdx+1 >= newSpans[newSpanSliceIdx].Length {
                // Current span is over.
                newSpanSliceIdx++
                newSpanSliceIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newSpans)
                newInsideSpanIdx = 0
                if newSpanSliceIdx >= len(newSpans) {
                    // All new spans are over.
@ -365,6 +365,64 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
    }
}

func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
    h1 := &histogram.FloatHistogram{
        Schema:        0,
        Count:         21,
        Sum:           1234.5,
        ZeroThreshold: 0.001,
        ZeroCount:     4,
        PositiveSpans: []histogram.Span{
            {Offset: 0, Length: 4},
            {Offset: 0, Length: 0},
            {Offset: 0, Length: 3},
        },
        PositiveBuckets: []float64{1, 2, 1, 1, 1, 1, 1},
        NegativeSpans: []histogram.Span{
            {Offset: 1, Length: 4},
            {Offset: 2, Length: 0},
            {Offset: 2, Length: 3},
        },
        NegativeBuckets: []float64{1, 2, 1, 2, 2, 2, 2},
    }
    h2 := &histogram.FloatHistogram{
        Schema:        0,
        Count:         37,
        Sum:           2345.6,
        ZeroThreshold: 0.001,
        ZeroCount:     5,
        PositiveSpans: []histogram.Span{
            {Offset: 0, Length: 4},
            {Offset: 0, Length: 0},
            {Offset: 0, Length: 3},
        },
        PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
        NegativeSpans: []histogram.Span{
            {Offset: 1, Length: 4},
            {Offset: 2, Length: 0},
            {Offset: 2, Length: 3},
        },
        NegativeBuckets: []float64{1, 4, 2, 7, 5, 5, 2},
    }

    c := Chunk(NewFloatHistogramChunk())

    // Create fresh appender and add the first histogram.
    app, err := c.Appender()
    require.NoError(t, err)
    require.Equal(t, 0, c.NumSamples())

    app.AppendFloatHistogram(1, h1)
    require.Equal(t, 1, c.NumSamples())
    hApp, _ := app.(*FloatHistogramAppender)

    pI, nI, okToAppend, counterReset := hApp.Appendable(h2)
    require.Empty(t, pI)
    require.Empty(t, nI)
    require.True(t, okToAppend)
    require.False(t, counterReset)
}

func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
    c := Chunk(NewFloatHistogramChunk())
@ -386,9 +386,9 @@ func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans

        if oldIdx <= newIdx {
            // Moving ahead old bucket and span by 1 index.
            if oldInsideSpanIdx == oldSpans[oldSpanSliceIdx].Length-1 {
            if oldInsideSpanIdx+1 >= oldSpans[oldSpanSliceIdx].Length {
                // Current span is over.
                oldSpanSliceIdx++
                oldSpanSliceIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldSpans)
                oldInsideSpanIdx = 0
                if oldSpanSliceIdx >= len(oldSpans) {
                    // All old spans are over.

@ -405,9 +405,9 @@ func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans

        if oldIdx > newIdx {
            // Moving ahead new bucket and span by 1 index.
            if newInsideSpanIdx == newSpans[newSpanSliceIdx].Length-1 {
            if newInsideSpanIdx+1 >= newSpans[newSpanSliceIdx].Length {
                // Current span is over.
                newSpanSliceIdx++
                newSpanSliceIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newSpans)
                newInsideSpanIdx = 0
                if newSpanSliceIdx >= len(newSpans) {
                    // All new spans are over.
@ -487,3 +487,10 @@ func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterR
        return histogram.UnknownCounterReset
    }
}

// Handle pathological case of empty span when advancing span idx.
func nextNonEmptySpanSliceIdx(idx int, spans []histogram.Span) (newIdx int) {
    for idx++; idx < len(spans) && spans[idx].Length == 0; idx++ { //nolint:revive // This "empty" block is intentional
    }
    return idx
}
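The helper above exists because a span slice may legally contain zero-length spans (exactly the shape exercised by the `WithEmptySpan` tests in this diff); a plain `idx++` would leave the cursor parked on an empty span and misalign bucket iteration. A small in-package illustration of the expected behaviour, assuming the `histogram.Span` type used throughout:

    spans := []histogram.Span{
        {Offset: 0, Length: 4},
        {Offset: 0, Length: 0}, // pathological empty span
        {Offset: 0, Length: 3},
    }
    // Advancing from span 0 must land on span 2, skipping the empty span 1.
    idx := nextNonEmptySpanSliceIdx(0, spans) // idx == 2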
@ -14,6 +14,7 @@
package chunkenc

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/require"

@ -387,6 +388,64 @@ func TestHistogramChunkAppendable(t *testing.T) {
    }
}

func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) {
    h1 := &histogram.Histogram{
        Schema:        0,
        Count:         21,
        Sum:           1234.5,
        ZeroThreshold: 0.001,
        ZeroCount:     4,
        PositiveSpans: []histogram.Span{
            {Offset: 0, Length: 4},
            {Offset: 0, Length: 0},
            {Offset: 0, Length: 3},
        },
        PositiveBuckets: []int64{1, 1, -1, 0, 0, 0, 0},
        NegativeSpans: []histogram.Span{
            {Offset: 1, Length: 4},
            {Offset: 2, Length: 0},
            {Offset: 2, Length: 3},
        },
        NegativeBuckets: []int64{1, 1, -1, 1, 0, 0, 0},
    }
    h2 := &histogram.Histogram{
        Schema:        0,
        Count:         37,
        Sum:           2345.6,
        ZeroThreshold: 0.001,
        ZeroCount:     5,
        PositiveSpans: []histogram.Span{
            {Offset: 0, Length: 4},
            {Offset: 0, Length: 0},
            {Offset: 0, Length: 3},
        },
        PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
        NegativeSpans: []histogram.Span{
            {Offset: 1, Length: 4},
            {Offset: 2, Length: 0},
            {Offset: 2, Length: 3},
        },
        NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
    }

    c := Chunk(NewHistogramChunk())

    // Create fresh appender and add the first histogram.
    app, err := c.Appender()
    require.NoError(t, err)
    require.Equal(t, 0, c.NumSamples())

    app.AppendHistogram(1, h1)
    require.Equal(t, 1, c.NumSamples())
    hApp, _ := app.(*HistogramAppender)

    pI, nI, okToAppend, counterReset := hApp.Appendable(h2)
    require.Empty(t, pI)
    require.Empty(t, nI)
    require.True(t, okToAppend)
    require.False(t, counterReset)
}

func TestAtFloatHistogram(t *testing.T) {
    input := []histogram.Histogram{
        {

@ -514,6 +573,10 @@ func TestAtFloatHistogram(t *testing.T) {
    app, err := chk.Appender()
    require.NoError(t, err)
    for i := range input {
        if i > 0 {
            _, _, okToAppend, _ := app.(*HistogramAppender).Appendable(&input[i])
            require.True(t, okToAppend, fmt.Sprintf("idx: %d", i))
        }
        app.AppendHistogram(int64(i), &input[i])
    }
    it := chk.Iterator(nil)
@ -896,10 +896,10 @@ func debugOutOfOrderChunks(chks []chunks.Meta, logger log.Logger) {
    }

    // Looks like the chunk is out of order.
    prevSafeChk, prevIsSafeChk := prevChk.Chunk.(*safeChunk)
    currSafeChk, currIsSafeChk := currChk.Chunk.(*safeChunk)
    prevSafeChk, prevIsSafeChk := prevChk.Chunk.(*safeHeadChunk)
    currSafeChk, currIsSafeChk := currChk.Chunk.(*safeHeadChunk)

    // Get info out of safeChunk (if possible).
    // Get info out of safeHeadChunk (if possible).
    prevHeadChunkID := chunks.HeadChunkID(0)
    currHeadChunkID := chunks.HeadChunkID(0)
    prevLabels := labels.Labels{}
tsdb/db.go
@ -80,6 +80,7 @@ func DefaultOptions() *Options {
    NoLockfile:                 false,
    AllowOverlappingCompaction: true,
    WALCompression:             false,
    SamplesPerChunk:            DefaultSamplesPerChunk,
    StripeSize:                 DefaultStripeSize,
    HeadChunksWriteBufferSize:  chunks.DefaultWriteBufferSize,
    IsolationDisabled:          defaultIsolationDisabled,

@ -162,6 +163,9 @@ type Options struct {
    // HeadChunksWriteQueueSize configures the size of the chunk write queue used in the head chunks mapper.
    HeadChunksWriteQueueSize int

    // SamplesPerChunk configures the target number of samples per chunk.
    SamplesPerChunk int

    // SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series.
    // It is always a no-op in Prometheus and mainly meant for external users who import TSDB.
    SeriesLifecycleCallback SeriesLifecycleCallback

@ -265,6 +269,8 @@ type DB struct {
    // out-of-order compaction and vertical queries.
    oooWasEnabled atomic.Bool

    writeNotified wlog.WriteNotified

    registerer prometheus.Registerer
}

@ -678,6 +684,9 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
    if opts.HeadChunksWriteQueueSize < 0 {
        opts.HeadChunksWriteQueueSize = chunks.DefaultWriteQueueSize
    }
    if opts.SamplesPerChunk <= 0 {
        opts.SamplesPerChunk = DefaultSamplesPerChunk
    }
    if opts.MaxBlockChunkSegmentSize <= 0 {
        opts.MaxBlockChunkSegmentSize = chunks.DefaultChunkSegmentSize
    }

@ -823,6 +832,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
    headOpts.ChunkWriteBufferSize = opts.HeadChunksWriteBufferSize
    headOpts.ChunkEndTimeVariance = opts.HeadChunksEndTimeVariance
    headOpts.ChunkWriteQueueSize = opts.HeadChunksWriteQueueSize
    headOpts.SamplesPerChunk = opts.SamplesPerChunk
    headOpts.StripeSize = opts.StripeSize
    headOpts.SeriesCallback = opts.SeriesLifecycleCallback
    headOpts.EnableExemplarStorage = opts.EnableExemplarStorage

@ -845,6 +855,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
    if err != nil {
        return nil, err
    }
    db.head.writeNotified = db.writeNotified

    // Register metrics after assigning the head block.
    db.metrics = newDBMetrics(db, r)

@ -2074,6 +2085,12 @@ func (db *DB) CleanTombstones() (err error) {
    return nil
}

func (db *DB) SetWriteNotified(wn wlog.WriteNotified) {
    db.writeNotified = wn
    // It's possible we already created the head struct, so we should also set the WN for that.
    db.head.writeNotified = wn
}

func isBlockDir(fi fs.DirEntry) bool {
    if !fi.IsDir() {
        return false
tsdb/head.go
@ -136,6 +136,8 @@ type Head struct {
    stats *HeadStats
    reg   prometheus.Registerer

    writeNotified wlog.WriteNotified

    memTruncationInProcess atomic.Bool
}

@ -166,6 +168,8 @@ type HeadOptions struct {
    ChunkEndTimeVariance float64
    ChunkWriteQueueSize  int

    SamplesPerChunk int

    // StripeSize sets the number of entries in the hash map, it must be a power of 2.
    // A larger StripeSize will allocate more memory up-front, but will increase performance when handling a large number of series.
    // A smaller StripeSize reduces the memory allocated, but can decrease performance with large number of series.

@ -189,6 +193,8 @@ type HeadOptions struct {
const (
    // DefaultOutOfOrderCapMax is the default maximum size of an in-memory out-of-order chunk.
    DefaultOutOfOrderCapMax int64 = 32
    // DefaultSamplesPerChunk provides a default target number of samples per chunk.
    DefaultSamplesPerChunk = 120
)

func DefaultHeadOptions() *HeadOptions {

@ -199,6 +205,7 @@ func DefaultHeadOptions() *HeadOptions {
    ChunkWriteBufferSize: chunks.DefaultWriteBufferSize,
    ChunkEndTimeVariance: 0,
    ChunkWriteQueueSize:  chunks.DefaultWriteQueueSize,
    SamplesPerChunk:      DefaultSamplesPerChunk,
    StripeSize:           DefaultStripeSize,
    SeriesCallback:       &noopSeriesLifecycleCallback{},
    IsolationDisabled:    defaultIsolationDisabled,

@ -998,7 +1005,7 @@ func (h *Head) DisableNativeHistograms() {
}

// PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names.
func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats {
func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats {
    h.cardinalityMutex.Lock()
    defer h.cardinalityMutex.Unlock()
    currentTime := time.Duration(time.Now().Unix()) * time.Second

@ -1009,7 +1016,7 @@ func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.Postings
    if h.cardinalityCache != nil {
        return h.cardinalityCache
    }
    h.cardinalityCache = h.postings.Stats(statsByLabelName)
    h.cardinalityCache = h.postings.Stats(statsByLabelName, limit)
    h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second

    return h.cardinalityCache

@ -1239,9 +1246,9 @@ func (h *Head) truncateWAL(mint int64) error {
            return true
        }
        h.deletedMtx.Lock()
        _, ok := h.deleted[id]
        keepUntil, ok := h.deleted[id]
        h.deletedMtx.Unlock()
        return ok
        return ok && keepUntil > last
    }
    h.metrics.checkpointCreationTotal.Inc()
    if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil {

@ -1262,7 +1269,7 @@ func (h *Head) truncateWAL(mint int64) error {
    // longer need to track deleted series that are before it.
    h.deletedMtx.Lock()
    for ref, segment := range h.deleted {
        if segment < first {
        if segment <= last {
            delete(h.deleted, ref)
        }
    }

@ -1349,12 +1356,12 @@ type Stats struct {

// Stats returns important current HEAD statistics. Note that it is expensive to
// calculate these.
func (h *Head) Stats(statsByLabelName string) *Stats {
func (h *Head) Stats(statsByLabelName string, limit int) *Stats {
    return &Stats{
        NumSeries:         h.NumSeries(),
        MaxTime:           h.MaxTime(),
        MinTime:           h.MinTime(),
        IndexPostingStats: h.PostingsCardinalityStats(statsByLabelName),
        IndexPostingStats: h.PostingsCardinalityStats(statsByLabelName, limit),
    }
}

@ -1634,7 +1641,7 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e

func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) {
    s, created, err := h.series.getOrSet(hash, lset, func() *memSeries {
        return newMemSeries(lset, id, labels.StableHash(lset), h.opts.ChunkEndTimeVariance, h.opts.IsolationDisabled)
        return newMemSeries(lset, id, labels.StableHash(lset), h.opts.ChunkEndTimeVariance, h.opts.IsolationDisabled, h.opts.SamplesPerChunk)
    })
    if err != nil {
        return nil, false, err

@ -1949,7 +1956,8 @@ type memSeries struct {
    // to spread chunks writing across time. Doesn't apply to the last chunk of the chunk range. 0 to disable variance.
    chunkEndTimeVariance float64

    nextAt int64 // Timestamp at which to cut the next chunk.
    samplesPerChunk int   // Target number of samples per chunk.
    nextAt          int64 // Timestamp at which to cut the next chunk.

    // We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates.
    lastValue float64

@ -1977,13 +1985,14 @@ type memSeriesOOOFields struct {
    firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0].
}

func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, chunkEndTimeVariance float64, isolationDisabled bool) *memSeries {
func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, chunkEndTimeVariance float64, isolationDisabled bool, samplesPerChunk int) *memSeries {
    s := &memSeries{
        lset:                 lset,
        ref:                  id,
        nextAt:               math.MinInt64,
        chunkEndTimeVariance: chunkEndTimeVariance,
        shardHash:            shardHash,
        samplesPerChunk:      samplesPerChunk,
    }
    if !isolationDisabled {
        s.txs = newTxRing(4)
@ -849,6 +849,10 @@ func (a *headAppender) Commit() (err error) {
        return errors.Wrap(err, "write to WAL")
    }

    if a.head.writeNotified != nil {
        a.head.writeNotified.Notify()
    }

    // No errors logging to WAL, so pass the exemplars along to the in memory storage.
    for _, e := range a.exemplars {
        s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref))
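Commit now fires an optional post-WAL-write hook. This is what lets the WAL watcher (see the `WriteNotified` interface added to `tsdb/wlog` at the end of this diff) react to new data instead of relying purely on polling. A hypothetical implementation, wired through the new `DB.SetWriteNotified` shown earlier; names here are illustrative only:

    // countingNotifier is a hypothetical wlog.WriteNotified implementation
    // that just counts committed appends (uses sync/atomic).
    type countingNotifier struct{ writes atomic.Int64 }

    // Notify is called by headAppender.Commit after a successful WAL write.
    func (c *countingNotifier) Notify() { c.writes.Add(1) }

    // Wiring sketch: db.SetWriteNotified(&countingNotifier{})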
@ -1339,11 +1343,6 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
func (s *memSeries) appendPreprocessor(
    t int64, e chunkenc.Encoding, chunkDiskMapper chunkDiskMapper, chunkRange int64,
) (c *memChunk, sampleInOrder, chunkCreated bool) {
    // Based on Gorilla white papers this offers near-optimal compression ratio
    // so anything bigger that this has diminishing returns and increases
    // the time range within which we have to decompress all samples.
    const samplesPerChunk = 120

    c = s.head()

    if c == nil {

@ -1380,7 +1379,7 @@ func (s *memSeries) appendPreprocessor(
    // for this chunk that will try to make samples equally distributed within
    // the remaining chunks in the current chunk range.
    // At latest it must happen at the timestamp set when the chunk was cut.
    if numSamples == samplesPerChunk/4 {
    if numSamples == s.samplesPerChunk/4 {
        maxNextAt := s.nextAt

        s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, maxNextAt)

@ -1391,7 +1390,7 @@ func (s *memSeries) appendPreprocessor(
    // Since we assume that the rate is higher, we're being conservative and cutting at 2*samplesPerChunk
    // as we expect more chunks to come.
    // Note that next chunk will have its nextAt recalculated for the new rate.
    if t >= s.nextAt || numSamples >= samplesPerChunk*2 {
    if t >= s.nextAt || numSamples >= s.samplesPerChunk*2 {
        c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
        chunkCreated = true
    }
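With the constant gone, the 120-sample target is carried per series and seeded from `HeadOptions.SamplesPerChunk`, which the new hidden `--storage.tsdb.samples-per-chunk` flag feeds. The cut policy itself is unchanged; condensed into a sketch of the two triggers (field names match the diff, everything else elided):

    // Re-plan the chunk end once a quarter of the target has arrived, then
    // cut when the planned end time passes or the chunk already holds twice
    // the target (fast-moving series).
    replan := numSamples == s.samplesPerChunk/4
    cut := t >= s.nextAt || numSamples >= s.samplesPerChunk*2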
@ -360,7 +360,7 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
    }
    s.Unlock()

    return &safeChunk{
    return &safeHeadChunk{
        Chunk: chk,
        s:     s,
        cid:   cid,

@ -656,15 +656,15 @@ func (b boundedIterator) Seek(t int64) chunkenc.ValueType {
    return b.Iterator.Seek(t)
}

// safeChunk makes sure that the chunk can be accessed without a race condition
type safeChunk struct {
// safeHeadChunk makes sure that the chunk can be accessed without a race condition
type safeHeadChunk struct {
    chunkenc.Chunk
    s        *memSeries
    cid      chunks.HeadChunkID
    isoState *isolationState
}

func (c *safeChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator {
func (c *safeHeadChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator {
    c.s.Lock()
    it := c.s.iterator(c.cid, c.Chunk, c.isoState, reuseIter)
    c.s.Unlock()
@ -286,7 +286,7 @@ func BenchmarkLoadWAL(b *testing.B) {
    for k := 0; k < c.batches*c.seriesPerBatch; k++ {
        // Create one mmapped chunk per series, with one sample at the given time.
        lbls := labels.Labels{}
        s := newMemSeries(lbls, chunks.HeadSeriesRef(k)*101, labels.StableHash(lbls), 0, defaultIsolationDisabled)
        s := newMemSeries(lbls, chunks.HeadSeriesRef(k)*101, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)
        s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper, c.mmappedChunkT)
        s.mmapCurrentHeadChunk(chunkDiskMapper)
    }

@ -809,7 +809,7 @@ func TestMemSeries_truncateChunks(t *testing.T) {
    }

    lbls := labels.FromStrings("a", "b")
    s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled)
    s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)

    for i := 0; i < 4000; i += 5 {
        ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange)

@ -1341,7 +1341,7 @@ func TestMemSeries_append(t *testing.T) {
    const chunkRange = 500

    lbls := labels.Labels{}
    s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled)
    s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)

    // Add first two samples at the very end of a chunk range and the next two
    // on and after it.

@ -1396,7 +1396,7 @@ func TestMemSeries_appendHistogram(t *testing.T) {
    chunkRange := int64(1000)

    lbls := labels.Labels{}
    s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled)
    s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)

    histograms := tsdbutil.GenerateTestHistograms(4)
    histogramWithOneMoreBucket := histograms[3].Copy()

@ -1453,7 +1453,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) {
    chunkRange := DefaultBlockDuration

    lbls := labels.Labels{}
    s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled)
    s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)

    // At this slow rate, we will fill the chunk in two block durations.
    slowRate := (DefaultBlockDuration * 2) / samplesPerChunk

@ -2677,7 +2677,7 @@ func TestIteratorSeekIntoBuffer(t *testing.T) {
    const chunkRange = 500

    lbls := labels.Labels{}
    s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled)
    s := newMemSeries(lbls, 1, labels.StableHash(lbls), 0, defaultIsolationDisabled, DefaultSamplesPerChunk)

    for i := 0; i < 7; i++ {
        ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange)
@ -156,10 +156,8 @@ type PostingsStats struct {
}

// Stats calculates the cardinality statistics from postings.
func (p *MemPostings) Stats(label string) *PostingsStats {
    const maxNumOfRecords = 10
func (p *MemPostings) Stats(label string, limit int) *PostingsStats {
    var size uint64

    p.mtx.RLock()

    metrics := &maxHeap{}

@ -168,10 +166,10 @@ func (p *MemPostings) Stats(label string) *PostingsStats {
    labelValuePairs := &maxHeap{}
    numLabelPairs := 0

    metrics.init(maxNumOfRecords)
    labels.init(maxNumOfRecords)
    labelValueLength.init(maxNumOfRecords)
    labelValuePairs.init(maxNumOfRecords)
    metrics.init(limit)
    labels.init(limit)
    labelValueLength.init(limit)
    labelValuePairs.init(limit)

    for n, e := range p.m {
        if n == "" {

@ -184,8 +182,9 @@ func (p *MemPostings) Stats(label string) *PostingsStats {
        if n == label {
            metrics.push(Stat{Name: name, Count: uint64(len(values))})
        }
        labelValuePairs.push(Stat{Name: n + "=" + name, Count: uint64(len(values))})
        size += uint64(len(name))
        seriesCnt := uint64(len(values))
        labelValuePairs.push(Stat{Name: n + "=" + name, Count: seriesCnt})
        size += uint64(len(name)) * seriesCnt
    }
    labelValueLength.push(Stat{Name: n, Count: size})
}
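Two things change here: the caller now chooses how many top entries to return (`limit` replaces the hard-coded 10), and the label-value size statistic is weighted by how many series use each value. For the data added in the new test below (values `value1`..`value3` on label `label`, with `value1` shared by two series) the weighted size works out as:

    // len("value1")*2 + len("value2")*1 + len("value3")*1 = 12 + 6 + 6 = 24,
    // matching the stats.LabelValueStats[0].Count assertion in TestMemPostingsStats.
    size := 6*2 + 6*1 + 6*1 // 24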
@ -912,10 +912,39 @@ func BenchmarkPostings_Stats(b *testing.B) {
    }
    b.ResetTimer()
    for n := 0; n < b.N; n++ {
        p.Stats("__name__")
        p.Stats("__name__", 10)
    }
}

func TestMemPostingsStats(t *testing.T) {
    // create a new MemPostings
    p := NewMemPostings()

    // add some postings to the MemPostings
    p.Add(1, labels.FromStrings("label", "value1"))
    p.Add(1, labels.FromStrings("label", "value2"))
    p.Add(1, labels.FromStrings("label", "value3"))
    p.Add(2, labels.FromStrings("label", "value1"))

    // call the Stats method to calculate the cardinality statistics
    stats := p.Stats("label", 10)

    // assert that the expected statistics were calculated
    require.Equal(t, uint64(2), stats.CardinalityMetricsStats[0].Count)
    require.Equal(t, "value1", stats.CardinalityMetricsStats[0].Name)

    require.Equal(t, uint64(3), stats.CardinalityLabelStats[0].Count)
    require.Equal(t, "label", stats.CardinalityLabelStats[0].Name)

    require.Equal(t, uint64(24), stats.LabelValueStats[0].Count)
    require.Equal(t, "label", stats.LabelValueStats[0].Name)

    require.Equal(t, uint64(2), stats.LabelValuePairsStats[0].Count)
    require.Equal(t, "label=value1", stats.LabelValuePairsStats[0].Name)

    require.Equal(t, 3, stats.NumLabelPairs)
}

func TestMemPostings_Delete(t *testing.T) {
    p := NewMemPostings()
    p.Add(1, labels.FromStrings("lbl1", "a"))
tsdb/querier.go
@ -310,6 +310,22 @@ func postingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postin

// inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher.
func inversePostingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postings, error) {
    // Fast-path for MatchNotRegexp matching.
    // Inverse of a MatchNotRegexp is MatchRegexp (double negation).
    // Fast-path for set matching.
    if m.Type == labels.MatchNotRegexp {
        setMatches := m.SetMatches()
        if len(setMatches) > 0 {
            return ix.Postings(m.Name, setMatches...)
        }
    }

    // Fast-path for MatchNotEqual matching.
    // Inverse of a MatchNotEqual is MatchEqual (double negation).
    if m.Type == labels.MatchNotEqual {
        return ix.Postings(m.Name, m.Value)
    }

    vals, err := ix.LabelValues(m.Name)
    if err != nil {
        return nil, err
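The trick is double negation: this function computes postings for the *inverse* of `m`, so inverting a negative matcher yields a positive one that can be answered straight from the index, skipping the scan over all label values. Illustratively (hypothetical label values):

    // Inverting i!="2" gives i="2", so the fast path can do ix.Postings("i", "2").
    // Inverting i!~"X|Y|Z" gives i=~"X|Y|Z", whose finite set of matches is known:
    m := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "X|Y|Z")
    setMatches := m.SetMatches() // ["X", "Y", "Z"] for this pattern shape
    // so the fast path can return ix.Postings("i", setMatches...).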
@ -746,14 +762,35 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
    if app, err = newChunk.Appender(); err != nil {
        break
    }
    if hc, ok := p.currChkMeta.Chunk.(*chunkenc.HistogramChunk); ok {

    switch hc := p.currChkMeta.Chunk.(type) {
    case *chunkenc.HistogramChunk:
        newChunk.(*chunkenc.HistogramChunk).SetCounterResetHeader(hc.GetCounterResetHeader())
    case *safeHeadChunk:
        if unwrapped, ok := hc.Chunk.(*chunkenc.HistogramChunk); ok {
            newChunk.(*chunkenc.HistogramChunk).SetCounterResetHeader(unwrapped.GetCounterResetHeader())
        } else {
            err = fmt.Errorf("internal error, could not unwrap safeHeadChunk to histogram chunk: %T", hc.Chunk)
        }
    default:
        err = fmt.Errorf("internal error, unknown chunk type %T when expecting histogram", p.currChkMeta.Chunk)
    }
    if err != nil {
        break
    }

    var h *histogram.Histogram
    t, h = p.currDelIter.AtHistogram()
    p.curr.MinTime = t

    // Detect missing gauge reset hint.
    if h.CounterResetHint == histogram.GaugeType && newChunk.(*chunkenc.HistogramChunk).GetCounterResetHeader() != chunkenc.GaugeType {
        err = fmt.Errorf("found gauge histogram in non gauge chunk")
        break
    }

    app.AppendHistogram(t, h)

    for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
        if vt != chunkenc.ValHistogram {
            err = fmt.Errorf("found value type %v in histogram chunk", vt)

@ -762,23 +799,37 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
        t, h = p.currDelIter.AtHistogram()

        // Defend against corrupted chunks.
        pI, nI, okToAppend, counterReset := app.(*chunkenc.HistogramAppender).Appendable(h)
        if len(pI)+len(nI) > 0 {
            err = fmt.Errorf(
                "bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required",
                len(pI), len(nI),
            )
            break
        if h.CounterResetHint == histogram.GaugeType {
            pI, nI, bpI, bnI, _, _, okToAppend := app.(*chunkenc.HistogramAppender).AppendableGauge(h)
            if !okToAppend {
                err = errors.New("unable to append histogram due to unexpected schema change")
                break
            }
            if len(pI)+len(nI)+len(bpI)+len(bnI) > 0 {
                err = fmt.Errorf(
                    "bucket layout has changed unexpectedly: forward %d positive, %d negative, backward %d positive %d negative bucket interjections required",
                    len(pI), len(nI), len(bpI), len(bnI),
                )
                break
            }
        } else {
            pI, nI, okToAppend, counterReset := app.(*chunkenc.HistogramAppender).Appendable(h)
            if len(pI)+len(nI) > 0 {
                err = fmt.Errorf(
                    "bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required",
                    len(pI), len(nI),
                )
                break
            }
            if counterReset {
                err = errors.New("detected unexpected counter reset in histogram")
                break
            }
            if !okToAppend {
                err = errors.New("unable to append histogram due to unexpected schema change")
                break
            }
        }
        if counterReset {
            err = errors.New("detected unexpected counter reset in histogram")
            break
        }
        if !okToAppend {
            err = errors.New("unable to append histogram due to unexpected schema change")
            break
        }

        app.AppendHistogram(t, h)
    }
case chunkenc.ValFloat:

@ -803,14 +854,35 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
    if app, err = newChunk.Appender(); err != nil {
        break
    }
    if hc, ok := p.currChkMeta.Chunk.(*chunkenc.FloatHistogramChunk); ok {

    switch hc := p.currChkMeta.Chunk.(type) {
    case *chunkenc.FloatHistogramChunk:
        newChunk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(hc.GetCounterResetHeader())
    case *safeHeadChunk:
        if unwrapped, ok := hc.Chunk.(*chunkenc.FloatHistogramChunk); ok {
            newChunk.(*chunkenc.FloatHistogramChunk).SetCounterResetHeader(unwrapped.GetCounterResetHeader())
        } else {
            err = fmt.Errorf("internal error, could not unwrap safeHeadChunk to float histogram chunk: %T", hc.Chunk)
        }
    default:
        err = fmt.Errorf("internal error, unknown chunk type %T when expecting float histogram", p.currChkMeta.Chunk)
    }
    if err != nil {
        break
    }

    var h *histogram.FloatHistogram
    t, h = p.currDelIter.AtFloatHistogram()
    p.curr.MinTime = t

    // Detect missing gauge reset hint.
    if h.CounterResetHint == histogram.GaugeType && newChunk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader() != chunkenc.GaugeType {
        err = fmt.Errorf("found float gauge histogram in non gauge chunk")
        break
    }

    app.AppendFloatHistogram(t, h)

    for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
        if vt != chunkenc.ValFloatHistogram {
            err = fmt.Errorf("found value type %v in histogram chunk", vt)

@ -819,21 +891,36 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
        t, h = p.currDelIter.AtFloatHistogram()

        // Defend against corrupted chunks.
        pI, nI, okToAppend, counterReset := app.(*chunkenc.FloatHistogramAppender).Appendable(h)
        if len(pI)+len(nI) > 0 {
            err = fmt.Errorf(
                "bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required",
                len(pI), len(nI),
            )
            break
        }
        if counterReset {
            err = errors.New("detected unexpected counter reset in histogram")
            break
        }
        if !okToAppend {
            err = errors.New("unable to append histogram due to unexpected schema change")
            break
        if h.CounterResetHint == histogram.GaugeType {
            pI, nI, bpI, bnI, _, _, okToAppend := app.(*chunkenc.FloatHistogramAppender).AppendableGauge(h)
            if !okToAppend {
                err = errors.New("unable to append histogram due to unexpected schema change")
                break
            }
            if len(pI)+len(nI)+len(bpI)+len(bnI) > 0 {
                err = fmt.Errorf(
                    "bucket layout has changed unexpectedly: forward %d positive, %d negative, backward %d positive %d negative bucket interjections required",
                    len(pI), len(nI), len(bpI), len(bnI),
                )
                break
            }
        } else {
            pI, nI, okToAppend, counterReset := app.(*chunkenc.FloatHistogramAppender).Appendable(h)
            if len(pI)+len(nI) > 0 {
                err = fmt.Errorf(
                    "bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required",
                    len(pI), len(nI),
                )
                break
            }
            if counterReset {
                err = errors.New("detected unexpected counter reset in histogram")
                break
            }
            if !okToAppend {
                err = errors.New("unable to append histogram due to unexpected schema change")
                break
            }
        }

        app.AppendFloatHistogram(t, h)
@ -116,6 +116,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
    iCharSet := labels.MustNewMatcher(labels.MatchRegexp, "i", "1[0-9]")
    iAlternate := labels.MustNewMatcher(labels.MatchRegexp, "i", "(1|2|3|4|5|6|20|55)")
    iXYZ := labels.MustNewMatcher(labels.MatchRegexp, "i", "X|Y|Z")
    iNotXYZ := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "X|Y|Z")
    cases := []struct {
        name     string
        matchers []*labels.Matcher

@ -126,6 +127,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
    {`n="X",j="foo"`, []*labels.Matcher{nX, jFoo}},
    {`j="foo",n="1"`, []*labels.Matcher{jFoo, n1}},
    {`n="1",j!="foo"`, []*labels.Matcher{n1, jNotFoo}},
    {`n="1",i!="2"`, []*labels.Matcher{n1, iNot2}},
    {`n="X",j!="foo"`, []*labels.Matcher{nX, jNotFoo}},
    {`i=~"1[0-9]",j=~"foo|bar"`, []*labels.Matcher{iCharSet, jFooBar}},
    {`j=~"foo|bar"`, []*labels.Matcher{jFooBar}},

@ -133,6 +135,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
    {`j=~"X.+"`, []*labels.Matcher{jXplus}},
    {`i=~"(1|2|3|4|5|6|20|55)"`, []*labels.Matcher{iAlternate}},
    {`i=~"X|Y|Z"`, []*labels.Matcher{iXYZ}},
    {`i!~"X|Y|Z"`, []*labels.Matcher{iNotXYZ}},
    {`i=~".*"`, []*labels.Matcher{iStar}},
    {`i=~"1.*"`, []*labels.Matcher{i1Star}},
    {`i=~".*1"`, []*labels.Matcher{iStar1}},

@ -148,6 +151,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
    {`n="1",i!="",j=~"X.+"`, []*labels.Matcher{n1, iNotEmpty, jXplus}},
    {`n="1",i!="",j=~"XXX|YYY"`, []*labels.Matcher{n1, iNotEmpty, jXXXYYY}},
    {`n="1",i=~"X|Y|Z",j="foo"`, []*labels.Matcher{n1, iXYZ, jFoo}},
    {`n="1",i!~"X|Y|Z",j="foo"`, []*labels.Matcher{n1, iNotXYZ, jFoo}},
    {`n="1",i=~".+",j="foo"`, []*labels.Matcher{n1, iPlus, jFoo}},
    {`n="1",i=~"1.+",j="foo"`, []*labels.Matcher{n1, i1Plus, jFoo}},
    {`n="1",i=~".*1.*",j="foo"`, []*labels.Matcher{n1, iStar1Star, jFoo}},
@ -29,6 +29,7 @@ import (
    "github.com/pkg/errors"
    "github.com/stretchr/testify/require"

    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/storage"
    "github.com/prometheus/prometheus/tsdb/chunkenc"

@ -541,6 +542,46 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) {
    }
}

func TestBlockQuerier_TrimmingDoesNotModifyOriginalTombstoneIntervals(t *testing.T) {
    c := blockQuerierTestCase{
        mint: 2,
        maxt: 6,
        ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "a", "a")},
        exp: newMockSeriesSet([]storage.Series{
            storage.NewListSeries(labels.FromStrings("a", "a"),
                []tsdbutil.Sample{sample{3, 4, nil, nil}, sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
            ),
            storage.NewListSeries(labels.FromStrings("a", "a", "b", "b"),
                []tsdbutil.Sample{sample{3, 3, nil, nil}, sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
            ),
        }),
        expChks: newMockChunkSeriesSet([]storage.ChunkSeries{
            storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a"),
                []tsdbutil.Sample{sample{3, 4, nil, nil}}, []tsdbutil.Sample{sample{5, 2, nil, nil}, sample{6, 3, nil, nil}},
            ),
            storage.NewListChunkSeriesFromSamples(labels.FromStrings("a", "a", "b", "b"),
                []tsdbutil.Sample{sample{3, 3, nil, nil}}, []tsdbutil.Sample{sample{5, 3, nil, nil}, sample{6, 6, nil, nil}},
            ),
        }),
    }
    ir, cr, _, _ := createIdxChkReaders(t, testData)
    stones := tombstones.NewMemTombstones()
    p, err := ir.Postings("a", "a")
    require.NoError(t, err)
    refs, err := index.ExpandPostings(p)
    require.NoError(t, err)
    for _, ref := range refs {
        stones.AddInterval(ref, tombstones.Interval{Mint: 1, Maxt: 2})
    }
    testBlockQuerier(t, c, ir, cr, stones)
    for _, ref := range refs {
        intervals, err := stones.Get(ref)
        require.NoError(t, err)
        // Without copy, the intervals could be [math.MinInt64, 2].
        require.Equal(t, tombstones.Intervals{{Mint: 1, Maxt: 2}}, intervals)
    }
}

var testData = []seriesSamples{
    {
        lset: map[string]string{"a": "a"},

@ -907,6 +948,202 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
            sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil},
        },
    },
    {
        name: "one histogram chunk",
        chks: [][]tsdbutil.Sample{
            {
                sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
                sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
                sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil},
                sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil},
            },
        },
        expected: []tsdbutil.Sample{
            sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
            sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
            sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
            sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
        },
        expectedChks: []chunks.Meta{
            tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
                sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
                sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
                sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
                sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
            }),
        },
    },
    {
        name: "one histogram chunk intersect with deletion interval",
        chks: [][]tsdbutil.Sample{
            {
                sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
                sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
                sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil},
                sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil},
            },
        },
        intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
        expected: []tsdbutil.Sample{
            sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
            sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
            sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
        },
        expectedChks: []chunks.Meta{
            tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
                sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
                sample{2, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(2)), nil},
                sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
            }),
        },
    },
    {
        name: "one float histogram chunk",
        chks: [][]tsdbutil.Sample{
            {
                sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
                sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
                sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
                sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
            },
        },
        expected: []tsdbutil.Sample{
            sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
            sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
            sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
            sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
        },
        expectedChks: []chunks.Meta{
            tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
                sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
                sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
                sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
                sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
            }),
        },
    },
    {
        name: "one float histogram chunk intersect with deletion interval",
        chks: [][]tsdbutil.Sample{
            {
                sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
                sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
                sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
                sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
            },
        },
        intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
        expected: []tsdbutil.Sample{
            sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
            sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
            sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
        },
        expectedChks: []chunks.Meta{
            tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
                sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
                sample{2, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(2))},
                sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
            }),
        },
    },
    {
        name: "one gauge histogram chunk",
        chks: [][]tsdbutil.Sample{
            {
                sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
                sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
                sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
                sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
            },
        },
        expected: []tsdbutil.Sample{
            sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
            sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
            sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
            sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
        },
        expectedChks: []chunks.Meta{
            tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
                sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
                sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
                sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
                sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
            }),
        },
    },
    {
        name: "one gauge histogram chunk intersect with deletion interval",
        chks: [][]tsdbutil.Sample{
            {
                sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
                sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
                sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
                sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
            },
        },
        intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
        expected: []tsdbutil.Sample{
            sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
            sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
            sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
        },
        expectedChks: []chunks.Meta{
            tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
                sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
                sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
                sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
            }),
        },
    },
    {
        name: "one gauge float histogram",
        chks: [][]tsdbutil.Sample{
            {
                sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
                sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
                sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
                sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
            },
        },
        expected: []tsdbutil.Sample{
            sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
            sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
            sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
            sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
        },
        expectedChks: []chunks.Meta{
            tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
                sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
                sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
                sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
                sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
            }),
        },
    },
    {
        name: "one gauge float histogram chunk intersect with deletion interval",
        chks: [][]tsdbutil.Sample{
            {
                sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
                sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
                sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
                sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
            },
        },
        intervals: tombstones.Intervals{{Mint: 5, Maxt: 20}},
        expected: []tsdbutil.Sample{
            sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
            sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
            sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
        },
        expectedChks: []chunks.Meta{
            tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
                sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
                sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
                sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
            }),
        },
    },
}
for _, tc := range cases {
    t.Run(tc.name, func(t *testing.T) {

@ -1820,6 +2057,19 @@ func TestPostingsForMatchers(t *testing.T) {
        labels.FromStrings("n", "2.5"),
    },
},
{
    matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "1")},
    exp: []labels.Labels{
        labels.FromStrings("n", "2"),
        labels.FromStrings("n", "2.5"),
    },
},
{
    matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "1|2.5")},
    exp: []labels.Labels{
        labels.FromStrings("n", "2"),
    },
},
{
    matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a$")},
    exp: []labels.Labels{

@ -1909,27 +2159,36 @@ func TestPostingsForMatchers(t *testing.T) {
    require.NoError(t, err)

    for _, c := range cases {
        exp := map[string]struct{}{}
        for _, l := range c.exp {
            exp[l.String()] = struct{}{}
        }
        p, err := PostingsForMatchers(ir, c.matchers...)
        require.NoError(t, err)

        var builder labels.ScratchBuilder
        for p.Next() {
            require.NoError(t, ir.Series(p.At(), &builder, &[]chunks.Meta{}))
            lbls := builder.Labels()
            if _, ok := exp[lbls.String()]; !ok {
                t.Errorf("Evaluating %v, unexpected result %s", c.matchers, lbls.String())
            } else {
                delete(exp, lbls.String())
        name := ""
        for i, matcher := range c.matchers {
            if i > 0 {
                name += ","
            }
            name += matcher.String()
        }
        require.NoError(t, p.Err())
        if len(exp) != 0 {
            t.Errorf("Evaluating %v, missing results %+v", c.matchers, exp)
        }
        t.Run(name, func(t *testing.T) {
            exp := map[string]struct{}{}
            for _, l := range c.exp {
                exp[l.String()] = struct{}{}
            }
            p, err := PostingsForMatchers(ir, c.matchers...)
            require.NoError(t, err)

            var builder labels.ScratchBuilder
            for p.Next() {
                require.NoError(t, ir.Series(p.At(), &builder, &[]chunks.Meta{}))
                lbls := builder.Labels()
                if _, ok := exp[lbls.String()]; !ok {
                    t.Errorf("Evaluating %v, unexpected result %s", c.matchers, lbls.String())
                } else {
                    delete(exp, lbls.String())
                }
            }
            require.NoError(t, p.Err())
            if len(exp) != 0 {
                t.Errorf("Evaluating %v, missing results %+v", c.matchers, exp)
            }
        })
    }
}

@ -2376,3 +2635,80 @@ func BenchmarkHeadQuerier(b *testing.B) {
        require.NoError(b, ss.Err())
    }
}

// This is a regression test for the case where gauge histograms were not handled by
// populateWithDelChunkSeriesIterator correctly.
func TestQueryWithDeletedHistograms(t *testing.T) {
    testcases := map[string]func(int) (*histogram.Histogram, *histogram.FloatHistogram){
        "intCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
            return tsdbutil.GenerateTestHistogram(i), nil
        },
        "intgauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
            return tsdbutil.GenerateTestGaugeHistogram(rand.Int() % 1000), nil
        },
        "floatCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
            return nil, tsdbutil.GenerateTestFloatHistogram(i)
        },
        "floatGauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
            return nil, tsdbutil.GenerateTestGaugeFloatHistogram(rand.Int() % 1000)
        },
    }

    for name, tc := range testcases {
        t.Run(name, func(t *testing.T) {
            db := openTestDB(t, nil, nil)
            defer func() {
                require.NoError(t, db.Close())
            }()

            db.EnableNativeHistograms()
            appender := db.Appender(context.Background())

            var (
                err       error
                seriesRef storage.SeriesRef
            )
            lbs := labels.FromStrings("__name__", "test", "type", name)

            for i := 0; i < 100; i++ {
                h, fh := tc(i)
                seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, fh)
                require.NoError(t, err)
            }

            err = appender.Commit()
            require.NoError(t, err)

            matcher, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test")
            require.NoError(t, err)

            // Delete the last 20.
            err = db.Delete(80, 100, matcher)
            require.NoError(t, err)

            chunkQuerier, err := db.ChunkQuerier(context.Background(), 0, 100)
            require.NoError(t, err)

            css := chunkQuerier.Select(false, nil, matcher)

            seriesCount := 0
            for css.Next() {
                seriesCount++
                series := css.At()

                sampleCount := 0
                it := series.Iterator(nil)
                for it.Next() {
                    chk := it.At()
                    for cit := chk.Chunk.Iterator(nil); cit.Next() != chunkenc.ValNone; {
                        sampleCount++
                    }
                }
                require.NoError(t, it.Err())
                require.Equal(t, 80, sampleCount)
            }
            require.NoError(t, css.Err())
            require.Equal(t, 1, seriesCount)
        })
    }
}
@ -18,6 +18,7 @@ import (
    "fmt"
    "hash"
    "hash/crc32"
    "math"
    "os"
    "path/filepath"
    "sort"

@ -252,7 +253,14 @@ func NewTestMemTombstones(intervals []Intervals) *MemTombstones {
func (t *MemTombstones) Get(ref storage.SeriesRef) (Intervals, error) {
    t.mtx.RLock()
    defer t.mtx.RUnlock()
    return t.intvlGroups[ref], nil
    intervals, ok := t.intvlGroups[ref]
    if !ok {
        return nil, nil
    }
    // Make a copy to avoid race.
    res := make(Intervals, len(intervals))
    copy(res, intervals)
    return res, nil
}

func (t *MemTombstones) DeleteTombstones(refs map[storage.SeriesRef]struct{}) {

@ -349,17 +357,23 @@ func (in Intervals) Add(n Interval) Intervals {
    // Find min and max indexes of intervals that overlap with the new interval.
    // Intervals are closed [t1, t2] and t is discrete, so if neighbour intervals are 1 step difference
    // to the new one, we can merge those together.
    mini := sort.Search(len(in), func(i int) bool { return in[i].Maxt >= n.Mint-1 })
    if mini == len(in) {
        return append(in, n)
    mini := 0
    if n.Mint != math.MinInt64 { // Avoid overflow.
        mini = sort.Search(len(in), func(i int) bool { return in[i].Maxt >= n.Mint-1 })
        if mini == len(in) {
            return append(in, n)
        }
    }

    maxi := sort.Search(len(in)-mini, func(i int) bool { return in[mini+i].Mint > n.Maxt+1 })
    if maxi == 0 {
        if mini == 0 {
            return append(Intervals{n}, in...)
    maxi := len(in)
    if n.Maxt != math.MaxInt64 { // Avoid overflow.
        maxi = sort.Search(len(in)-mini, func(i int) bool { return in[mini+i].Mint > n.Maxt+1 })
        if maxi == 0 {
            if mini == 0 {
                return append(Intervals{n}, in...)
            }
            return append(in[:mini], append(Intervals{n}, in[mini:]...)...)
        }
        return append(in[:mini], append(Intervals{n}, in[mini:]...)...)
    }

    if n.Mint < in[mini].Mint {
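The new guards matter because `Interval` bounds are plain int64s and the neighbour checks use `n.Mint-1` / `n.Maxt+1`; at the extremes those wrap around, which would make the binary searches look at the wrong end of the slice. Go integer arithmetic wraps silently:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        fmt.Println(int64(math.MinInt64) - 1) // wraps to 9223372036854775807 (MaxInt64)
        fmt.Println(int64(math.MaxInt64) + 1) // wraps to -9223372036854775808 (MinInt64)
    }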
@ -81,6 +81,22 @@ func TestDeletingTombstones(t *testing.T) {
|
|||
require.Empty(t, intervals)
|
||||
}
|
||||
|
||||
func TestTombstonesGetWithCopy(t *testing.T) {
|
||||
stones := NewMemTombstones()
|
||||
stones.AddInterval(1, Intervals{{Mint: 1, Maxt: 2}, {Mint: 7, Maxt: 8}, {Mint: 11, Maxt: 12}}...)
|
||||
|
||||
intervals0, err := stones.Get(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 7, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals0)
|
||||
intervals1 := intervals0.Add(Interval{Mint: 4, Maxt: 6})
|
||||
require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 4, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals0) // Original slice changed.
|
||||
require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 4, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals1)
|
||||
|
||||
intervals2, err := stones.Get(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, Intervals{{Mint: 1, Maxt: 2}, {Mint: 7, Maxt: 8}, {Mint: 11, Maxt: 12}}, intervals2)
|
||||
}
|
||||
|
||||
func TestTruncateBefore(t *testing.T) {
|
||||
cases := []struct {
|
||||
before Intervals
|
||||
|

@ -210,6 +226,26 @@ func TestAddingNewIntervals(t *testing.T) {
new: Interval{math.MinInt64, 10},
exp: Intervals{{math.MinInt64, math.MaxInt64}},
},
{
exist: Intervals{{9, 10}},
new: Interval{math.MinInt64, 7},
exp: Intervals{{math.MinInt64, 7}, {9, 10}},
},
{
exist: Intervals{{9, 10}},
new: Interval{12, math.MaxInt64},
exp: Intervals{{9, 10}, {12, math.MaxInt64}},
},
{
exist: Intervals{{9, 10}},
new: Interval{math.MinInt64, 8},
exp: Intervals{{math.MinInt64, 10}},
},
{
exist: Intervals{{9, 10}},
new: Interval{11, math.MaxInt64},
exp: Intervals{{9, math.MaxInt64}},
},
}

for _, c := range cases {

@ -71,9 +71,19 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta {
case chunkenc.ValFloat:
ca.Append(s.Get(i).T(), s.Get(i).F())
case chunkenc.ValHistogram:
ca.AppendHistogram(s.Get(i).T(), s.Get(i).H())
h := s.Get(i).H()
ca.AppendHistogram(s.Get(i).T(), h)
if i == 0 && h.CounterResetHint == histogram.GaugeType {
hc := c.(*chunkenc.HistogramChunk)
hc.SetCounterResetHeader(chunkenc.GaugeType)
}
case chunkenc.ValFloatHistogram:
ca.AppendFloatHistogram(s.Get(i).T(), s.Get(i).FH())
fh := s.Get(i).FH()
ca.AppendFloatHistogram(s.Get(i).T(), fh)
if i == 0 && fh.CounterResetHint == histogram.GaugeType {
hc := c.(*chunkenc.FloatHistogramChunk)
hc.SetCounterResetHeader(chunkenc.GaugeType)
}
default:
panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
}
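
The new branches mark a chunk as a gauge chunk when the first appended (float) histogram carries a GaugeType counter-reset hint, so readers can skip counter-reset detection. A minimal, untested sketch reusing only identifiers visible in this hunk; the zero-valued histogram and timestamp 0 are purely illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	// Append one gauge histogram to a fresh chunk; as in the hunk above,
	// the first sample's hint decides the chunk-level header.
	c := chunkenc.NewHistogramChunk()
	ca, err := c.Appender()
	if err != nil {
		panic(err)
	}
	h := &histogram.Histogram{CounterResetHint: histogram.GaugeType}
	ca.AppendHistogram(0, h)
	if h.CounterResetHint == histogram.GaugeType {
		c.SetCounterResetHeader(chunkenc.GaugeType)
	}
	fmt.Println(c.NumSamples()) // 1
}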

@ -108,3 +108,13 @@ func GenerateTestGaugeFloatHistogram(i int) *histogram.FloatHistogram {
h.CounterResetHint = histogram.GaugeType
return h
}

func SetHistogramNotCounterReset(h *histogram.Histogram) *histogram.Histogram {
h.CounterResetHint = histogram.NotCounterReset
return h
}

func SetFloatHistogramNotCounterReset(h *histogram.FloatHistogram) *histogram.FloatHistogram {
h.CounterResetHint = histogram.NotCounterReset
return h
}

@ -18,7 +18,7 @@ import (
"io"
"math"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"

@ -34,12 +34,16 @@ import (
)

const (
readPeriod = 10 * time.Millisecond
checkpointPeriod = 5 * time.Second
segmentCheckPeriod = 100 * time.Millisecond
consumer = "consumer"
)

var (
ErrIgnorable = errors.New("ignore me")
readTimeout = 15 * time.Second
)

// WriteTo is an interface used by the Watcher to send the samples it's read
// from the WAL on to somewhere else. Functions will be called concurrently
// and it is left to the implementer to make sure they are safe.

@ -61,11 +65,17 @@ type WriteTo interface {
SeriesReset(int)
}

// Used to notify the watcher that data has been written so that it can read.
type WriteNotified interface {
Notify()
}

type WatcherMetrics struct {
recordsRead *prometheus.CounterVec
recordDecodeFails *prometheus.CounterVec
samplesSentPreTailing *prometheus.CounterVec
currentSegment *prometheus.GaugeVec
notificationsSkipped *prometheus.CounterVec
}

// Watcher watches the TSDB WAL for a given WriteTo.

@ -88,9 +98,11 @@ type Watcher struct {
recordDecodeFailsMetric prometheus.Counter
samplesSentPreTailing prometheus.Counter
currentSegmentMetric prometheus.Gauge
notificationsSkipped prometheus.Counter

quit chan struct{}
done chan struct{}
readNotify chan struct{}
quit chan struct{}
done chan struct{}

// For testing, stop when we hit this segment.
MaxSegment int

@ -134,6 +146,15 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
},
[]string{consumer},
),
notificationsSkipped: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "wal_watcher",
Name: "notifications_skipped_total",
Help: "The number of WAL write notifications that the Watcher has skipped due to already being in a WAL read routine.",
},
[]string{consumer},
),
}

if reg != nil {

@ -141,6 +162,7 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
reg.MustRegister(m.recordDecodeFails)
reg.MustRegister(m.samplesSentPreTailing)
reg.MustRegister(m.currentSegment)
reg.MustRegister(m.notificationsSkipped)
}

return m

@ -156,18 +178,30 @@ func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logge
writer: writer,
metrics: metrics,
readerMetrics: readerMetrics,
walDir: path.Join(dir, "wal"),
walDir: filepath.Join(dir, "wal"),
name: name,
sendExemplars: sendExemplars,
sendHistograms: sendHistograms,

quit: make(chan struct{}),
done: make(chan struct{}),
readNotify: make(chan struct{}),
quit: make(chan struct{}),
done: make(chan struct{}),

MaxSegment: -1,
}
}

func (w *Watcher) Notify() {
select {
case w.readNotify <- struct{}{}:
return
default: // default so we can exit
// We don't need a buffered channel or any buffering since,
// for each notification it receives, the watcher will read until EOF.
w.notificationsSkipped.Inc()
}
}

func (w *Watcher) setMetrics() {
// Set up the WAL Watcher's metrics. We do this here rather than in the
// constructor because of the ordering of creating Queue Managers,
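
Notify above is the standard non-blocking-send idiom: an unbuffered channel plus a select with a default branch, so the WAL writer never blocks on a watcher that is already mid-read; it just counts the skip. A standalone sketch of the idiom (plain Go, not Prometheus code):

package main

import "fmt"

func main() {
	notify := make(chan struct{}) // unbuffered, like readNotify

	skipped := 0
	select {
	case notify <- struct{}{}:
		// Only taken if a reader is already blocked in a receive.
	default:
		// No reader waiting: drop the notification instead of blocking.
		skipped++
	}
	fmt.Println(skipped) // 1: nothing was receiving, so the send was skipped
}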

@ -177,6 +211,8 @@ func (w *Watcher) setMetrics() {
w.recordDecodeFailsMetric = w.metrics.recordDecodeFails.WithLabelValues(w.name)
w.samplesSentPreTailing = w.metrics.samplesSentPreTailing.WithLabelValues(w.name)
w.currentSegmentMetric = w.metrics.currentSegment.WithLabelValues(w.name)
w.notificationsSkipped = w.metrics.notificationsSkipped.WithLabelValues(w.name)

}
}

@ -262,7 +298,7 @@ func (w *Watcher) Run() error {

// On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment.
// On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil {
if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) {
return err
}

@ -330,6 +366,26 @@ func (w *Watcher) segments(dir string) ([]int, error) {
return refs, nil
}

func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, size int64) error {
err := w.readSegment(r, segmentNum, tail)

// Ignore all errors reading to end of segment whilst replaying the WAL.
if !tail {
if err != nil && errors.Cause(err) != io.EOF {
level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
} else if r.Offset() != size {
level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size)
}
return ErrIgnorable
}

// Otherwise, when we are tailing, non-EOFs are fatal.
if errors.Cause(err) != io.EOF {
return err
}
return nil
}

// Use tail true to indicate that the reader is currently on a segment that is
// actively being written to. If false, assume it's a full segment and we're
// replaying it on start to cache the series records.
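
readAndHandleError centralises the replay-versus-tail error handling and reports the benign case through the ErrIgnorable sentinel, which Run filters with errors.Is. A small standalone sketch of the sentinel-error pattern it relies on (plain Go, hypothetical names):

package main

import (
	"errors"
	"fmt"
)

var errIgnorable = errors.New("ignore me")

func step() error {
	// An "expected" failure is reported through the sentinel so callers
	// can tell it apart from fatal errors without string matching.
	return fmt.Errorf("replaying segment: %w", errIgnorable)
}

func main() {
	if err := step(); err != nil && !errors.Is(err, errIgnorable) {
		panic(err) // only genuinely fatal errors reach here
	}
	fmt.Println("ignorable error filtered, carrying on")
}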

@ -342,7 +398,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {

reader := NewLiveReader(w.logger, w.readerMetrics, segment)

readTicker := time.NewTicker(readPeriod)
readTicker := time.NewTicker(readTimeout)
defer readTicker.Stop()

checkpointTicker := time.NewTicker(checkpointPeriod)

@ -400,7 +456,6 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
if last <= segmentNum {
continue
}

err = w.readSegment(reader, segmentNum, tail)

// Ignore errors reading to end of segment whilst replaying the WAL.

@ -421,24 +476,23 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {

return nil

// We haven't read due to a notification in quite some time, try reading anyway.
case <-readTicker.C:
err = w.readSegment(reader, segmentNum, tail)

// Ignore all errors reading to end of segment whilst replaying the WAL.
if !tail {
switch {
case err != nil && errors.Cause(err) != io.EOF:
level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
case reader.Offset() != size:
level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
}
return nil
}

// Otherwise, when we are tailing, non-EOFs are fatal.
if errors.Cause(err) != io.EOF {
level.Debug(w.logger).Log("msg", "Watcher is reading the WAL due to timeout, haven't received any write notifications recently", "timeout", readTimeout)
err := w.readAndHandleError(reader, segmentNum, tail, size)
if err != nil {
return err
}
// Still want to reset the ticker so we don't read too often.
readTicker.Reset(readTimeout)

case <-w.readNotify:
err := w.readAndHandleError(reader, segmentNum, tail, size)
if err != nil {
return err
}
// Still want to reset the ticker so we don't read too often.
readTicker.Reset(readTimeout)
}
}
}

@ -691,7 +745,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err
func checkpointNum(dir string) (int, error) {
// Checkpoint dir names are in the format checkpoint.000001
// dir may contain a hidden directory, so only check the base directory
chunks := strings.Split(path.Base(dir), ".")
chunks := strings.Split(filepath.Base(dir), ".")
if len(chunks) != 2 {
return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
}
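
The path.Base to filepath.Base switch matters on Windows: package path only understands forward slashes, while path/filepath uses the OS separator. A standalone illustration (plain Go; the noted filepath.Base output assumes Windows):

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	dir := `wal\checkpoint.000001` // a Windows-style path

	// path only splits on '/', so it returns the whole string unchanged.
	fmt.Println(path.Base(dir)) // wal\checkpoint.000001

	// filepath uses the OS separator; on Windows this yields checkpoint.000001.
	fmt.Println(filepath.Base(dir))
}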

@ -104,7 +104,7 @@ func (wtm *writeToMock) SeriesReset(index int) {
}
}

func (wtm *writeToMock) checkNumLabels() int {
func (wtm *writeToMock) checkNumSeries() int {
wtm.seriesLock.Lock()
defer wtm.seriesLock.Unlock()
return len(wtm.seriesSegmentIndexes)

@ -230,9 +230,9 @@ func TestTailSamples(t *testing.T) {
expectedExemplars := seriesCount * exemplarsCount
expectedHistograms := seriesCount * histogramsCount
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumLabels() >= expectedSeries
return wt.checkNumSeries() >= expectedSeries
})
require.Equal(t, expectedSeries, wt.checkNumLabels(), "did not receive the expected number of series")
require.Equal(t, expectedSeries, wt.checkNumSeries(), "did not receive the expected number of series")
require.Equal(t, expectedSamples, wt.samplesAppended, "did not receive the expected number of samples")
require.Equal(t, expectedExemplars, wt.exemplarsAppended, "did not receive the expected number of exemplars")
require.Equal(t, expectedHistograms, wt.histogramsAppended, "did not receive the expected number of histograms")

@ -290,7 +290,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
}
}
require.NoError(t, w.Log(recs...))

readTimeout = time.Second
_, _, err = Segments(w.Dir())
require.NoError(t, err)

@ -299,11 +299,10 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
go watcher.Start()

expected := seriesCount
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumLabels() >= expected
})
require.Eventually(t, func() bool {
return wt.checkNumSeries() == expected
}, 20*time.Second, 1*time.Second)
watcher.Stop()
require.Equal(t, expected, wt.checkNumLabels())
})
}
}

@ -383,16 +382,17 @@ func TestReadToEndWithCheckpoint(t *testing.T) {

_, _, err = Segments(w.Dir())
require.NoError(t, err)
readTimeout = time.Second
wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
go watcher.Start()

expected := seriesCount * 2
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumLabels() >= expected
})

require.Eventually(t, func() bool {
return wt.checkNumSeries() == expected
}, 10*time.Second, 1*time.Second)
watcher.Stop()
require.Equal(t, expected, wt.checkNumLabels())
})
}
}

@ -460,10 +460,10 @@ func TestReadCheckpoint(t *testing.T) {

expectedSeries := seriesCount
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumLabels() >= expectedSeries
return wt.checkNumSeries() >= expectedSeries
})
watcher.Stop()
require.Equal(t, expectedSeries, wt.checkNumLabels())
require.Equal(t, expectedSeries, wt.checkNumSeries())
})
}
}

@ -595,6 +595,7 @@ func TestCheckpointSeriesReset(t *testing.T) {
_, _, err = Segments(w.Dir())
require.NoError(t, err)

readTimeout = time.Second
wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
watcher.MaxSegment = -1

@ -602,9 +603,11 @@ func TestCheckpointSeriesReset(t *testing.T) {

expected := seriesCount
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumLabels() >= expected
return wt.checkNumSeries() >= expected
})
require.Equal(t, seriesCount, wt.checkNumLabels())
require.Eventually(t, func() bool {
return wt.checkNumSeries() == seriesCount
}, 10*time.Second, 1*time.Second)

_, err = Checkpoint(log.NewNopLogger(), w, 2, 4, func(x chunks.HeadSeriesRef) bool { return true }, 0)
require.NoError(t, err)

@ -621,7 +624,9 @@ func TestCheckpointSeriesReset(t *testing.T) {
// If you modify the checkpoint and truncated segment numbers, run the test to see how
// many series records you end up with and change the last Equals check accordingly,
// or modify the Equals to Assert(len(wt.seriesLabels) < seriesCount*10).
require.Equal(t, tc.segments, wt.checkNumLabels())
require.Eventually(t, func() bool {
return wt.checkNumSeries() == tc.segments
}, 20*time.Second, 1*time.Second)
})
}
}

@ -188,6 +188,8 @@ type WL struct {
compress bool
snappyBuf []byte

WriteNotified WriteNotified

metrics *wlMetrics
}

@ -343,6 +345,10 @@ func (w *WL) Dir() string {
return w.dir
}

func (w *WL) SetWriteNotified(wn WriteNotified) {
w.WriteNotified = wn
}

func (w *WL) run() {
Loop:
for {

@ -171,7 +171,7 @@ type TSDBAdminStats interface {
CleanTombstones() error
Delete(mint, maxt int64, ms ...*labels.Matcher) error
Snapshot(dir string, withHead bool) error
Stats(statsByLabelName string) (*tsdb.Stats, error)
Stats(statsByLabelName string, limit int) (*tsdb.Stats, error)
WALReplayStatus() (tsdb.WALReplayStatus, error)
}

@ -1480,8 +1480,15 @@ func TSDBStatsFromIndexStats(stats []index.Stat) []TSDBStat {
return result
}

func (api *API) serveTSDBStatus(*http.Request) apiFuncResult {
s, err := api.db.Stats(labels.MetricName)
func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult {
limit := 10
if s := r.FormValue("limit"); s != "" {
var err error
if limit, err = strconv.Atoi(s); err != nil || limit < 1 {
return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a positive number")}, nil, nil}
}
}
s, err := api.db.Stats(labels.MetricName, limit)
if err != nil {
return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
}
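
With this change the TSDB status endpoint accepts an optional limit query parameter (default 10; values below 1 are rejected as bad_data) bounding how many entries each statistic returns. A hedged client-side sketch; the address and port are placeholders:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Ask for the top 20 entries per statistic instead of the default 10.
	resp, err := http.Get("http://localhost:9090/api/v1/status/tsdb?limit=20")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode) // limit=0 would yield a bad_data error instead
	fmt.Println(len(body) > 0)
}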

@ -2620,7 +2620,7 @@ type fakeDB struct {
func (f *fakeDB) CleanTombstones() error { return f.err }
func (f *fakeDB) Delete(int64, int64, ...*labels.Matcher) error { return f.err }
func (f *fakeDB) Snapshot(string, bool) error { return f.err }
func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) {
func (f *fakeDB) Stats(statsByLabelName string, limit int) (_ *tsdb.Stats, retErr error) {
dbDir, err := os.MkdirTemp("", "tsdb-api-ready")
if err != nil {
return nil, err

@ -2634,7 +2634,7 @@ func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) {
opts := tsdb.DefaultHeadOptions()
opts.ChunkRange = 1000
h, _ := tsdb.NewHead(nil, nil, nil, nil, opts, nil)
return h.Stats(statsByLabelName), nil
return h.Stats(statsByLabelName, limit), nil
}

func (f *fakeDB) WALReplayStatus() (tsdb.WALReplayStatus, error) {

@ -3206,8 +3206,19 @@ func TestTSDBStatus(t *testing.T) {
{
db: tsdb,
endpoint: tsdbStatusAPI,

errType: errorNone,
errType: errorNone,
},
{
db: tsdb,
endpoint: tsdbStatusAPI,
values: map[string][]string{"limit": {"20"}},
errType: errorNone,
},
{
db: tsdb,
endpoint: tsdbStatusAPI,
values: map[string][]string{"limit": {"0"}},
errType: errorBadData,
},
} {
tc := tc

@ -251,7 +251,7 @@ func (notReadyReadStorage) StartTime() (int64, error) {
return 0, errors.Wrap(tsdb.ErrNotReady, "wrap")
}

func (notReadyReadStorage) Stats(string) (*tsdb.Stats, error) {
func (notReadyReadStorage) Stats(string, int) (*tsdb.Stats, error) {
return nil, errors.Wrap(tsdb.ErrNotReady, "wrap")
}

@ -378,7 +378,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
body, err := io.ReadAll(res.Body)
require.NoError(t, err)

p := textparse.NewProtobufParser(body)
p := textparse.NewProtobufParser(body, false)
var actVec promql.Vector
metricFamilies := 0
l := labels.Labels{}

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
"version": "0.43.0",
"version": "0.44.0",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",

@ -29,7 +29,7 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.43.0",
"@prometheus-io/lezer-promql": "0.44.0",
"lru-cache": "^6.0.0"
},
"devDependencies": {

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
"version": "0.43.0",
"version": "0.44.0",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",

14
web/ui/package-lock.json
generated

@ -28,10 +28,10 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
"version": "0.43.0",
"version": "0.44.0",
"license": "Apache-2.0",
"dependencies": {
"@prometheus-io/lezer-promql": "0.43.0",
"@prometheus-io/lezer-promql": "0.44.0",
"lru-cache": "^6.0.0"
},
"devDependencies": {

@ -61,7 +61,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
"version": "0.43.0",
"version": "0.44.0",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.2.2",

@ -20763,7 +20763,7 @@
},
"react-app": {
"name": "@prometheus-io/app",
"version": "0.43.0",
"version": "0.44.0",
"dependencies": {
"@codemirror/autocomplete": "^6.4.0",
"@codemirror/commands": "^6.2.0",

@ -20781,7 +20781,7 @@
"@lezer/lr": "^1.3.1",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.43.0",
"@prometheus-io/codemirror-promql": "0.44.0",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^7.2.0",

@ -23417,7 +23417,7 @@
"@lezer/lr": "^1.3.1",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.43.0",
"@prometheus-io/codemirror-promql": "0.44.0",
"@testing-library/react-hooks": "^7.0.2",
"@types/enzyme": "^3.10.12",
"@types/flot": "0.0.32",

@ -23468,7 +23468,7 @@
"@lezer/common": "^1.0.2",
"@lezer/highlight": "^1.1.3",
"@lezer/lr": "^1.3.1",
"@prometheus-io/lezer-promql": "0.43.0",
"@prometheus-io/lezer-promql": "0.44.0",
"@types/lru-cache": "^5.1.1",
"isomorphic-fetch": "^3.0.0",
"lru-cache": "^6.0.0",

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/app",
"version": "0.43.0",
"version": "0.44.0",
"private": true,
"dependencies": {
"@codemirror/autocomplete": "^6.4.0",

@ -19,7 +19,7 @@
"@lezer/common": "^1.0.2",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.43.0",
"@prometheus-io/codemirror-promql": "0.44.0",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^7.2.0",

@ -52,8 +52,8 @@ type dbAdapter struct {
*tsdb.DB
}

func (a *dbAdapter) Stats(statsByLabelName string) (*tsdb.Stats, error) {
return a.Head().Stats(statsByLabelName), nil
func (a *dbAdapter) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) {
return a.Head().Stats(statsByLabelName, limit), nil
}

func (a *dbAdapter) WALReplayStatus() (tsdb.WALReplayStatus, error) {