Merge branch 'main' into refactor/add_max_func_to_maxTimestamp

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Arve Knudsen <arve.knudsen@gmail.com>, 2024-04-30 11:50:37 +02:00, committed by GitHub
commit 759ca8b207
37 changed files with 1287 additions and 630 deletions


@ -3,6 +3,7 @@
## unreleased
* [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633
* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` to measure the time it takes to restore a rule group. #13974
## 2.51.2 / 2024-04-09


@ -217,6 +217,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
case "native-histograms":
c.tsdb.EnableNativeHistograms = true
c.scrape.EnableNativeHistogramsIngestion = true
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
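
For reference, this switch arm is reached when Prometheus is started with the corresponding feature flag, e.g.:

```
prometheus --enable-feature=native-histograms
```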


@ -838,6 +838,10 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB
}
func displayHistogram(dataType string, datas []int, total int) {
if len(datas) == 0 {
fmt.Printf("%s: N/A\n\n", dataType)
return
}
slices.Sort(datas)
start, end, step := generateBucket(datas[0], datas[len(datas)-1])
sum := 0


@ -19,16 +19,6 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
var (
clientGoRequestMetrics = &clientGoRequestMetricAdapter{}
clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{}
)
func init() {
clientGoRequestMetrics.RegisterWithK8sGoClient()
clientGoWorkloadMetrics.RegisterWithK8sGoClient()
}
// Metrics to be used with a discovery manager.
type Metrics struct {
FailedConfigs prometheus.Gauge


@ -35,6 +35,11 @@ const (
workqueueMetricsNamespace = KubernetesMetricsNamespace + "_workqueue"
)
var (
clientGoRequestMetrics = &clientGoRequestMetricAdapter{}
clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{}
)
var (
// Metrics for client-go's HTTP requests.
clientGoRequestResultMetricVec = prometheus.NewCounterVec(
@ -135,6 +140,9 @@ func clientGoMetrics() []prometheus.Collector {
}
func RegisterK8sClientMetricsWithPrometheus(registerer prometheus.Registerer) error {
clientGoRequestMetrics.RegisterWithK8sGoClient()
clientGoWorkloadMetrics.RegisterWithK8sGoClient()
for _, collector := range clientGoMetrics() {
err := registerer.Register(collector)
if err != nil {


@ -174,20 +174,25 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
labels[instanceTagsLabel] = model.LabelValue(tags)
}
addr := ""
if server.IPv6 != nil {
labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String())
addr = server.IPv6.Address.String()
}
if server.PublicIP != nil {
labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String())
addr = server.PublicIP.Address.String()
}
if server.PrivateIP != nil {
labels[instancePrivateIPv4Label] = model.LabelValue(*server.PrivateIP)
addr = *server.PrivateIP
}
addr := net.JoinHostPort(*server.PrivateIP, strconv.FormatUint(uint64(d.port), 10))
if addr != "" {
addr := net.JoinHostPort(addr, strconv.FormatUint(uint64(d.port), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
targets = append(targets, labels)
}
}


@ -60,7 +60,7 @@ api_url: %s
tg := tgs[0]
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Len(t, tg.Targets, 2)
require.Len(t, tg.Targets, 3)
for i, lbls := range []model.LabelSet{
{
@ -110,6 +110,28 @@ api_url: %s
"__meta_scaleway_instance_type": "DEV1-S",
"__meta_scaleway_instance_zone": "fr-par-1",
},
{
"__address__": "51.158.183.115:80",
"__meta_scaleway_instance_boot_type": "local",
"__meta_scaleway_instance_hostname": "routed-dualstack",
"__meta_scaleway_instance_id": "4904366a-7e26-4b65-b97b-6392c761247a",
"__meta_scaleway_instance_image_arch": "x86_64",
"__meta_scaleway_instance_image_id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160",
"__meta_scaleway_instance_image_name": "Ubuntu 22.04 Jammy Jellyfish",
"__meta_scaleway_instance_location_cluster_id": "19",
"__meta_scaleway_instance_location_hypervisor_id": "1201",
"__meta_scaleway_instance_location_node_id": "24",
"__meta_scaleway_instance_name": "routed-dualstack",
"__meta_scaleway_instance_organization_id": "20b3d507-96ac-454c-a795-bc731b46b12f",
"__meta_scaleway_instance_project_id": "20b3d507-96ac-454c-a795-bc731b46b12f",
"__meta_scaleway_instance_public_ipv4": "51.158.183.115",
"__meta_scaleway_instance_region": "nl-ams",
"__meta_scaleway_instance_security_group_id": "984414da-9fc2-49c0-a925-fed6266fe092",
"__meta_scaleway_instance_security_group_name": "Default security group",
"__meta_scaleway_instance_status": "running",
"__meta_scaleway_instance_type": "DEV1-S",
"__meta_scaleway_instance_zone": "nl-ams-1",
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
require.Equal(t, lbls, tg.Targets[i])


@ -216,6 +216,146 @@
"placement_group": null, "placement_group": null,
"private_nics": [], "private_nics": [],
"zone": "fr-par-1" "zone": "fr-par-1"
},
{
"id": "4904366a-7e26-4b65-b97b-6392c761247a",
"name": "routed-dualstack",
"arch": "x86_64",
"commercial_type": "DEV1-S",
"boot_type": "local",
"organization": "20b3d507-96ac-454c-a795-bc731b46b12f",
"project": "20b3d507-96ac-454c-a795-bc731b46b12f",
"hostname": "routed-dualstack",
"image": {
"id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"project": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"root_volume": {
"id": "13d945b9-5e78-4f9d-8ac4-c4bc2fa7c31a",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"volume_type": "unified",
"size": 10000000000
},
"extra_volumes": {},
"public": true,
"arch": "x86_64",
"creation_date": "2024-02-22T15:52:56.037007+00:00",
"modification_date": "2024-02-22T15:52:56.037007+00:00",
"default_bootscript": null,
"from_server": null,
"state": "available",
"tags": [],
"zone": "nl-ams-1"
},
"volumes": {
"0": {
"boot": false,
"id": "fe85c817-e67e-4e24-8f13-bde3e9f42620",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"volume_type": "l_ssd",
"export_uri": null,
"organization": "20b3d507-96ac-454c-a795-bc731b46b12f",
"project": "20b3d507-96ac-454c-a795-bc731b46b12f",
"server": {
"id": "4904366a-7e26-4b65-b97b-6392c761247a",
"name": "routed-dualstack"
},
"size": 20000000000,
"state": "available",
"creation_date": "2024-04-19T14:50:14.019739+00:00",
"modification_date": "2024-04-19T14:50:14.019739+00:00",
"tags": [],
"zone": "nl-ams-1"
}
},
"tags": [],
"state": "running",
"protected": false,
"state_detail": "booted",
"public_ip": {
"id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0",
"address": "51.158.183.115",
"dynamic": false,
"gateway": "62.210.0.1",
"netmask": "32",
"family": "inet",
"provisioning_mode": "dhcp",
"tags": [],
"state": "attached",
"ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e"
},
"public_ips": [
{
"id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0",
"address": "51.158.183.115",
"dynamic": false,
"gateway": "62.210.0.1",
"netmask": "32",
"family": "inet",
"provisioning_mode": "dhcp",
"tags": [],
"state": "attached",
"ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e"
},
{
"id": "f52a8c81-0875-4aee-b96e-eccfc6bec367",
"address": "2001:bc8:1640:1568:dc00:ff:fe21:91b",
"dynamic": false,
"gateway": "fe80::dc00:ff:fe21:91c",
"netmask": "64",
"family": "inet6",
"provisioning_mode": "slaac",
"tags": [],
"state": "attached",
"ipam_id": "40d1e6ea-e932-42f9-8acb-55398bec7ad6"
}
],
"mac_address": "de:00:00:21:09:1b",
"routed_ip_enabled": true,
"ipv6": null,
"extra_networks": [],
"dynamic_ip_required": false,
"enable_ipv6": false,
"private_ip": null,
"creation_date": "2024-04-19T14:50:14.019739+00:00",
"modification_date": "2024-04-19T14:52:21.181670+00:00",
"bootscript": {
"id": "5a520dda-96d6-4ed2-acd1-1d526b6859fe",
"public": true,
"title": "x86_64 mainline 4.4.182 rev1",
"architecture": "x86_64",
"organization": "11111111-1111-4111-8111-111111111111",
"project": "11111111-1111-4111-8111-111111111111",
"kernel": "http://10.196.2.9/kernel/x86_64-mainline-lts-4.4-4.4.182-rev1/vmlinuz-4.4.182",
"dtb": "",
"initrd": "http://10.196.2.9/initrd/initrd-Linux-x86_64-v3.14.6.gz",
"bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
"default": true,
"zone": "nl-ams-1"
},
"security_group": {
"id": "984414da-9fc2-49c0-a925-fed6266fe092",
"name": "Default security group"
},
"location": {
"zone_id": "ams1",
"platform_id": "23",
"cluster_id": "19",
"hypervisor_id": "1201",
"node_id": "24"
},
"maintenances": [],
"allowed_actions": [
"poweroff",
"terminate",
"reboot",
"stop_in_place",
"backup"
],
"placement_group": null,
"private_nics": [],
"zone": "nl-ams-1"
}
]
}


@ -2952,9 +2952,10 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_scaleway_instance_type`: commercial type of the server
* `__meta_scaleway_instance_zone`: the zone of the server (ex: `fr-par-1`, complete list [here](https://developers.scaleway.com/en/products/instance/api/#introduction))
This role uses the private IPv4 address by default. This can be
This role uses the first address it finds in the following order: private IPv4, public IPv4, public IPv6. This can be
changed with relabeling, as demonstrated in [the Prometheus scaleway-sd
configuration file](/documentation/examples/prometheus-scaleway.yml).
Should an instance have no address before relabeling, it will not be added to the target list and you will not be able to relabel it.
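
For illustration only (an assumed snippet, not part of this change), a relabel rule that prefers the public IPv4 address could look like:

```yaml
relabel_configs:
  # Prefer the instance's public IPv4 address, scraping it on port 80.
  - source_labels: [__meta_scaleway_instance_public_ipv4]
    regex: (.+)
    target_label: __address__
    replacement: ${1}:80
```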
#### Baremetal role


@ -84,8 +84,10 @@ or 31 days, whichever is smaller.
Prometheus has several flags that configure local storage. The most important are:
- `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`.
- `--storage.tsdb.retention.time`: When to remove old data. Defaults to `15d`.
Overrides `storage.tsdb.retention` if this flag is set to anything other than default.
- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is
set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention`
nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`.
Supported units: y, w, d, h, m, s, ms.
- `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain.
The oldest data will be removed first. Defaults to `0` or disabled. Units supported:
B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only
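
As a hedged illustration (not taken from this change), the retention flags can be combined; whichever limit is reached first causes the oldest blocks to be removed:

```
prometheus --storage.tsdb.path=data/ \
  --storage.tsdb.retention.time=30d \
  --storage.tsdb.retention.size=512MB
```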

go.mod

@ -11,7 +11,7 @@ require (
github.com/KimMachineGun/automemlimit v0.6.0
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
github.com/aws/aws-sdk-go v1.51.24
github.com/aws/aws-sdk-go v1.51.25
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
@ -36,7 +36,7 @@ require (
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.28.2
github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7
github.com/hetznercloud/hcloud-go/v2 v2.7.1
github.com/hetznercloud/hcloud-go/v2 v2.7.2
github.com/ionos-cloud/sdk-go/v6 v6.1.11
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.8

go.sum

@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.51.24 h1:nwL5MaommPkwb7Ixk24eWkdx5HY4of1gD10kFFVAl6A=
github.com/aws/aws-sdk-go v1.51.24/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls=
github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
@ -413,8 +413,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7/go.mod h1:svtx
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.7.1 h1:D4domwRSLOyBL/bwzd1O7hunBbKmeEHZTa7GmCYrniY=
github.com/hetznercloud/hcloud-go/v2 v2.7.1/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
github.com/hetznercloud/hcloud-go/v2 v2.7.2 h1:UlE7n1GQZacCfyjv9tDVUN7HZfOXErPIfM/M039u9A0=
github.com/hetznercloud/hcloud-go/v2 v2.7.2/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=


@ -96,12 +96,14 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io
err = file.Truncate(int64(filesize))
if err != nil {
file.Close()
level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err)
return nil, nil, err
}
fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0)
if err != nil {
file.Close()
level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err)
return nil, nil, err
}


@ -110,10 +110,7 @@ func TestMMapFile(t *testing.T) {
filename := file.Name()
defer os.Remove(filename)
fileAsBytes, closer, err := getMMapedFile(filename, 2, nil)
fileAsBytes, _, err := getMMapedFile(filename, 2, nil)
if err != nil {
t.Cleanup(func() { closer.Close() })
}
require.NoError(t, err)
copy(fileAsBytes, "ab")


@ -457,8 +457,17 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
}
}
// If the alert was previously firing, keep it around for a given
// retention time so it is reported as resolved to the AlertManager.
// If the alert is resolved (was firing but is now inactive) keep it for
// at least the retention period. This is important for a number of reasons:
//
// 1. It allows for Prometheus to be more resilient to network issues that
// would otherwise prevent a resolved alert from being reported as resolved
// to Alertmanager.
//
// 2. It helps reduce the chance of resolved notifications being lost if
// Alertmanager crashes or restarts between receiving the resolved alert
// from Prometheus and sending the resolved notification. This tends to
// occur for routes with large Group intervals.
if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) {
delete(r.active, fp)
}


@ -230,7 +230,11 @@ func (g *Group) run(ctx context.Context) {
g.evalIterationFunc(ctx, g, evalTimestamp)
}
g.RestoreForState(time.Now())
restoreStartTime := time.Now()
g.RestoreForState(restoreStartTime)
totalRestoreTimeSeconds := time.Since(restoreStartTime).Seconds()
g.metrics.GroupLastRestoreDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(totalRestoreTimeSeconds)
level.Debug(g.logger).Log("msg", "'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds)
g.shouldRestore = false
}
@ -779,17 +783,18 @@ const namespace = "prometheus"
// Metrics for rule evaluation.
type Metrics struct {
EvalDuration             prometheus.Summary
IterationDuration        prometheus.Summary
IterationsMissed         *prometheus.CounterVec
IterationsScheduled      *prometheus.CounterVec
EvalTotal                *prometheus.CounterVec
EvalFailures             *prometheus.CounterVec
GroupInterval            *prometheus.GaugeVec
GroupLastEvalTime        *prometheus.GaugeVec
GroupLastDuration        *prometheus.GaugeVec
GroupLastRestoreDuration *prometheus.GaugeVec
GroupRules               *prometheus.GaugeVec
GroupSamples             *prometheus.GaugeVec
}
// NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer,
@ -865,6 +870,14 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
},
[]string{"rule_group"},
),
GroupLastRestoreDuration: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_restore_duration_seconds",
Help: "The duration of the last alert rules alerts restoration using the `ALERTS_FOR_STATE` series.",
},
[]string{"rule_group"},
),
GroupRules: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
@ -894,6 +907,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
m.GroupInterval,
m.GroupLastEvalTime,
m.GroupLastDuration,
m.GroupLastRestoreDuration,
m.GroupRules,
m.GroupSamples,
)
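
Given the `prometheus` namespace used for these rule metrics, the new gauge should be exposed as `prometheus_rule_group_last_restore_duration_seconds`, labelled by `rule_group`, so the restore time of each group can be inspected directly.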


@ -81,6 +81,8 @@ type Options struct {
// Option to enable the ingestion of the created timestamp as a synthetic zero sample.
// See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md
EnableCreatedTimestampZeroIngestion bool
// Option to enable the ingestion of native histograms.
EnableNativeHistogramsIngestion bool
// Optional HTTP client options to use when scraping.
HTTPClientOptions []config_util.HTTPClientOption

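For context, a minimal sketch (assuming the scrape package API exercised by the test further down in this diff, which calls `NewManager(&Options{EnableNativeHistogramsIngestion: true}, nil, s, reg)`) of how a caller would opt in to native histogram ingestion:

```go
package main

import (
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/storage"
)

// newScrapeManager is a hypothetical helper: it enables native histogram
// ingestion via the new Options field and hands everything else to NewManager.
func newScrapeManager(app storage.Appendable) (*scrape.Manager, error) {
	opts := &scrape.Options{
		EnableNativeHistogramsIngestion: true,
	}
	return scrape.NewManager(opts, log.NewNopLogger(), app, prometheus.NewRegistry())
}
```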

@ -178,6 +178,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
opts.interval,
opts.timeout,
opts.scrapeClassicHistograms,
options.EnableNativeHistogramsIngestion,
options.EnableCreatedTimestampZeroIngestion,
options.ExtraMetrics,
options.EnableMetadataStorage,
@ -827,7 +828,10 @@ type scrapeLoop struct {
interval time.Duration
timeout time.Duration
scrapeClassicHistograms bool
// Feature flagged options.
enableNativeHistogramIngestion bool
enableCTZeroIngestion bool
appender func(ctx context.Context) storage.Appender
symbolTable *labels.SymbolTable
@ -1123,6 +1127,7 @@ func newScrapeLoop(ctx context.Context,
interval time.Duration,
timeout time.Duration,
scrapeClassicHistograms bool,
enableNativeHistogramIngestion bool,
enableCTZeroIngestion bool,
reportExtraMetrics bool,
appendMetadataToWAL bool,
@ -1153,33 +1158,34 @@ func newScrapeLoop(ctx context.Context,
}
sl := &scrapeLoop{
scraper:                        sc,
buffers:                        buffers,
cache:                          cache,
appender:                       appender,
symbolTable:                    symbolTable,
sampleMutator:                  sampleMutator,
reportSampleMutator:            reportSampleMutator,
stopped:                        make(chan struct{}),
offsetSeed:                     offsetSeed,
l:                              l,
parentCtx:                      ctx,
appenderCtx:                    appenderCtx,
honorTimestamps:                honorTimestamps,
trackTimestampsStaleness:       trackTimestampsStaleness,
enableCompression:              enableCompression,
sampleLimit:                    sampleLimit,
bucketLimit:                    bucketLimit,
maxSchema:                      maxSchema,
labelLimits:                    labelLimits,
interval:                       interval,
timeout:                        timeout,
scrapeClassicHistograms:        scrapeClassicHistograms,
enableNativeHistogramIngestion: enableNativeHistogramIngestion,
enableCTZeroIngestion:          enableCTZeroIngestion,
reportExtraMetrics:             reportExtraMetrics,
appendMetadataToWAL:            appendMetadataToWAL,
metrics:                        metrics,
skipOffsetting:                 skipOffsetting,
}
sl.ctx, sl.cancel = context.WithCancel(ctx)
@ -1627,7 +1633,7 @@ loop:
}
}
if isHistogram {
if isHistogram && sl.enableNativeHistogramIngestion {
if h != nil {
ref, err = app.AppendHistogram(ref, lset, t, h, nil)
} else {


@ -678,6 +678,7 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app
false,
false,
false,
false,
nil,
false,
newTestScrapeMetrics(t),
@ -819,6 +820,7 @@ func TestScrapeLoopRun(t *testing.T) {
false,
false,
false,
false,
nil,
false,
scrapeMetrics,
@ -962,6 +964,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
false,
false,
false,
false,
nil,
false,
scrapeMetrics,
@ -1571,6 +1574,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
app := &bucketLimitAppender{Appender: resApp, limit: 2}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl.enableNativeHistogramIngestion = true
sl.sampleMutator = func(l labels.Labels) labels.Labels {
if l.Has("deleteme") {
return labels.EmptyLabels()
@ -1797,14 +1801,15 @@ func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) {
func TestScrapeLoopAppendExemplar(t *testing.T) {
tests := []struct {
title                           string
scrapeClassicHistograms         bool
enableNativeHistogramsIngestion bool
scrapeText                      string
contentType                     string
discoveryLabels                 []string
floats                          []floatSample
histograms                      []histogramSample
exemplars                       []exemplar.Exemplar
}{
{ {
title: "Metric without exemplars", title: "Metric without exemplars",
@ -1862,6 +1867,8 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
},
{
title: "Native histogram with three exemplars",
enableNativeHistogramsIngestion: true,
scrapeText: `name: "test_histogram"
help: "Test histogram with many buckets removed to keep it manageable in size."
type: HISTOGRAM
@ -1976,6 +1983,8 @@ metric: <
},
{
title: "Native histogram with three exemplars scraped as classic histogram",
enableNativeHistogramsIngestion: true,
scrapeText: `name: "test_histogram"
help: "Test histogram with many buckets removed to keep it manageable in size."
type: HISTOGRAM
@ -2115,6 +2124,7 @@ metric: <
}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl.enableNativeHistogramIngestion = test.enableNativeHistogramsIngestion
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, discoveryLabels, false, nil)
}
@ -3710,7 +3720,7 @@ scrape_configs:
s.DB.EnableNativeHistograms()
reg := prometheus.NewRegistry()
mng, err := NewManager(nil, nil, s, reg)
mng, err := NewManager(&Options{EnableNativeHistogramsIngestion: true}, nil, s, reg)
require.NoError(t, err)
cfg, err := config.Load(configStr, false, log.NewNopLogger())
require.NoError(t, err)


@ -1,22 +0,0 @@
## Copying from opentelemetry/opentelemetry-collector-contrib
This files in the `prometheus/` and `prometheusremotewrite/` are copied from the OpenTelemetry Project[^1].
This is done instead of adding a go.mod dependency because OpenTelemetry depends on `prometheus/prometheus` and a cyclic dependency will be created. This is just a temporary solution and the long-term solution is to move the required packages from OpenTelemetry into `prometheus/prometheus`.
To update the dependency is a multi-step process:
1. Vendor the latest `prometheus/prometheus`@`main` into [`opentelemetry/opentelemetry-collector-contrib`](https://github.com/open-telemetry/opentelemetry-collector-contrib)
1. Update the VERSION in `update-copy.sh`.
1. Run `./update-copy.sh`.
### Why copy?
This is because the packages we copy depend on the [`prompb`](https://github.com/prometheus/prometheus/blob/main/prompb) package. While the package is relatively stable, there are still changes. For example, https://github.com/prometheus/prometheus/pull/11935 changed the types.
This means if we depend on the upstream packages directly, we will never able to make the changes like above. Hence we're copying the code for now.
### I need to manually change these files
When we do want to make changes to the types in `prompb`, we might need to edit the files directly. That is OK, please let @gouthamve or @jesusvazquez know so they can take care of updating the upstream code (by vendoring in `prometheus/prometheus` upstream and resolving conflicts) and then will run the copy
script again to keep things updated.
[^1]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus and https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheusremotewrite


@ -1,9 +1,20 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
// Copyright The OpenTelemetry Authors
package prometheus
// SPDX-License-Identifier: Apache-2.0
package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
import ( import (
"strings" "strings"


@ -1,9 +1,20 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
// Copyright The OpenTelemetry Authors
package prometheus
// SPDX-License-Identifier: Apache-2.0
package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
import ( import (
"strings" "strings"


@ -1,9 +1,20 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/unit_to_ucum.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
// Copyright The OpenTelemetry Authors
package prometheus
// SPDX-License-Identifier: Apache-2.0
package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
import "strings" import "strings"


@ -1,29 +1,42 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/helper.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
// Copyright The OpenTelemetry Authors
package prometheusremotewrite
// SPDX-License-Identifier: Apache-2.0
package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
import ( import (
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"log" "log"
"math" "math"
"slices"
"sort" "sort"
"strconv" "strconv"
"strings"
"time" "time"
"unicode/utf8" "unicode/utf8"
"github.com/cespare/xxhash/v2"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
"go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric"
conventions "go.opentelemetry.io/collector/semconv/v1.6.1" conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
) )
@ -48,7 +61,7 @@ const (
) )
type bucketBoundsData struct { type bucketBoundsData struct {
sig string ts *prompb.TimeSeries
bound float64 bound float64
} }
@ -66,94 +79,47 @@ func (a ByLabelName) Len() int { return len(a) }
func (a ByLabelName) Less(i, j int) bool { return a[i].Name < a[j].Name } func (a ByLabelName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// addSample finds a TimeSeries in tsMap that corresponds to the label set labels, and add sample to the TimeSeries; it // timeSeriesSignature returns a hashed label set signature.
// creates a new TimeSeries in the map if not found and returns the time series signature. // The label slice should not contain duplicate label names; this method sorts the slice by label name before creating
// tsMap will be unmodified if either labels or sample is nil, but can still be modified if the exemplar is nil.
func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label,
datatype string) string {
if sample == nil || labels == nil || tsMap == nil {
// This shouldn't happen
return ""
}
sig := timeSeriesSignature(datatype, labels)
ts := tsMap[sig]
if ts != nil {
ts.Samples = append(ts.Samples, *sample)
} else {
newTs := &prompb.TimeSeries{
Labels: labels,
Samples: []prompb.Sample{*sample},
}
tsMap[sig] = newTs
}
return sig
}
// addExemplars finds a bucket bound that corresponds to the exemplars value and add the exemplar to the specific sig;
// we only add exemplars if samples are presents
// tsMap is unmodified if either of its parameters is nil and samples are nil.
func addExemplars(tsMap map[string]*prompb.TimeSeries, exemplars []prompb.Exemplar, bucketBoundsData []bucketBoundsData) {
if len(tsMap) == 0 || len(bucketBoundsData) == 0 || len(exemplars) == 0 {
return
}
sort.Sort(byBucketBoundsData(bucketBoundsData))
for _, exemplar := range exemplars {
addExemplar(tsMap, bucketBoundsData, exemplar)
}
}
func addExemplar(tsMap map[string]*prompb.TimeSeries, bucketBounds []bucketBoundsData, exemplar prompb.Exemplar) {
for _, bucketBound := range bucketBounds {
sig := bucketBound.sig
bound := bucketBound.bound
ts := tsMap[sig]
if ts != nil && len(ts.Samples) > 0 && exemplar.Value <= bound {
ts.Exemplars = append(ts.Exemplars, exemplar)
return
}
}
}
// timeSeries return a string signature in the form of:
//
// TYPE-label1-value1- ... -labelN-valueN
//
// the label slice should not contain duplicate label names; this method sorts the slice by label name before creating
// the signature. // the signature.
func timeSeriesSignature(datatype string, labels []prompb.Label) string { // The algorithm is the same as in Prometheus' labels.StableHash function.
length := len(datatype) func timeSeriesSignature(labels []prompb.Label) uint64 {
for _, lb := range labels {
length += 2 + len(lb.GetName()) + len(lb.GetValue())
}
b := strings.Builder{}
b.Grow(length)
b.WriteString(datatype)
sort.Sort(ByLabelName(labels)) sort.Sort(ByLabelName(labels))
for _, lb := range labels { // Use xxhash.Sum64(b) for fast path as it's faster.
b.WriteString("-") b := make([]byte, 0, 1024)
b.WriteString(lb.GetName()) for i, v := range labels {
b.WriteString("-") if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) {
b.WriteString(lb.GetValue()) // If labels entry is 1KB+ do not allocate whole entry.
} h := xxhash.New()
_, _ = h.Write(b)
for _, v := range labels[i:] {
_, _ = h.WriteString(v.Name)
_, _ = h.Write(seps)
_, _ = h.WriteString(v.Value)
_, _ = h.Write(seps)
}
return h.Sum64()
}
return b.String() b = append(b, v.Name...)
b = append(b, seps[0])
b = append(b, v.Value...)
b = append(b, seps[0])
}
return xxhash.Sum64(b)
} }
var seps = []byte{'\xff'}
// createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values. // createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values.
// Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen, and overwrites are // Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and
// logged. Resulting label names are sanitized. // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized.
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, extras ...string) []prompb.Label { func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string,
serviceName, haveServiceName := resource.Attributes().Get(conventions.AttributeServiceName) ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label {
instance, haveInstanceID := resource.Attributes().Get(conventions.AttributeServiceInstanceID) resourceAttrs := resource.Attributes()
serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName)
instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID)
// Calculate the maximum possible number of labels we could return so we can preallocate l // Calculate the maximum possible number of labels we could return so we can preallocate l
maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2 maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2
@ -171,9 +137,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
// Ensure attributes are sorted by key for consistent merging of keys which // Ensure attributes are sorted by key for consistent merging of keys which
// collide when sanitized. // collide when sanitized.
labels := make([]prompb.Label, 0, attributes.Len()) labels := make([]prompb.Label, 0, maxLabelCount)
// XXX: Should we always drop service namespace/service name/service instance ID from the labels
// (as they get mapped to other Prometheus labels)?
attributes.Range(func(key string, value pcommon.Value) bool { attributes.Range(func(key string, value pcommon.Value) bool {
labels = append(labels, prompb.Label{Name: key, Value: value.AsString()}) if !slices.Contains(ignoreAttrs, key) {
labels = append(labels, prompb.Label{Name: key, Value: value.AsString()})
}
return true return true
}) })
sort.Stable(ByLabelName(labels)) sort.Stable(ByLabelName(labels))
@ -190,7 +160,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
// Map service.name + service.namespace to job // Map service.name + service.namespace to job
if haveServiceName { if haveServiceName {
val := serviceName.AsString() val := serviceName.AsString()
if serviceNamespace, ok := resource.Attributes().Get(conventions.AttributeServiceNamespace); ok { if serviceNamespace, ok := resourceAttrs.Get(conventions.AttributeServiceNamespace); ok {
val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val) val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val)
} }
l[model.JobLabel] = val l[model.JobLabel] = val
@ -213,7 +183,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
break break
} }
_, found := l[extras[i]] _, found := l[extras[i]]
if found { if found && logOnOverwrite {
log.Println("label " + extras[i] + " is overwritten. Check if Prometheus reserved labels are used.") log.Println("label " + extras[i] + " is overwritten. Check if Prometheus reserved labels are used.")
} }
// internal labels should be maintained // internal labels should be maintained
@ -224,12 +194,12 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
l[name] = extras[i+1] l[name] = extras[i+1]
} }
s := make([]prompb.Label, 0, len(l)) labels = labels[:0]
for k, v := range l { for k, v := range l {
s = append(s, prompb.Label{Name: k, Value: v}) labels = append(labels, prompb.Label{Name: k, Value: v})
} }
return s return labels
} }
// isValidAggregationTemporality checks whether an OTel metric has a valid // isValidAggregationTemporality checks whether an OTel metric has a valid
@ -249,100 +219,84 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool {
return false return false
} }
// addSingleHistogramDataPoint converts pt to 2 + min(len(ExplicitBounds), len(BucketCount)) + 1 samples. It func (c *prometheusConverter) addHistogramDataPoints(dataPoints pmetric.HistogramDataPointSlice,
// ignore extra buckets if len(ExplicitBounds) > len(BucketCounts) resource pcommon.Resource, settings Settings, baseName string) {
func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries, baseName string) { for x := 0; x < dataPoints.Len(); x++ {
timestamp := convertTimeStamp(pt.Timestamp()) pt := dataPoints.At(x)
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels) timestamp := convertTimeStamp(pt.Timestamp())
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false)
createLabels := func(nameSuffix string, extras ...string) []prompb.Label { // If the sum is unset, it indicates the _sum metric point should be
extraLabelCount := len(extras) / 2 // omitted
labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name if pt.HasSum() {
copy(labels, baseLabels) // treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{
Value: pt.Sum(),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
sum.Value = math.Float64frombits(value.StaleNaN)
}
sumlabels := createLabels(baseName+sumStr, baseLabels)
c.addSample(sum, sumlabels)
for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ {
labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]})
} }
// sum, count, and buckets of the histogram should append suffix to baseName // treat count as a sample in an individual TimeSeries
labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: baseName + nameSuffix}) count := &prompb.Sample{
Value: float64(pt.Count()),
return labels
}
// If the sum is unset, it indicates the _sum metric point should be
// omitted
if pt.HasSum() {
// treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{
Value: pt.Sum(),
Timestamp: timestamp, Timestamp: timestamp,
} }
if pt.Flags().NoRecordedValue() { if pt.Flags().NoRecordedValue() {
sum.Value = math.Float64frombits(value.StaleNaN) count.Value = math.Float64frombits(value.StaleNaN)
} }
sumlabels := createLabels(sumStr) countlabels := createLabels(baseName+countStr, baseLabels)
addSample(tsMap, sum, sumlabels, metric.Type().String()) c.addSample(count, countlabels)
} // cumulative count for conversion to cumulative histogram
var cumulativeCount uint64
// treat count as a sample in an individual TimeSeries var bucketBounds []bucketBoundsData
count := &prompb.Sample{
Value: float64(pt.Count()),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
count.Value = math.Float64frombits(value.StaleNaN)
}
countlabels := createLabels(countStr) // process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1
addSample(tsMap, count, countlabels, metric.Type().String()) for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ {
bound := pt.ExplicitBounds().At(i)
cumulativeCount += pt.BucketCounts().At(i)
bucket := &prompb.Sample{
Value: float64(cumulativeCount),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
bucket.Value = math.Float64frombits(value.StaleNaN)
}
boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
labels := createLabels(baseName+bucketStr, baseLabels, leStr, boundStr)
ts := c.addSample(bucket, labels)
// cumulative count for conversion to cumulative histogram bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: bound})
var cumulativeCount uint64 }
// add le=+Inf bucket
promExemplars := getPromExemplars[pmetric.HistogramDataPoint](pt) infBucket := &prompb.Sample{
var bucketBounds []bucketBoundsData
// process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1
for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ {
bound := pt.ExplicitBounds().At(i)
cumulativeCount += pt.BucketCounts().At(i)
bucket := &prompb.Sample{
Value: float64(cumulativeCount),
Timestamp: timestamp, Timestamp: timestamp,
} }
if pt.Flags().NoRecordedValue() { if pt.Flags().NoRecordedValue() {
bucket.Value = math.Float64frombits(value.StaleNaN) infBucket.Value = math.Float64frombits(value.StaleNaN)
} else {
infBucket.Value = float64(pt.Count())
} }
boundStr := strconv.FormatFloat(bound, 'f', -1, 64) infLabels := createLabels(baseName+bucketStr, baseLabels, leStr, pInfStr)
labels := createLabels(bucketStr, leStr, boundStr) ts := c.addSample(infBucket, infLabels)
sig := addSample(tsMap, bucket, labels, metric.Type().String())
bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: bound}) bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: math.Inf(1)})
} c.addExemplars(pt, bucketBounds)
// add le=+Inf bucket
infBucket := &prompb.Sample{
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
infBucket.Value = math.Float64frombits(value.StaleNaN)
} else {
infBucket.Value = float64(pt.Count())
}
infLabels := createLabels(bucketStr, leStr, pInfStr)
sig := addSample(tsMap, infBucket, infLabels, metric.Type().String())
bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: math.Inf(1)}) startTimestamp := pt.StartTimestamp()
addExemplars(tsMap, promExemplars, bucketBounds) if settings.ExportCreatedMetric && startTimestamp != 0 {
labels := createLabels(baseName+createdSuffix, baseLabels)
// add _created time series if needed c.addTimeSeriesIfNeeded(labels, startTimestamp, pt.Timestamp())
startTimestamp := pt.StartTimestamp() }
if settings.ExportCreatedMetric && startTimestamp != 0 {
labels := createLabels(createdSuffix)
addCreatedTimeSeriesIfNeeded(tsMap, labels, startTimestamp, pt.Timestamp(), metric.Type().String())
} }
} }
@ -441,129 +395,177 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp {
	return ts
}

func (c *prometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource,
	settings Settings, baseName string) {
	for x := 0; x < dataPoints.Len(); x++ {
		pt := dataPoints.At(x)
		timestamp := convertTimeStamp(pt.Timestamp())
		baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false)

		// treat sum as a sample in an individual TimeSeries
		sum := &prompb.Sample{
			Value:     pt.Sum(),
			Timestamp: timestamp,
		}
		if pt.Flags().NoRecordedValue() {
			sum.Value = math.Float64frombits(value.StaleNaN)
		}
		// sum and count of the summary should append suffix to baseName
		sumlabels := createLabels(baseName+sumStr, baseLabels)
		c.addSample(sum, sumlabels)

		// treat count as a sample in an individual TimeSeries
		count := &prompb.Sample{
			Value:     float64(pt.Count()),
			Timestamp: timestamp,
		}
		if pt.Flags().NoRecordedValue() {
			count.Value = math.Float64frombits(value.StaleNaN)
		}
		countlabels := createLabels(baseName+countStr, baseLabels)
		c.addSample(count, countlabels)

		// process each percentile/quantile
		for i := 0; i < pt.QuantileValues().Len(); i++ {
			qt := pt.QuantileValues().At(i)
			quantile := &prompb.Sample{
				Value:     qt.Value(),
				Timestamp: timestamp,
			}
			if pt.Flags().NoRecordedValue() {
				quantile.Value = math.Float64frombits(value.StaleNaN)
			}
			percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64)
			qtlabels := createLabels(baseName, baseLabels, quantileStr, percentileStr)
			c.addSample(quantile, qtlabels)
		}

		startTimestamp := pt.StartTimestamp()
		if settings.ExportCreatedMetric && startTimestamp != 0 {
			createdLabels := createLabels(baseName+createdSuffix, baseLabels)
			c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp())
		}
	}
}
// createLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name.
// If extras are provided, corresponding label pairs are also added to the returned slice.
// If extras is uneven length, the last (unpaired) extra will be ignored.
func createLabels(name string, baseLabels []prompb.Label, extras ...string) []prompb.Label {
	extraLabelCount := len(extras) / 2
	labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name
	copy(labels, baseLabels)

	n := len(extras)
	n -= n % 2
	for extrasIdx := 0; extrasIdx < n; extrasIdx += 2 {
		labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]})
	}

	labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: name})
	return labels
}

// getOrCreateTimeSeries returns the time series corresponding to the label set if existent, and false.
// Otherwise it creates a new one and returns that, and true.
func (c *prometheusConverter) getOrCreateTimeSeries(lbls []prompb.Label) (*prompb.TimeSeries, bool) {
	h := timeSeriesSignature(lbls)
	ts := c.unique[h]
	if ts != nil {
		if isSameMetric(ts, lbls) {
			// We already have this metric
			return ts, false
		}

		// Look for a matching conflict
		for _, cTS := range c.conflicts[h] {
			if isSameMetric(cTS, lbls) {
				// We already have this metric
				return cTS, false
			}
		}

		// New conflict
		ts = &prompb.TimeSeries{
			Labels: lbls,
		}
		c.conflicts[h] = append(c.conflicts[h], ts)
		return ts, true
	}

	// This metric is new
	ts = &prompb.TimeSeries{
		Labels: lbls,
	}
	c.unique[h] = ts
	return ts, true
}

// addTimeSeriesIfNeeded adds a corresponding time series if it doesn't already exist.
// If the time series doesn't already exist, it gets added with startTimestamp for its value and timestamp for its timestamp,
// both converted to milliseconds.
func (c *prometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTimestamp pcommon.Timestamp, timestamp pcommon.Timestamp) {
	ts, created := c.getOrCreateTimeSeries(lbls)
	if created {
		ts.Samples = []prompb.Sample{
			{
				// convert ns to ms
				Value:     float64(convertTimeStamp(startTimestamp)),
				Timestamp: convertTimeStamp(timestamp),
			},
		}
	}
}
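The converter above keys series by a hash of the label set (timeSeriesSignature) and keeps a separate conflicts bucket so that two different label sets that happen to hash to the same value are still stored as distinct series. A minimal, self-contained sketch of that lookup pattern, using a plain FNV hash and simplified Label/TimeSeries types instead of prompb; all names here are illustrative and not part of this change:

package main

import (
	"fmt"
	"hash/fnv"
)

// Label and TimeSeries stand in for prompb.Label and prompb.TimeSeries.
type Label struct{ Name, Value string }

type TimeSeries struct {
	Labels  []Label
	Samples []float64
}

type converter struct {
	unique    map[uint64]*TimeSeries   // first series stored under a given hash
	conflicts map[uint64][]*TimeSeries // further series whose label sets hash identically
}

// signature plays the role of timeSeriesSignature: a hash over the label pairs.
func signature(lbls []Label) uint64 {
	h := fnv.New64a()
	for _, l := range lbls {
		h.Write([]byte(l.Name))
		h.Write([]byte{0xff})
		h.Write([]byte(l.Value))
		h.Write([]byte{0xff})
	}
	return h.Sum64()
}

func sameLabels(a, b []Label) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// getOrCreate mirrors getOrCreateTimeSeries: return the existing series for an
// identical label set, otherwise create one; hash collisions go into conflicts.
func (c *converter) getOrCreate(lbls []Label) (*TimeSeries, bool) {
	h := signature(lbls)
	if ts := c.unique[h]; ts != nil {
		if sameLabels(ts.Labels, lbls) {
			return ts, false
		}
		for _, cts := range c.conflicts[h] {
			if sameLabels(cts.Labels, lbls) {
				return cts, false
			}
		}
		nts := &TimeSeries{Labels: lbls}
		c.conflicts[h] = append(c.conflicts[h], nts)
		return nts, true
	}
	ts := &TimeSeries{Labels: lbls}
	c.unique[h] = ts
	return ts, true
}

func main() {
	c := &converter{unique: map[uint64]*TimeSeries{}, conflicts: map[uint64][]*TimeSeries{}}
	lbls := []Label{{"__name__", "http_requests_total"}, {"job", "api"}}

	ts, created := c.getOrCreate(lbls)
	ts.Samples = append(ts.Samples, 1)
	fmt.Println(created) // true: first time this label set is seen

	ts2, created2 := c.getOrCreate(lbls)
	fmt.Println(created2, ts == ts2) // false true: the same series is reused
}

The point of the two-level structure is that the common case stays a single map lookup, while correctness under hash collisions is preserved by the per-hash slice.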
// addResourceTargetInfo converts the resource to the target info metric.
func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timestamp pcommon.Timestamp, converter *prometheusConverter) {
	if settings.DisableTargetInfo || timestamp == 0 {
		return
	}

	attributes := resource.Attributes()
	identifyingAttrs := []string{
		conventions.AttributeServiceNamespace,
		conventions.AttributeServiceName,
		conventions.AttributeServiceInstanceID,
	}
	nonIdentifyingAttrsCount := attributes.Len()
	for _, a := range identifyingAttrs {
		_, haveAttr := attributes.Get(a)
		if haveAttr {
			nonIdentifyingAttrsCount--
		}
	}
	if nonIdentifyingAttrsCount == 0 {
		// If we only have job + instance, then target_info isn't useful, so don't add it.
		return
	}

	name := targetMetricName
	if len(settings.Namespace) > 0 {
		name = settings.Namespace + "_" + name
	}

	labels := createAttributes(resource, attributes, settings.ExternalLabels, identifyingAttrs, false, model.MetricNameLabel, name)
	haveIdentifier := false
	for _, l := range labels {
		if l.Name == model.JobLabel || l.Name == model.InstanceLabel {
			haveIdentifier = true
			break
		}
	}
	if !haveIdentifier {
		// We need at least one identifying label to generate target_info.
		return
	}

	sample := &prompb.Sample{
		Value: float64(1),
		// convert ns to ms
		Timestamp: convertTimeStamp(timestamp),
	}
	converter.addSample(sample, labels)
}
// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms
@ -1,58 +1,59 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/histograms.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

package prometheusremotewrite

import (
	"fmt"
	"math"

	"github.com/prometheus/common/model"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/prompb"
)

const defaultZeroThreshold = 1e-128
func (c *prometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice,
	resource pcommon.Resource, settings Settings, baseName string) error {
	for x := 0; x < dataPoints.Len(); x++ {
		pt := dataPoints.At(x)
		lbls := createAttributes(
			resource,
			pt.Attributes(),
			settings.ExternalLabels,
			nil,
			true,
			model.MetricNameLabel,
			baseName,
		)
		ts, _ := c.getOrCreateTimeSeries(lbls)
		histogram, err := exponentialToNativeHistogram(pt)
		if err != nil {
			return err
		}
		ts.Histograms = append(ts.Histograms, histogram)

		exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt)
		ts.Exemplars = append(ts.Exemplars, exemplars...)
	}

	return nil
}
@ -1,19 +1,32 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

package prometheusremotewrite

import (
	"errors"
	"fmt"
	"sort"
	"strconv"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.uber.org/multierr"

	"github.com/prometheus/prometheus/prompb"
	prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)
@ -27,9 +40,33 @@ type Settings struct {
}

// FromMetrics converts pmetric.Metrics to Prometheus remote write format.
func FromMetrics(md pmetric.Metrics, settings Settings) (map[string]*prompb.TimeSeries, error) {
	c := newPrometheusConverter()
	errs := c.fromMetrics(md, settings)
	tss := c.timeSeries()
	out := make(map[string]*prompb.TimeSeries, len(tss))
	for i := range tss {
		out[strconv.Itoa(i)] = &tss[i]
	}

	return out, errs
}
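For callers, the exported entry point keeps its shape: FromMetrics now drives the prometheusConverter internally and flattens its series into a map keyed by the series index (strconv.Itoa(i)), purely to preserve the old return type. A rough caller-side sketch, assuming only the Settings zero value and standard pdata constructors; the metric name and values below are made up for illustration:

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
)

func main() {
	// Build a minimal OTLP payload: one gauge with a single data point.
	md := pmetric.NewMetrics()
	m := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("queue_length")
	dp := m.SetEmptyGauge().DataPoints().AppendEmpty()
	dp.SetDoubleValue(42)
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))

	// Conversion errors are accumulated per metric; a non-nil error does not
	// necessarily mean the whole payload was dropped.
	tsMap, err := prometheusremotewrite.FromMetrics(md, prometheusremotewrite.Settings{})
	if err != nil {
		fmt.Println("partial conversion error:", err)
	}
	for k, ts := range tsMap {
		fmt.Println(k, ts.Labels, ts.Samples)
	}
}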
// prometheusConverter converts from OTel write format to Prometheus write format.
type prometheusConverter struct {
unique map[uint64]*prompb.TimeSeries
conflicts map[uint64][]*prompb.TimeSeries
}
func newPrometheusConverter() *prometheusConverter {
return &prometheusConverter{
unique: map[uint64]*prompb.TimeSeries{},
conflicts: map[uint64][]*prompb.TimeSeries{},
}
}
// fromMetrics converts pmetric.Metrics to Prometheus remote write format.
func (c *prometheusConverter) fromMetrics(md pmetric.Metrics, settings Settings) (errs error) {
	resourceMetricsSlice := md.ResourceMetrics()
	for i := 0; i < resourceMetricsSlice.Len(); i++ {
		resourceMetrics := resourceMetricsSlice.At(i)
@ -39,8 +76,7 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp
		// use with the "target" info metric
		var mostRecentTimestamp pcommon.Timestamp
		for j := 0; j < scopeMetricsSlice.Len(); j++ {
			metricSlice := scopeMetricsSlice.At(j).Metrics()

			// TODO: decide if instrumentation library information should be exported as labels
			for k := 0; k < metricSlice.Len(); k++ {
@ -54,65 +90,125 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp
				promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes)

				// handle individual metrics based on type
				//exhaustive:enforce
				switch metric.Type() {
				case pmetric.MetricTypeGauge:
					dataPoints := metric.Gauge().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					c.addGaugeNumberDataPoints(dataPoints, resource, settings, promName)
				case pmetric.MetricTypeSum:
					dataPoints := metric.Sum().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					c.addSumNumberDataPoints(dataPoints, resource, metric, settings, promName)
				case pmetric.MetricTypeHistogram:
					dataPoints := metric.Histogram().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					c.addHistogramDataPoints(dataPoints, resource, settings, promName)
				case pmetric.MetricTypeExponentialHistogram:
					dataPoints := metric.ExponentialHistogram().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					errs = multierr.Append(errs, c.addExponentialHistogramDataPoints(
						dataPoints,
						resource,
						settings,
						promName,
					))
				case pmetric.MetricTypeSummary:
					dataPoints := metric.Summary().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					c.addSummaryDataPoints(dataPoints, resource, settings, promName)
				default:
					errs = multierr.Append(errs, errors.New("unsupported metric type"))
				}
			}
		}
		addResourceTargetInfo(resource, settings, mostRecentTimestamp, c)
	}

	return
}
// timeSeries returns a slice of the prompb.TimeSeries that were converted from OTel format.
func (c *prometheusConverter) timeSeries() []prompb.TimeSeries {
conflicts := 0
for _, ts := range c.conflicts {
conflicts += len(ts)
}
allTS := make([]prompb.TimeSeries, 0, len(c.unique)+conflicts)
for _, ts := range c.unique {
allTS = append(allTS, *ts)
}
for _, cTS := range c.conflicts {
for _, ts := range cTS {
allTS = append(allTS, *ts)
}
}
return allTS
}
func isSameMetric(ts *prompb.TimeSeries, lbls []prompb.Label) bool {
if len(ts.Labels) != len(lbls) {
return false
}
for i, l := range ts.Labels {
		if l.Name != lbls[i].Name || l.Value != lbls[i].Value {
return false
}
}
return true
}
// addExemplars adds exemplars for the dataPoint. For each exemplar, if it can find a bucket bound corresponding to its value,
// the exemplar is added to the bucket bound's time series, provided that the time series has samples.
func (c *prometheusConverter) addExemplars(dataPoint pmetric.HistogramDataPoint, bucketBounds []bucketBoundsData) {
if len(bucketBounds) == 0 {
return
}
exemplars := getPromExemplars(dataPoint)
if len(exemplars) == 0 {
return
}
sort.Sort(byBucketBoundsData(bucketBounds))
for _, exemplar := range exemplars {
for _, bound := range bucketBounds {
if len(bound.ts.Samples) > 0 && exemplar.Value <= bound.bound {
bound.ts.Exemplars = append(bound.ts.Exemplars, exemplar)
break
}
}
}
}
// addSample finds a TimeSeries that corresponds to lbls, and adds sample to it.
// If there is no corresponding TimeSeries already, it's created.
// The corresponding TimeSeries is returned.
// If either lbls is nil/empty or sample is nil, nothing is done.
func (c *prometheusConverter) addSample(sample *prompb.Sample, lbls []prompb.Label) *prompb.TimeSeries {
if sample == nil || len(lbls) == 0 {
// This shouldn't happen
return nil
}
ts, _ := c.getOrCreateTimeSeries(lbls)
ts.Samples = append(ts.Samples, *sample)
return ts
}

View file

@ -1,106 +1,110 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/number_data_points.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

package prometheusremotewrite

import (
	"math"

	"github.com/prometheus/common/model"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/prompb"
)

func (c *prometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.NumberDataPointSlice,
	resource pcommon.Resource, settings Settings, name string) {
	for x := 0; x < dataPoints.Len(); x++ {
		pt := dataPoints.At(x)
		labels := createAttributes(
			resource,
			pt.Attributes(),
			settings.ExternalLabels,
			nil,
			true,
			model.MetricNameLabel,
			name,
		)
		sample := &prompb.Sample{
			// convert ns to ms
			Timestamp: convertTimeStamp(pt.Timestamp()),
		}
		switch pt.ValueType() {
		case pmetric.NumberDataPointValueTypeInt:
			sample.Value = float64(pt.IntValue())
		case pmetric.NumberDataPointValueTypeDouble:
			sample.Value = pt.DoubleValue()
		}
		if pt.Flags().NoRecordedValue() {
			sample.Value = math.Float64frombits(value.StaleNaN)
		}
		c.addSample(sample, labels)
	}
}
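The NoRecordedValue branches above write Prometheus staleness markers rather than ordinary NaNs: value.StaleNaN is a fixed NaN bit pattern that only value.IsStaleNaN recognises. A small illustrative sketch; StaleNaN and IsStaleNaN are the existing helpers in model/value, the main wrapper exists only for demonstration:

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/value"
)

func main() {
	// Data points flagged NoRecordedValue become staleness markers: a NaN with
	// a specific bit pattern, distinguishable from an ordinary NaN.
	stale := math.Float64frombits(value.StaleNaN)

	fmt.Println(math.IsNaN(stale))            // true: it is still a NaN
	fmt.Println(value.IsStaleNaN(stale))      // true: recognised as the staleness marker
	fmt.Println(value.IsStaleNaN(math.NaN())) // false: an ordinary NaN is not a marker
}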
func (c *prometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDataPointSlice,
	resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string) {
	for x := 0; x < dataPoints.Len(); x++ {
		pt := dataPoints.At(x)
		lbls := createAttributes(
			resource,
			pt.Attributes(),
			settings.ExternalLabels,
			nil,
			true,
			model.MetricNameLabel,
			name,
		)
		sample := &prompb.Sample{
			// convert ns to ms
			Timestamp: convertTimeStamp(pt.Timestamp()),
		}
		switch pt.ValueType() {
		case pmetric.NumberDataPointValueTypeInt:
			sample.Value = float64(pt.IntValue())
		case pmetric.NumberDataPointValueTypeDouble:
			sample.Value = pt.DoubleValue()
		}
		if pt.Flags().NoRecordedValue() {
			sample.Value = math.Float64frombits(value.StaleNaN)
		}
		ts := c.addSample(sample, lbls)
		if ts != nil {
			exemplars := getPromExemplars[pmetric.NumberDataPoint](pt)
			ts.Exemplars = append(ts.Exemplars, exemplars...)
		}

		// add created time series if needed
		if settings.ExportCreatedMetric && metric.Sum().IsMonotonic() {
			startTimestamp := pt.StartTimestamp()
			if startTimestamp == 0 {
				return
			}

			createdLabels := make([]prompb.Label, len(lbls))
			copy(createdLabels, lbls)
			for i, l := range createdLabels {
				if l.Name == model.MetricNameLabel {
					createdLabels[i].Value = name + createdSuffix
					break
				}
			}
			c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp())
		}
	}
}
@ -1,14 +1,25 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

package prometheusremotewrite

import (
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/prometheus/prometheus/prompb"
	prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)
@ -1,27 +0,0 @@
#!/bin/bash
set -xe
OTEL_VERSION=v0.95.0
git clone https://github.com/open-telemetry/opentelemetry-collector-contrib ./tmp
cd ./tmp
git checkout $OTEL_VERSION
cd ..
rm -rf ./prometheusremotewrite/*
cp -r ./tmp/pkg/translator/prometheusremotewrite/*.go ./prometheusremotewrite
rm -rf ./prometheusremotewrite/*_test.go
rm -rf ./prometheus/*
cp -r ./tmp/pkg/translator/prometheus/*.go ./prometheus
rm -rf ./prometheus/*_test.go
rm -rf ./tmp
case $(sed --help 2>&1) in
*GNU*) set sed -i;;
*) set sed -i '';;
esac
"$@" -e 's#github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus#github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus#g' ./prometheusremotewrite/*.go ./prometheus/*.go
"$@" -e '1s#^#// DO NOT EDIT. COPIED AS-IS. SEE ../README.md\n\n#g' ./prometheusremotewrite/*.go ./prometheus/*.go
@ -331,7 +331,7 @@ func postingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher)
		return nil, err
	}

	res := vals[:0]
	for _, val := range vals {
		if m.Matches(val) {
			res = append(res, val)
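This hunk and the next one replace var res []string with res := vals[:0], filtering the returned label values in place instead of allocating a second slice; the trade-off is that vals must not be read again afterwards, since its backing array is overwritten. A standalone sketch of the pattern; the filterInPlace helper name is made up for illustration:

package main

import (
	"fmt"
	"strings"
)

// filterInPlace keeps only the values satisfying keep, reusing the backing
// array of vals (the res := vals[:0] idiom). The caller must not use vals
// afterwards, because its contents are overwritten.
func filterInPlace(vals []string, keep func(string) bool) []string {
	res := vals[:0]
	for _, v := range vals {
		if keep(v) {
			res = append(res, v)
		}
	}
	return res
}

func main() {
	vals := []string{"alpha", "beta", "gamma", "delta"}
	res := filterInPlace(vals, func(s string) bool {
		return strings.HasPrefix(s, "g") || strings.HasPrefix(s, "d")
	})
	fmt.Println(res) // [gamma delta]
}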
@ -368,7 +368,7 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Ma
		return nil, err
	}

	res := vals[:0]
	// If the inverse match is ="", we just want all the values.
	if m.Type == labels.MatchEqual && m.Value == "" {
		res = vals
@ -251,6 +251,12 @@ describe('analyzeCompletion test', () => {
pos: 11, // cursor is between the bracket after the string myL pos: 11, // cursor is between the bracket after the string myL
expectedContext: [{ kind: ContextKind.LabelName }], expectedContext: [{ kind: ContextKind.LabelName }],
}, },
{
title: 'continue to autocomplete QuotedLabelName in aggregate modifier',
expr: 'sum by ("myL")',
pos: 12, // cursor is between the bracket after the string myL
expectedContext: [{ kind: ContextKind.LabelName }],
},
{ {
title: 'autocomplete labelName in a list', title: 'autocomplete labelName in a list',
expr: 'sum by (myLabel1,)', expr: 'sum by (myLabel1,)',
@ -263,6 +269,12 @@ describe('analyzeCompletion test', () => {
pos: 23, // cursor is between the bracket after the string myLab pos: 23, // cursor is between the bracket after the string myLab
expectedContext: [{ kind: ContextKind.LabelName }], expectedContext: [{ kind: ContextKind.LabelName }],
}, },
{
title: 'autocomplete labelName in a list 2',
expr: 'sum by ("myLabel1", "myLab")',
pos: 27, // cursor is between the bracket after the string myLab
expectedContext: [{ kind: ContextKind.LabelName }],
},
{ {
title: 'autocomplete labelName associated to a metric', title: 'autocomplete labelName associated to a metric',
expr: 'metric_name{}', expr: 'metric_name{}',
@ -299,6 +311,12 @@ describe('analyzeCompletion test', () => {
pos: 22, // cursor is between the bracket after the comma pos: 22, // cursor is between the bracket after the comma
expectedContext: [{ kind: ContextKind.LabelName, metricName: '' }], expectedContext: [{ kind: ContextKind.LabelName, metricName: '' }],
}, },
{
title: 'continue to autocomplete quoted labelName associated to a metric',
expr: '{"metric_"}',
pos: 10, // cursor is between the bracket after the string metric_
expectedContext: [{ kind: ContextKind.MetricName, metricName: 'metric_' }],
},
{ {
title: 'autocomplete the labelValue with metricName + labelName', title: 'autocomplete the labelValue with metricName + labelName',
expr: 'metric_name{labelName=""}', expr: 'metric_name{labelName=""}',
@ -342,6 +360,30 @@ describe('analyzeCompletion test', () => {
}, },
], ],
}, },
{
title: 'autocomplete the labelValue with metricName + quoted labelName',
expr: 'metric_name{labelName="labelValue", "labelName"!=""}',
pos: 50, // cursor is between the quotes
expectedContext: [
{
kind: ContextKind.LabelValue,
metricName: 'metric_name',
labelName: 'labelName',
matchers: [
{
name: 'labelName',
type: Neq,
value: '',
},
{
name: 'labelName',
type: EqlSingle,
value: 'labelValue',
},
],
},
],
},
{ {
title: 'autocomplete the labelValue associated to a labelName', title: 'autocomplete the labelValue associated to a labelName',
expr: '{labelName=""}', expr: '{labelName=""}',
@ -427,6 +469,12 @@ describe('analyzeCompletion test', () => {
pos: 22, // cursor is after '!' pos: 22, // cursor is after '!'
expectedContext: [{ kind: ContextKind.MatchOp }], expectedContext: [{ kind: ContextKind.MatchOp }],
}, },
{
title: 'autocomplete matchOp 3',
expr: 'metric_name{"labelName"!}',
pos: 24, // cursor is after '!'
expectedContext: [{ kind: ContextKind.BinOp }],
},
    {
      title: 'autocomplete duration with offset',
      expr: 'http_requests_total offset 5',
@ -29,7 +29,6 @@ import {
GroupingLabels, GroupingLabels,
Gte, Gte,
Gtr, Gtr,
LabelMatcher,
LabelMatchers, LabelMatchers,
LabelName, LabelName,
Lss, Lss,
@ -52,6 +51,9 @@ import {
SubqueryExpr, SubqueryExpr,
Unless, Unless,
VectorSelector, VectorSelector,
UnquotedLabelMatcher,
QuotedLabelMatcher,
QuotedLabelName,
} from '@prometheus-io/lezer-promql'; } from '@prometheus-io/lezer-promql';
import { Completion, CompletionContext, CompletionResult } from '@codemirror/autocomplete'; import { Completion, CompletionContext, CompletionResult } from '@codemirror/autocomplete';
import { EditorState } from '@codemirror/state'; import { EditorState } from '@codemirror/state';
@ -181,7 +183,10 @@ export function computeStartCompletePosition(node: SyntaxNode, pos: number): num
  let start = node.from;
  if (node.type.id === LabelMatchers || node.type.id === GroupingLabels) {
    start = computeStartCompleteLabelPositionInLabelMatcherOrInGroupingLabel(node, pos);
  } else if (
    node.type.id === FunctionCallBody ||
    (node.type.id === StringLiteral && (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher))
  ) {
    // When the cursor is between bracket, quote, we need to increment the starting position to avoid to consider the open bracket/ first string.
    start++;
  } else if (
@ -212,7 +217,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
result.push({ kind: ContextKind.Duration }); result.push({ kind: ContextKind.Duration });
break; break;
} }
      if (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher) {
// In this case the current token is not itself a valid match op yet: // In this case the current token is not itself a valid match op yet:
// metric_name{labelName!} // metric_name{labelName!}
result.push({ kind: ContextKind.MatchOp }); result.push({ kind: ContextKind.MatchOp });
@ -380,7 +385,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
// sum by (myL) // sum by (myL)
// So we have to continue to autocomplete any kind of labelName // So we have to continue to autocomplete any kind of labelName
result.push({ kind: ContextKind.LabelName }); result.push({ kind: ContextKind.LabelName });
      } else if (node.parent?.type.id === UnquotedLabelMatcher) {
// In that case we are in the given situation: // In that case we are in the given situation:
// metric_name{myL} or {myL} // metric_name{myL} or {myL}
// so we have or to continue to autocomplete any kind of labelName or // so we have or to continue to autocomplete any kind of labelName or
@ -389,9 +394,9 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
} }
break; break;
    case StringLiteral:
      if (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher) {
        // In this case we are in the given situation:
        // metric_name{labelName=""} or metric_name{"labelName"=""}
// So we can autocomplete the labelValue // So we can autocomplete the labelValue
// Get the labelName. // Get the labelName.
@ -399,18 +404,34 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
let labelName = ''; let labelName = '';
if (node.parent.firstChild?.type.id === LabelName) { if (node.parent.firstChild?.type.id === LabelName) {
labelName = state.sliceDoc(node.parent.firstChild.from, node.parent.firstChild.to); labelName = state.sliceDoc(node.parent.firstChild.from, node.parent.firstChild.to);
} else if (node.parent.firstChild?.type.id === QuotedLabelName) {
labelName = state.sliceDoc(node.parent.firstChild.from, node.parent.firstChild.to).slice(1, -1);
} }
// then find the metricName if it exists // then find the metricName if it exists
const metricName = getMetricNameInVectorSelector(node, state); const metricName = getMetricNameInVectorSelector(node, state);
// finally get the full matcher available // finally get the full matcher available
        const matcherNode = walkBackward(node, LabelMatchers);
        const labelMatcherOpts = [QuotedLabelName, QuotedLabelMatcher, UnquotedLabelMatcher];
        let labelMatchers: Matcher[] = [];
        for (const labelMatcherOpt of labelMatcherOpts) {
          labelMatchers = labelMatchers.concat(buildLabelMatchers(matcherNode ? matcherNode.getChildren(labelMatcherOpt) : [], state));
        }
result.push({ result.push({
kind: ContextKind.LabelValue, kind: ContextKind.LabelValue,
metricName: metricName, metricName: metricName,
labelName: labelName, labelName: labelName,
matchers: labelMatchers, matchers: labelMatchers,
}); });
} else if (node.parent?.parent?.type.id === GroupingLabels) {
// In this case we are in the given situation:
// sum by ("myL")
// So we have to continue to autocomplete any kind of labelName
result.push({ kind: ContextKind.LabelName });
} else if (node.parent?.parent?.type.id === LabelMatchers) {
// In that case we are in the given situation:
// {""} or {"metric_"}
// since this is for the QuotedMetricName we need to continue to autocomplete for the metric names
result.push({ kind: ContextKind.MetricName, metricName: state.sliceDoc(node.from, node.to).slice(1, -1) });
      }
      break;
    case NumberLiteral:
@ -12,33 +12,75 @@
// limitations under the License.

import { SyntaxNode } from '@lezer/common';
import {
  EqlRegex,
  EqlSingle,
  LabelName,
  MatchOp,
  Neq,
  NeqRegex,
  StringLiteral,
  UnquotedLabelMatcher,
  QuotedLabelMatcher,
  QuotedLabelName,
} from '@prometheus-io/lezer-promql';
import { EditorState } from '@codemirror/state';
import { Matcher } from '../types';
function createMatcher(labelMatcher: SyntaxNode, state: EditorState): Matcher {
  const matcher = new Matcher(0, '', '');
  const cursor = labelMatcher.cursor();
  switch (cursor.type.id) {
    case QuotedLabelMatcher:
      if (!cursor.next()) {
        // weird case, that would mean the QuotedLabelMatcher doesn't have any child.
        return matcher;
      }
      do {
        switch (cursor.type.id) {
          case QuotedLabelName:
            matcher.name = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
            break;
          case MatchOp:
            const ope = cursor.node.firstChild;
            if (ope) {
              matcher.type = ope.type.id;
            }
            break;
          case StringLiteral:
            matcher.value = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
            break;
        }
      } while (cursor.nextSibling());
      break;
    case UnquotedLabelMatcher:
      if (!cursor.next()) {
        // weird case, that would mean the UnquotedLabelMatcher doesn't have any child.
        return matcher;
      }
      do {
        switch (cursor.type.id) {
          case LabelName:
            matcher.name = state.sliceDoc(cursor.from, cursor.to);
            break;
          case MatchOp:
            const ope = cursor.node.firstChild;
            if (ope) {
              matcher.type = ope.type.id;
            }
            break;
          case StringLiteral:
            matcher.value = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
            break;
        }
      } while (cursor.nextSibling());
      break;
    case QuotedLabelName:
      matcher.name = '__name__';
      matcher.value = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
      matcher.type = EqlSingle;
      break;
  }
  return matcher;
}
@ -204,6 +204,11 @@ describe('promql operations', () => {
expectedValueType: ValueType.vector, expectedValueType: ValueType.vector,
expectedDiag: [] as Diagnostic[], expectedDiag: [] as Diagnostic[],
}, },
{
expr: 'foo and on(test,"blub") bar',
expectedValueType: ValueType.vector,
expectedDiag: [] as Diagnostic[],
},
{ {
expr: 'foo and on() bar', expr: 'foo and on() bar',
expectedValueType: ValueType.vector, expectedValueType: ValueType.vector,
@ -214,6 +219,11 @@ describe('promql operations', () => {
expectedValueType: ValueType.vector, expectedValueType: ValueType.vector,
expectedDiag: [] as Diagnostic[], expectedDiag: [] as Diagnostic[],
}, },
{
expr: 'foo and ignoring(test,"blub") bar',
expectedValueType: ValueType.vector,
expectedDiag: [] as Diagnostic[],
},
{ {
expr: 'foo and ignoring() bar', expr: 'foo and ignoring() bar',
expectedValueType: ValueType.vector, expectedValueType: ValueType.vector,
@ -229,6 +239,11 @@ describe('promql operations', () => {
expectedValueType: ValueType.vector, expectedValueType: ValueType.vector,
expectedDiag: [] as Diagnostic[], expectedDiag: [] as Diagnostic[],
}, },
{
expr: 'foo / on(test,blub) group_left("bar") bar',
expectedValueType: ValueType.vector,
expectedDiag: [] as Diagnostic[],
},
{ {
expr: 'foo / ignoring(test,blub) group_left(blub) bar', expr: 'foo / ignoring(test,blub) group_left(blub) bar',
expectedValueType: ValueType.vector, expectedValueType: ValueType.vector,
@ -825,6 +840,134 @@ describe('promql operations', () => {
expectedValueType: ValueType.vector, expectedValueType: ValueType.vector,
expectedDiag: [], expectedDiag: [],
}, },
{
expr: '{"foo"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
// with metric name in the middle
expr: '{a="b","foo",c~="d"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"foo", a="bc"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"colon:in:the:middle"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"dot.in.the.middle"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"😀 in metric name"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
// quotes with escape
expr: '{"this is \"foo\" metric"}', // eslint-disable-line
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"foo","colon:in:the:middle"="val"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"foo","dot.in.the.middle"="val"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"foo","😀 in label name"="val"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
// quotes with escape
expr: '{"foo","this is \"bar\" label"="val"}', // eslint-disable-line
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: 'foo{"bar"}',
expectedValueType: ValueType.vector,
expectedDiag: [
{
from: 0,
message: 'metric name must not be set twice: foo or bar',
severity: 'error',
to: 10,
},
],
},
{
expr: '{"foo", __name__="bar"}',
expectedValueType: ValueType.vector,
expectedDiag: [
{
from: 0,
message: 'metric name must not be set twice: foo or bar',
severity: 'error',
to: 23,
},
],
},
{
expr: '{"foo", "__name__"="bar"}',
expectedValueType: ValueType.vector,
expectedDiag: [
{
from: 0,
message: 'metric name must not be set twice: foo or bar',
severity: 'error',
to: 25,
},
],
},
{
expr: '{"__name__"="foo", __name__="bar"}',
expectedValueType: ValueType.vector,
expectedDiag: [
{
from: 0,
message: 'metric name must not be set twice: foo or bar',
severity: 'error',
to: 34,
},
],
},
{
expr: '{"foo", "bar"}',
expectedValueType: ValueType.vector,
expectedDiag: [
{
from: 0,
to: 14,
message: 'metric name must not be set twice: foo or bar',
severity: 'error',
},
],
},
{
expr: `{'foo\`metric':'bar'}`, // eslint-disable-line
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{`foo\"metric`=`bar`}', // eslint-disable-line
expectedValueType: ValueType.vector,
expectedDiag: [],
},
  ];
  testCases.forEach((value) => {
    const state = createEditorState(value.expr);
@ -27,7 +27,6 @@ import {
Gte, Gte,
Gtr, Gtr,
Identifier, Identifier,
LabelMatcher,
LabelMatchers, LabelMatchers,
Lss, Lss,
Lte, Lte,
@ -36,11 +35,14 @@ import {
Or, Or,
ParenExpr, ParenExpr,
Quantile, Quantile,
QuotedLabelMatcher,
QuotedLabelName,
StepInvariantExpr, StepInvariantExpr,
SubqueryExpr, SubqueryExpr,
Topk, Topk,
UnaryExpr, UnaryExpr,
Unless, Unless,
UnquotedLabelMatcher,
VectorSelector, VectorSelector,
} from '@prometheus-io/lezer-promql'; } from '@prometheus-io/lezer-promql';
import { containsAtLeastOneChild } from './path-finder'; import { containsAtLeastOneChild } from './path-finder';
@ -282,7 +284,11 @@ export class Parser {
  private checkVectorSelector(node: SyntaxNode): void {
    const matchList = node.getChild(LabelMatchers);
    const labelMatcherOpts = [QuotedLabelName, QuotedLabelMatcher, UnquotedLabelMatcher];
    let labelMatchers: Matcher[] = [];
    for (const labelMatcherOpt of labelMatcherOpts) {
      labelMatchers = labelMatchers.concat(buildLabelMatchers(matchList ? matchList.getChildren(labelMatcherOpt) : [], this.state));
    }
let vectorSelectorName = ''; let vectorSelectorName = '';
// VectorSelector ( Identifier ) // VectorSelector ( Identifier )
// https://github.com/promlabs/lezer-promql/blob/71e2f9fa5ae6f5c5547d5738966cd2512e6b99a8/src/promql.grammar#L200 // https://github.com/promlabs/lezer-promql/blob/71e2f9fa5ae6f5c5547d5738966cd2512e6b99a8/src/promql.grammar#L200
@ -301,6 +307,14 @@ export class Parser {
// adding the metric name as a Matcher to avoid a false positive for this kind of expression: // adding the metric name as a Matcher to avoid a false positive for this kind of expression:
// foo{bare=''} // foo{bare=''}
labelMatchers.push(new Matcher(EqlSingle, '__name__', vectorSelectorName)); labelMatchers.push(new Matcher(EqlSingle, '__name__', vectorSelectorName));
} else {
// In this case when metric name is not set outside the braces
// It is checking whether metric name is set twice like in :
// {__name__:"foo", "foo"}, {"foo", "bar"}
const labelMatchersMetricName = labelMatchers.filter((lm) => lm.name === '__name__');
if (labelMatchersMetricName.length > 1) {
this.addDiagnostic(node, `metric name must not be set twice: ${labelMatchersMetricName[0].value} or ${labelMatchersMetricName[1].value}`);
}
    }

    // A Vector selector must contain at least one non-empty matcher to prevent
@ -97,7 +97,7 @@ binModifiers {
}

GroupingLabels {
  "(" ((LabelName | QuotedLabelName) ("," (LabelName | QuotedLabelName))* ","?)? ")"
}

FunctionCall {
@ -220,7 +220,7 @@ VectorSelector {
}

LabelMatchers {
  "{" ((UnquotedLabelMatcher | QuotedLabelMatcher | QuotedLabelName)("," (UnquotedLabelMatcher | QuotedLabelMatcher | QuotedLabelName))* ","?)? "}"
}

MatchOp {
@ -230,8 +230,16 @@ MatchOp {
  NeqRegex
}

UnquotedLabelMatcher {
  LabelName MatchOp StringLiteral
}

QuotedLabelMatcher {
  QuotedLabelName MatchOp StringLiteral
}

QuotedLabelName {
  StringLiteral
}

StepInvariantExpr {
@ -112,6 +112,54 @@ PromQL(
) )
) )
# Quoted label name in grouping labels
sum by("job", mode) (test_metric) / on("job") group_left sum by("job")(test_metric)
==>
PromQL(
BinaryExpr(
AggregateExpr(
AggregateOp(Sum),
AggregateModifier(
By,
GroupingLabels(
QuotedLabelName(StringLiteral),
LabelName
)
),
FunctionCallBody(
VectorSelector(
Identifier
)
)
),
Div,
MatchingModifierClause(
On,
GroupingLabels(
QuotedLabelName(StringLiteral)
)
GroupLeft
),
AggregateExpr(
AggregateOp(Sum),
AggregateModifier(
By,
GroupingLabels(
QuotedLabelName(StringLiteral)
)
),
FunctionCallBody(
VectorSelector(
Identifier
)
)
)
)
)
# Case insensitivity for aggregations and binop modifiers. # Case insensitivity for aggregations and binop modifiers.
SuM BY(testlabel1) (testmetric1) / IGNOring(testlabel2) AVG withOUT(testlabel3) (testmetric2) SuM BY(testlabel1) (testmetric1) / IGNOring(testlabel2) AVG withOUT(testlabel3) (testmetric2)
@ -226,25 +274,25 @@ PromQL(
    VectorSelector(
      Identifier,
      LabelMatchers(
        UnquotedLabelMatcher(
          LabelName,
          MatchOp(EqlSingle),
          StringLiteral
        ),
        UnquotedLabelMatcher(
          LabelName,
          MatchOp(Neq),
          StringLiteral
        ),
        UnquotedLabelMatcher(
          LabelName,
          MatchOp(EqlRegex),
          StringLiteral
        ),
        UnquotedLabelMatcher(
          LabelName,
          MatchOp(NeqRegex),
          StringLiteral
        )
      )
    )
@ -571,14 +619,14 @@ PromQL(NumberLiteral)
NaN{foo="bar"} NaN{foo="bar"}
==> ==>
PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(UnquotedLabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))
# Trying to illegally use Inf as a metric name. # Trying to illegally use Inf as a metric name.
Inf{foo="bar"} Inf{foo="bar"}
==> ==>
PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(UnquotedLabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))
# Negative offset # Negative offset
@ -614,3 +662,24 @@ MetricName(Identifier)
==> ==>
PromQL(BinaryExpr(NumberLiteral,Add,BinaryExpr(VectorSelector(Identifier),Atan2,VectorSelector(Identifier)))) PromQL(BinaryExpr(NumberLiteral,Add,BinaryExpr(VectorSelector(Identifier),Atan2,VectorSelector(Identifier))))
# Testing quoted metric name
{"metric_name"}
==>
PromQL(VectorSelector(LabelMatchers(QuotedLabelName(StringLiteral))))
# Testing quoted label name
{"foo"="bar"}
==>
PromQL(VectorSelector(LabelMatchers(QuotedLabelMatcher(QuotedLabelName(StringLiteral), MatchOp(EqlSingle), StringLiteral))))
# Testing quoted metric name and label name
{"metric_name", "foo"="bar"}
==>
PromQL(VectorSelector(LabelMatchers(QuotedLabelName(StringLiteral), QuotedLabelMatcher(QuotedLabelName(StringLiteral), MatchOp(EqlSingle), StringLiteral))))