Merge branch 'main' into refactor/add_max_func_to_maxTimestamp
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
commit 759ca8b207
@@ -3,6 +3,7 @@
## unreleased

* [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633
* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` to measure the time it takes to restore a rule group. #13974

## 2.51.2 / 2024-04-09
@@ -217,6 +217,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
case "native-histograms":
c.tsdb.EnableNativeHistograms = true
c.scrape.EnableNativeHistogramsIngestion = true
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
@@ -838,6 +838,10 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB
}

func displayHistogram(dataType string, datas []int, total int) {
if len(datas) == 0 {
fmt.Printf("%s: N/A\n\n", dataType)
return
}
slices.Sort(datas)
start, end, step := generateBucket(datas[0], datas[len(datas)-1])
sum := 0
@@ -19,16 +19,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
)

var (
clientGoRequestMetrics = &clientGoRequestMetricAdapter{}
clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{}
)

func init() {
clientGoRequestMetrics.RegisterWithK8sGoClient()
clientGoWorkloadMetrics.RegisterWithK8sGoClient()
}

// Metrics to be used with a discovery manager.
type Metrics struct {
FailedConfigs prometheus.Gauge
@@ -35,6 +35,11 @@ const (
workqueueMetricsNamespace = KubernetesMetricsNamespace + "_workqueue"
)

var (
clientGoRequestMetrics = &clientGoRequestMetricAdapter{}
clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{}
)

var (
// Metrics for client-go's HTTP requests.
clientGoRequestResultMetricVec = prometheus.NewCounterVec(
@@ -135,6 +140,9 @@ func clientGoMetrics() []prometheus.Collector {
}

func RegisterK8sClientMetricsWithPrometheus(registerer prometheus.Registerer) error {
clientGoRequestMetrics.RegisterWithK8sGoClient()
clientGoWorkloadMetrics.RegisterWithK8sGoClient()

for _, collector := range clientGoMetrics() {
err := registerer.Register(collector)
if err != nil {
@@ -174,20 +174,25 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
labels[instanceTagsLabel] = model.LabelValue(tags)
}

addr := ""
if server.IPv6 != nil {
labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String())
addr = server.IPv6.Address.String()
}

if server.PublicIP != nil {
labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String())
addr = server.PublicIP.Address.String()
}

if server.PrivateIP != nil {
labels[instancePrivateIPv4Label] = model.LabelValue(*server.PrivateIP)
addr = *server.PrivateIP
}

addr := net.JoinHostPort(*server.PrivateIP, strconv.FormatUint(uint64(d.port), 10))
if addr != "" {
addr := net.JoinHostPort(addr, strconv.FormatUint(uint64(d.port), 10))
labels[model.AddressLabel] = model.LabelValue(addr)

targets = append(targets, labels)
}
}
@@ -60,7 +60,7 @@ api_url: %s
tg := tgs[0]
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Len(t, tg.Targets, 2)
require.Len(t, tg.Targets, 3)

for i, lbls := range []model.LabelSet{
{
@@ -110,6 +110,28 @@ api_url: %s
"__meta_scaleway_instance_type": "DEV1-S",
"__meta_scaleway_instance_zone": "fr-par-1",
},
{
"__address__": "51.158.183.115:80",
"__meta_scaleway_instance_boot_type": "local",
"__meta_scaleway_instance_hostname": "routed-dualstack",
"__meta_scaleway_instance_id": "4904366a-7e26-4b65-b97b-6392c761247a",
"__meta_scaleway_instance_image_arch": "x86_64",
"__meta_scaleway_instance_image_id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160",
"__meta_scaleway_instance_image_name": "Ubuntu 22.04 Jammy Jellyfish",
"__meta_scaleway_instance_location_cluster_id": "19",
"__meta_scaleway_instance_location_hypervisor_id": "1201",
"__meta_scaleway_instance_location_node_id": "24",
"__meta_scaleway_instance_name": "routed-dualstack",
"__meta_scaleway_instance_organization_id": "20b3d507-96ac-454c-a795-bc731b46b12f",
"__meta_scaleway_instance_project_id": "20b3d507-96ac-454c-a795-bc731b46b12f",
"__meta_scaleway_instance_public_ipv4": "51.158.183.115",
"__meta_scaleway_instance_region": "nl-ams",
"__meta_scaleway_instance_security_group_id": "984414da-9fc2-49c0-a925-fed6266fe092",
"__meta_scaleway_instance_security_group_name": "Default security group",
"__meta_scaleway_instance_status": "running",
"__meta_scaleway_instance_type": "DEV1-S",
"__meta_scaleway_instance_zone": "nl-ams-1",
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
require.Equal(t, lbls, tg.Targets[i])
discovery/scaleway/testdata/instance.json (vendored): 142 changes
@@ -216,6 +216,146 @@
"placement_group": null,
"private_nics": [],
"zone": "fr-par-1"
},
{
"id": "4904366a-7e26-4b65-b97b-6392c761247a",
"name": "routed-dualstack",
"arch": "x86_64",
"commercial_type": "DEV1-S",
"boot_type": "local",
"organization": "20b3d507-96ac-454c-a795-bc731b46b12f",
"project": "20b3d507-96ac-454c-a795-bc731b46b12f",
"hostname": "routed-dualstack",
"image": {
"id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"project": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"root_volume": {
"id": "13d945b9-5e78-4f9d-8ac4-c4bc2fa7c31a",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"volume_type": "unified",
"size": 10000000000
},
"extra_volumes": {},
"public": true,
"arch": "x86_64",
"creation_date": "2024-02-22T15:52:56.037007+00:00",
"modification_date": "2024-02-22T15:52:56.037007+00:00",
"default_bootscript": null,
"from_server": null,
"state": "available",
"tags": [],
"zone": "nl-ams-1"
},
"volumes": {
"0": {
"boot": false,
"id": "fe85c817-e67e-4e24-8f13-bde3e9f42620",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"volume_type": "l_ssd",
"export_uri": null,
"organization": "20b3d507-96ac-454c-a795-bc731b46b12f",
"project": "20b3d507-96ac-454c-a795-bc731b46b12f",
"server": {
"id": "4904366a-7e26-4b65-b97b-6392c761247a",
"name": "routed-dualstack"
},
"size": 20000000000,
"state": "available",
"creation_date": "2024-04-19T14:50:14.019739+00:00",
"modification_date": "2024-04-19T14:50:14.019739+00:00",
"tags": [],
"zone": "nl-ams-1"
}
},
"tags": [],
"state": "running",
"protected": false,
"state_detail": "booted",
"public_ip": {
"id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0",
"address": "51.158.183.115",
"dynamic": false,
"gateway": "62.210.0.1",
"netmask": "32",
"family": "inet",
"provisioning_mode": "dhcp",
"tags": [],
"state": "attached",
"ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e"
},
"public_ips": [
{
"id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0",
"address": "51.158.183.115",
"dynamic": false,
"gateway": "62.210.0.1",
"netmask": "32",
"family": "inet",
"provisioning_mode": "dhcp",
"tags": [],
"state": "attached",
"ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e"
},
{
"id": "f52a8c81-0875-4aee-b96e-eccfc6bec367",
"address": "2001:bc8:1640:1568:dc00:ff:fe21:91b",
"dynamic": false,
"gateway": "fe80::dc00:ff:fe21:91c",
"netmask": "64",
"family": "inet6",
"provisioning_mode": "slaac",
"tags": [],
"state": "attached",
"ipam_id": "40d1e6ea-e932-42f9-8acb-55398bec7ad6"
}
],
"mac_address": "de:00:00:21:09:1b",
"routed_ip_enabled": true,
"ipv6": null,
"extra_networks": [],
"dynamic_ip_required": false,
"enable_ipv6": false,
"private_ip": null,
"creation_date": "2024-04-19T14:50:14.019739+00:00",
"modification_date": "2024-04-19T14:52:21.181670+00:00",
"bootscript": {
"id": "5a520dda-96d6-4ed2-acd1-1d526b6859fe",
"public": true,
"title": "x86_64 mainline 4.4.182 rev1",
"architecture": "x86_64",
"organization": "11111111-1111-4111-8111-111111111111",
"project": "11111111-1111-4111-8111-111111111111",
"kernel": "http://10.196.2.9/kernel/x86_64-mainline-lts-4.4-4.4.182-rev1/vmlinuz-4.4.182",
"dtb": "",
"initrd": "http://10.196.2.9/initrd/initrd-Linux-x86_64-v3.14.6.gz",
"bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
"default": true,
"zone": "nl-ams-1"
},
"security_group": {
"id": "984414da-9fc2-49c0-a925-fed6266fe092",
"name": "Default security group"
},
"location": {
"zone_id": "ams1",
"platform_id": "23",
"cluster_id": "19",
"hypervisor_id": "1201",
"node_id": "24"
},
"maintenances": [],
"allowed_actions": [
"poweroff",
"terminate",
"reboot",
"stop_in_place",
"backup"
],
"placement_group": null,
"private_nics": [],
"zone": "nl-ams-1"
}
]
}
}
@@ -2952,9 +2952,10 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_scaleway_instance_type`: commercial type of the server
* `__meta_scaleway_instance_zone`: the zone of the server (ex: `fr-par-1`, complete list [here](https://developers.scaleway.com/en/products/instance/api/#introduction))

This role uses the private IPv4 address by default. This can be
This role uses the first address it finds in the following order: private IPv4, public IPv4, public IPv6. This can be
changed with relabeling, as demonstrated in [the Prometheus scaleway-sd
configuration file](/documentation/examples/prometheus-scaleway.yml).
Should an instance have no address before relabeling, it will not be added to the target list and you will not be able to relabel it.
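A minimal sketch of what such a relabeling could look like follows. It is not the authoritative example file linked above; the choice of meta label and the port `80` are assumptions made for illustration:

```yaml
relabel_configs:
  # Prefer the instance's public IPv4 address over the default selection
  # order. The source label and the port (80) are assumptions for this sketch.
  - source_labels: [__meta_scaleway_instance_public_ipv4]
    regex: (.+)
    replacement: $1:80
    target_label: __address__
```

Because the regex only matches a non-empty value, instances without a public IPv4 address keep whatever `__address__` the default selection produced.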
#### Baremetal role
@@ -84,8 +84,10 @@ or 31 days, whichever is smaller.
Prometheus has several flags that configure local storage. The most important are:

- `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`.
- `--storage.tsdb.retention.time`: When to remove old data. Defaults to `15d`.
Overrides `storage.tsdb.retention` if this flag is set to anything other than default.
- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is
set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention`
nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`.
Supported units: y, w, d, h, m, s, ms.
- `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain.
The oldest data will be removed first. Defaults to `0` or disabled. Units supported:
B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only
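As an illustration of how these flags combine (the values here are examples, not recommendations), a hypothetical invocation could be `prometheus --storage.tsdb.path=data/ --storage.tsdb.retention.time=30d --storage.tsdb.retention.size=512MB`; whichever retention limit is reached first causes the oldest blocks to be removed.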
go.mod: 4 changes
@@ -11,7 +11,7 @@ require (
github.com/KimMachineGun/automemlimit v0.6.0
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
github.com/aws/aws-sdk-go v1.51.24
github.com/aws/aws-sdk-go v1.51.25
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0

@@ -36,7 +36,7 @@ require (
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.28.2
github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7
github.com/hetznercloud/hcloud-go/v2 v2.7.1
github.com/hetznercloud/hcloud-go/v2 v2.7.2
github.com/ionos-cloud/sdk-go/v6 v6.1.11
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.8
go.sum: 8 changes
@@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.51.24 h1:nwL5MaommPkwb7Ixk24eWkdx5HY4of1gD10kFFVAl6A=
github.com/aws/aws-sdk-go v1.51.24/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls=
github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=

@@ -413,8 +413,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7/go.mod h1:svtx
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.7.1 h1:D4domwRSLOyBL/bwzd1O7hunBbKmeEHZTa7GmCYrniY=
github.com/hetznercloud/hcloud-go/v2 v2.7.1/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
github.com/hetznercloud/hcloud-go/v2 v2.7.2 h1:UlE7n1GQZacCfyjv9tDVUN7HZfOXErPIfM/M039u9A0=
github.com/hetznercloud/hcloud-go/v2 v2.7.2/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -96,12 +96,14 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io

err = file.Truncate(int64(filesize))
if err != nil {
file.Close()
level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err)
return nil, nil, err
}

fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0)
if err != nil {
file.Close()
level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err)
return nil, nil, err
}
@@ -110,10 +110,7 @@ func TestMMapFile(t *testing.T) {
filename := file.Name()
defer os.Remove(filename)

fileAsBytes, closer, err := getMMapedFile(filename, 2, nil)
if err != nil {
t.Cleanup(func() { closer.Close() })
}
fileAsBytes, _, err := getMMapedFile(filename, 2, nil)

require.NoError(t, err)
copy(fileAsBytes, "ab")
@@ -457,8 +457,17 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
}
}

// If the alert was previously firing, keep it around for a given
// retention time so it is reported as resolved to the AlertManager.
// If the alert is resolved (was firing but is now inactive) keep it for
// at least the retention period. This is important for a number of reasons:
//
// 1. It allows for Prometheus to be more resilient to network issues that
// would otherwise prevent a resolved alert from being reported as resolved
// to Alertmanager.
//
// 2. It helps reduce the chance of resolved notifications being lost if
// Alertmanager crashes or restarts between receiving the resolved alert
// from Prometheus and sending the resolved notification. This tends to
// occur for routes with large Group intervals.
if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) {
delete(r.active, fp)
}
@@ -230,7 +230,11 @@ func (g *Group) run(ctx context.Context) {
g.evalIterationFunc(ctx, g, evalTimestamp)
}

g.RestoreForState(time.Now())
restoreStartTime := time.Now()
g.RestoreForState(restoreStartTime)
totalRestoreTimeSeconds := time.Since(restoreStartTime).Seconds()
g.metrics.GroupLastRestoreDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(totalRestoreTimeSeconds)
level.Debug(g.logger).Log("msg", "'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds)
g.shouldRestore = false
}
@@ -779,17 +783,18 @@ const namespace = "prometheus"

// Metrics for rule evaluation.
type Metrics struct {
EvalDuration prometheus.Summary
IterationDuration prometheus.Summary
IterationsMissed *prometheus.CounterVec
IterationsScheduled *prometheus.CounterVec
EvalTotal *prometheus.CounterVec
EvalFailures *prometheus.CounterVec
GroupInterval *prometheus.GaugeVec
GroupLastEvalTime *prometheus.GaugeVec
GroupLastDuration *prometheus.GaugeVec
GroupRules *prometheus.GaugeVec
GroupSamples *prometheus.GaugeVec
EvalDuration prometheus.Summary
IterationDuration prometheus.Summary
IterationsMissed *prometheus.CounterVec
IterationsScheduled *prometheus.CounterVec
EvalTotal *prometheus.CounterVec
EvalFailures *prometheus.CounterVec
GroupInterval *prometheus.GaugeVec
GroupLastEvalTime *prometheus.GaugeVec
GroupLastDuration *prometheus.GaugeVec
GroupLastRestoreDuration *prometheus.GaugeVec
GroupRules *prometheus.GaugeVec
GroupSamples *prometheus.GaugeVec
}

// NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer,
@@ -865,6 +870,14 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
},
[]string{"rule_group"},
),
GroupLastRestoreDuration: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_restore_duration_seconds",
Help: "The duration of the last alert rules alerts restoration using the `ALERTS_FOR_STATE` series.",
},
[]string{"rule_group"},
),
GroupRules: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,

@@ -894,6 +907,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
m.GroupInterval,
m.GroupLastEvalTime,
m.GroupLastDuration,
m.GroupLastRestoreDuration,
m.GroupRules,
m.GroupSamples,
)
@@ -81,6 +81,8 @@ type Options struct {
// Option to enable the ingestion of the created timestamp as a synthetic zero sample.
// See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md
EnableCreatedTimestampZeroIngestion bool
// Option to enable the ingestion of native histograms.
EnableNativeHistogramsIngestion bool

// Optional HTTP client options to use when scraping.
HTTPClientOptions []config_util.HTTPClientOption
@@ -178,6 +178,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
opts.interval,
opts.timeout,
opts.scrapeClassicHistograms,
options.EnableNativeHistogramsIngestion,
options.EnableCreatedTimestampZeroIngestion,
options.ExtraMetrics,
options.EnableMetadataStorage,
@@ -827,7 +828,10 @@ type scrapeLoop struct {
interval time.Duration
timeout time.Duration
scrapeClassicHistograms bool
enableCTZeroIngestion bool

// Feature flagged options.
enableNativeHistogramIngestion bool
enableCTZeroIngestion bool

appender func(ctx context.Context) storage.Appender
symbolTable *labels.SymbolTable
@@ -1123,6 +1127,7 @@ func newScrapeLoop(ctx context.Context,
interval time.Duration,
timeout time.Duration,
scrapeClassicHistograms bool,
enableNativeHistogramIngestion bool,
enableCTZeroIngestion bool,
reportExtraMetrics bool,
appendMetadataToWAL bool,
@@ -1153,33 +1158,34 @@ func newScrapeLoop(ctx context.Context,
}

sl := &scrapeLoop{
scraper: sc,
buffers: buffers,
cache: cache,
appender: appender,
symbolTable: symbolTable,
sampleMutator: sampleMutator,
reportSampleMutator: reportSampleMutator,
stopped: make(chan struct{}),
offsetSeed: offsetSeed,
l: l,
parentCtx: ctx,
appenderCtx: appenderCtx,
honorTimestamps: honorTimestamps,
trackTimestampsStaleness: trackTimestampsStaleness,
enableCompression: enableCompression,
sampleLimit: sampleLimit,
bucketLimit: bucketLimit,
maxSchema: maxSchema,
labelLimits: labelLimits,
interval: interval,
timeout: timeout,
scrapeClassicHistograms: scrapeClassicHistograms,
enableCTZeroIngestion: enableCTZeroIngestion,
reportExtraMetrics: reportExtraMetrics,
appendMetadataToWAL: appendMetadataToWAL,
metrics: metrics,
skipOffsetting: skipOffsetting,
scraper: sc,
buffers: buffers,
cache: cache,
appender: appender,
symbolTable: symbolTable,
sampleMutator: sampleMutator,
reportSampleMutator: reportSampleMutator,
stopped: make(chan struct{}),
offsetSeed: offsetSeed,
l: l,
parentCtx: ctx,
appenderCtx: appenderCtx,
honorTimestamps: honorTimestamps,
trackTimestampsStaleness: trackTimestampsStaleness,
enableCompression: enableCompression,
sampleLimit: sampleLimit,
bucketLimit: bucketLimit,
maxSchema: maxSchema,
labelLimits: labelLimits,
interval: interval,
timeout: timeout,
scrapeClassicHistograms: scrapeClassicHistograms,
enableNativeHistogramIngestion: enableNativeHistogramIngestion,
enableCTZeroIngestion: enableCTZeroIngestion,
reportExtraMetrics: reportExtraMetrics,
appendMetadataToWAL: appendMetadataToWAL,
metrics: metrics,
skipOffsetting: skipOffsetting,
}
sl.ctx, sl.cancel = context.WithCancel(ctx)
@@ -1627,7 +1633,7 @@ loop:
}
}

if isHistogram {
if isHistogram && sl.enableNativeHistogramIngestion {
if h != nil {
ref, err = app.AppendHistogram(ref, lset, t, h, nil)
} else {
@@ -678,6 +678,7 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app
false,
false,
false,
false,
nil,
false,
newTestScrapeMetrics(t),
@@ -819,6 +820,7 @@ func TestScrapeLoopRun(t *testing.T) {
false,
false,
false,
false,
nil,
false,
scrapeMetrics,
@@ -962,6 +964,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
false,
false,
false,
false,
nil,
false,
scrapeMetrics,
@@ -1571,6 +1574,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
app := &bucketLimitAppender{Appender: resApp, limit: 2}

sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl.enableNativeHistogramIngestion = true
sl.sampleMutator = func(l labels.Labels) labels.Labels {
if l.Has("deleteme") {
return labels.EmptyLabels()
@@ -1797,14 +1801,15 @@ func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) {

func TestScrapeLoopAppendExemplar(t *testing.T) {
tests := []struct {
title string
scrapeClassicHistograms bool
scrapeText string
contentType string
discoveryLabels []string
floats []floatSample
histograms []histogramSample
exemplars []exemplar.Exemplar
title string
scrapeClassicHistograms bool
enableNativeHistogramsIngestion bool
scrapeText string
contentType string
discoveryLabels []string
floats []floatSample
histograms []histogramSample
exemplars []exemplar.Exemplar
}{
{
title: "Metric without exemplars",
@@ -1862,6 +1867,8 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
},
{
title: "Native histogram with three exemplars",

enableNativeHistogramsIngestion: true,
scrapeText: `name: "test_histogram"
help: "Test histogram with many buckets removed to keep it manageable in size."
type: HISTOGRAM
@@ -1976,6 +1983,8 @@ metric: <
},
{
title: "Native histogram with three exemplars scraped as classic histogram",

enableNativeHistogramsIngestion: true,
scrapeText: `name: "test_histogram"
help: "Test histogram with many buckets removed to keep it manageable in size."
type: HISTOGRAM
@@ -2115,6 +2124,7 @@ metric: <
}

sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl.enableNativeHistogramIngestion = test.enableNativeHistogramsIngestion
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, discoveryLabels, false, nil)
}
@@ -3710,7 +3720,7 @@ scrape_configs:
s.DB.EnableNativeHistograms()
reg := prometheus.NewRegistry()

mng, err := NewManager(nil, nil, s, reg)
mng, err := NewManager(&Options{EnableNativeHistogramsIngestion: true}, nil, s, reg)
require.NoError(t, err)
cfg, err := config.Load(configStr, false, log.NewNopLogger())
require.NoError(t, err)
@@ -1,22 +0,0 @@
## Copying from opentelemetry/opentelemetry-collector-contrib

This files in the `prometheus/` and `prometheusremotewrite/` are copied from the OpenTelemetry Project[^1].

This is done instead of adding a go.mod dependency because OpenTelemetry depends on `prometheus/prometheus` and a cyclic dependency will be created. This is just a temporary solution and the long-term solution is to move the required packages from OpenTelemetry into `prometheus/prometheus`.

To update the dependency is a multi-step process:
1. Vendor the latest `prometheus/prometheus`@`main` into [`opentelemetry/opentelemetry-collector-contrib`](https://github.com/open-telemetry/opentelemetry-collector-contrib)
1. Update the VERSION in `update-copy.sh`.
1. Run `./update-copy.sh`.

### Why copy?

This is because the packages we copy depend on the [`prompb`](https://github.com/prometheus/prometheus/blob/main/prompb) package. While the package is relatively stable, there are still changes. For example, https://github.com/prometheus/prometheus/pull/11935 changed the types.
This means if we depend on the upstream packages directly, we will never able to make the changes like above. Hence we're copying the code for now.

### I need to manually change these files

When we do want to make changes to the types in `prompb`, we might need to edit the files directly. That is OK, please let @gouthamve or @jesusvazquez know so they can take care of updating the upstream code (by vendoring in `prometheus/prometheus` upstream and resolving conflicts) and then will run the copy
script again to keep things updated.

[^1]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus and https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheusremotewrite
@@ -1,9 +1,20 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
package prometheus

import (
"strings"
@@ -1,9 +1,20 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
package prometheus

import (
"strings"
@@ -1,9 +1,20 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/unit_to_ucum.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
package prometheus

import "strings"
@@ -1,29 +1,42 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/helper.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
package prometheusremotewrite

import (
"encoding/hex"
"fmt"
"log"
"math"
"slices"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"

"github.com/cespare/xxhash/v2"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
conventions "go.opentelemetry.io/collector/semconv/v1.6.1"

"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"

prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)
@@ -48,7 +61,7 @@ const (
)

type bucketBoundsData struct {
sig string
ts *prompb.TimeSeries
bound float64
}
@@ -66,94 +79,47 @@ func (a ByLabelName) Len() int { return len(a) }
func (a ByLabelName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// addSample finds a TimeSeries in tsMap that corresponds to the label set labels, and add sample to the TimeSeries; it
// creates a new TimeSeries in the map if not found and returns the time series signature.
// tsMap will be unmodified if either labels or sample is nil, but can still be modified if the exemplar is nil.
func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label,
datatype string) string {
if sample == nil || labels == nil || tsMap == nil {
// This shouldn't happen
return ""
}

sig := timeSeriesSignature(datatype, labels)
ts := tsMap[sig]
if ts != nil {
ts.Samples = append(ts.Samples, *sample)
} else {
newTs := &prompb.TimeSeries{
Labels: labels,
Samples: []prompb.Sample{*sample},
}
tsMap[sig] = newTs
}

return sig
}

// addExemplars finds a bucket bound that corresponds to the exemplars value and add the exemplar to the specific sig;
// we only add exemplars if samples are presents
// tsMap is unmodified if either of its parameters is nil and samples are nil.
func addExemplars(tsMap map[string]*prompb.TimeSeries, exemplars []prompb.Exemplar, bucketBoundsData []bucketBoundsData) {
if len(tsMap) == 0 || len(bucketBoundsData) == 0 || len(exemplars) == 0 {
return
}

sort.Sort(byBucketBoundsData(bucketBoundsData))

for _, exemplar := range exemplars {
addExemplar(tsMap, bucketBoundsData, exemplar)
}
}

func addExemplar(tsMap map[string]*prompb.TimeSeries, bucketBounds []bucketBoundsData, exemplar prompb.Exemplar) {
for _, bucketBound := range bucketBounds {
sig := bucketBound.sig
bound := bucketBound.bound

ts := tsMap[sig]
if ts != nil && len(ts.Samples) > 0 && exemplar.Value <= bound {
ts.Exemplars = append(ts.Exemplars, exemplar)
return
}
}
}

// timeSeries return a string signature in the form of:
//
// TYPE-label1-value1- ... -labelN-valueN
//
// the label slice should not contain duplicate label names; this method sorts the slice by label name before creating
// timeSeriesSignature returns a hashed label set signature.
// The label slice should not contain duplicate label names; this method sorts the slice by label name before creating
// the signature.
func timeSeriesSignature(datatype string, labels []prompb.Label) string {
length := len(datatype)

for _, lb := range labels {
length += 2 + len(lb.GetName()) + len(lb.GetValue())
}

b := strings.Builder{}
b.Grow(length)
b.WriteString(datatype)

// The algorithm is the same as in Prometheus' labels.StableHash function.
func timeSeriesSignature(labels []prompb.Label) uint64 {
sort.Sort(ByLabelName(labels))

for _, lb := range labels {
b.WriteString("-")
b.WriteString(lb.GetName())
b.WriteString("-")
b.WriteString(lb.GetValue())
}
// Use xxhash.Sum64(b) for fast path as it's faster.
b := make([]byte, 0, 1024)
for i, v := range labels {
if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) {
// If labels entry is 1KB+ do not allocate whole entry.
h := xxhash.New()
_, _ = h.Write(b)
for _, v := range labels[i:] {
_, _ = h.WriteString(v.Name)
_, _ = h.Write(seps)
_, _ = h.WriteString(v.Value)
_, _ = h.Write(seps)
}
return h.Sum64()
}

return b.String()
b = append(b, v.Name...)
b = append(b, seps[0])
b = append(b, v.Value...)
b = append(b, seps[0])
}
return xxhash.Sum64(b)
}

var seps = []byte{'\xff'}

// createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values.
// Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen, and overwrites are
// logged. Resulting label names are sanitized.
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, extras ...string) []prompb.Label {
serviceName, haveServiceName := resource.Attributes().Get(conventions.AttributeServiceName)
instance, haveInstanceID := resource.Attributes().Get(conventions.AttributeServiceInstanceID)
// Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and
// if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized.
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string,
ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label {
resourceAttrs := resource.Attributes()
serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName)
instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID)

// Calculate the maximum possible number of labels we could return so we can preallocate l
maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2
@@ -171,9 +137,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa

// Ensure attributes are sorted by key for consistent merging of keys which
// collide when sanitized.
labels := make([]prompb.Label, 0, attributes.Len())
labels := make([]prompb.Label, 0, maxLabelCount)
// XXX: Should we always drop service namespace/service name/service instance ID from the labels
// (as they get mapped to other Prometheus labels)?
attributes.Range(func(key string, value pcommon.Value) bool {
labels = append(labels, prompb.Label{Name: key, Value: value.AsString()})
if !slices.Contains(ignoreAttrs, key) {
labels = append(labels, prompb.Label{Name: key, Value: value.AsString()})
}
return true
})
sort.Stable(ByLabelName(labels))
@@ -190,7 +160,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
// Map service.name + service.namespace to job
if haveServiceName {
val := serviceName.AsString()
if serviceNamespace, ok := resource.Attributes().Get(conventions.AttributeServiceNamespace); ok {
if serviceNamespace, ok := resourceAttrs.Get(conventions.AttributeServiceNamespace); ok {
val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val)
}
l[model.JobLabel] = val
@@ -213,7 +183,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
break
}
_, found := l[extras[i]]
if found {
if found && logOnOverwrite {
log.Println("label " + extras[i] + " is overwritten. Check if Prometheus reserved labels are used.")
}
// internal labels should be maintained
@@ -224,12 +194,12 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
l[name] = extras[i+1]
}

s := make([]prompb.Label, 0, len(l))
labels = labels[:0]
for k, v := range l {
s = append(s, prompb.Label{Name: k, Value: v})
labels = append(labels, prompb.Label{Name: k, Value: v})
}

return s
return labels
}

// isValidAggregationTemporality checks whether an OTel metric has a valid
@@ -249,100 +219,84 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool {
return false
}

// addSingleHistogramDataPoint converts pt to 2 + min(len(ExplicitBounds), len(BucketCount)) + 1 samples. It
// ignore extra buckets if len(ExplicitBounds) > len(BucketCounts)
func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries, baseName string) {
timestamp := convertTimeStamp(pt.Timestamp())
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels)
func (c *prometheusConverter) addHistogramDataPoints(dataPoints pmetric.HistogramDataPointSlice,
resource pcommon.Resource, settings Settings, baseName string) {
for x := 0; x < dataPoints.Len(); x++ {
pt := dataPoints.At(x)
timestamp := convertTimeStamp(pt.Timestamp())
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false)

createLabels := func(nameSuffix string, extras ...string) []prompb.Label {
extraLabelCount := len(extras) / 2
labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name
copy(labels, baseLabels)
// If the sum is unset, it indicates the _sum metric point should be
// omitted
if pt.HasSum() {
// treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{
Value: pt.Sum(),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
sum.Value = math.Float64frombits(value.StaleNaN)
}

sumlabels := createLabels(baseName+sumStr, baseLabels)
c.addSample(sum, sumlabels)

for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ {
labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]})
}

// sum, count, and buckets of the histogram should append suffix to baseName
labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: baseName + nameSuffix})

return labels
}

// If the sum is unset, it indicates the _sum metric point should be
// omitted
if pt.HasSum() {
// treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{
Value: pt.Sum(),
// treat count as a sample in an individual TimeSeries
count := &prompb.Sample{
Value: float64(pt.Count()),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
sum.Value = math.Float64frombits(value.StaleNaN)
count.Value = math.Float64frombits(value.StaleNaN)
}

sumlabels := createLabels(sumStr)
addSample(tsMap, sum, sumlabels, metric.Type().String())
countlabels := createLabels(baseName+countStr, baseLabels)
c.addSample(count, countlabels)

}
// cumulative count for conversion to cumulative histogram
var cumulativeCount uint64

// treat count as a sample in an individual TimeSeries
count := &prompb.Sample{
Value: float64(pt.Count()),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
count.Value = math.Float64frombits(value.StaleNaN)
}
var bucketBounds []bucketBoundsData

countlabels := createLabels(countStr)
addSample(tsMap, count, countlabels, metric.Type().String())
// process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1
for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ {
bound := pt.ExplicitBounds().At(i)
cumulativeCount += pt.BucketCounts().At(i)
bucket := &prompb.Sample{
Value: float64(cumulativeCount),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
bucket.Value = math.Float64frombits(value.StaleNaN)
}
boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
labels := createLabels(baseName+bucketStr, baseLabels, leStr, boundStr)
ts := c.addSample(bucket, labels)

// cumulative count for conversion to cumulative histogram
var cumulativeCount uint64

promExemplars := getPromExemplars[pmetric.HistogramDataPoint](pt)

var bucketBounds []bucketBoundsData

// process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1
for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ {
bound := pt.ExplicitBounds().At(i)
cumulativeCount += pt.BucketCounts().At(i)
bucket := &prompb.Sample{
Value: float64(cumulativeCount),
bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: bound})
}
// add le=+Inf bucket
infBucket := &prompb.Sample{
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
bucket.Value = math.Float64frombits(value.StaleNaN)
infBucket.Value = math.Float64frombits(value.StaleNaN)
} else {
infBucket.Value = float64(pt.Count())
}
boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
labels := createLabels(bucketStr, leStr, boundStr)
sig := addSample(tsMap, bucket, labels, metric.Type().String())
infLabels := createLabels(baseName+bucketStr, baseLabels, leStr, pInfStr)
ts := c.addSample(infBucket, infLabels)

bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: bound})
}
// add le=+Inf bucket
infBucket := &prompb.Sample{
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
infBucket.Value = math.Float64frombits(value.StaleNaN)
} else {
infBucket.Value = float64(pt.Count())
}
infLabels := createLabels(bucketStr, leStr, pInfStr)
sig := addSample(tsMap, infBucket, infLabels, metric.Type().String())
bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: math.Inf(1)})
c.addExemplars(pt, bucketBounds)

bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: math.Inf(1)})
addExemplars(tsMap, promExemplars, bucketBounds)

// add _created time series if needed
startTimestamp := pt.StartTimestamp()
if settings.ExportCreatedMetric && startTimestamp != 0 {
labels := createLabels(createdSuffix)
addCreatedTimeSeriesIfNeeded(tsMap, labels, startTimestamp, pt.Timestamp(), metric.Type().String())
startTimestamp := pt.StartTimestamp()
if settings.ExportCreatedMetric && startTimestamp != 0 {
labels := createLabels(baseName+createdSuffix, baseLabels)
c.addTimeSeriesIfNeeded(labels, startTimestamp, pt.Timestamp())
}
}
}
@ -441,129 +395,177 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp {
|
|||
return ts
|
||||
}
|
||||
|
||||
// addSingleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples.
|
||||
func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings,
|
||||
tsMap map[string]*prompb.TimeSeries, baseName string) {
|
||||
timestamp := convertTimeStamp(pt.Timestamp())
|
||||
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels)
|
||||
func (c *prometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource,
|
||||
settings Settings, baseName string) {
|
||||
for x := 0; x < dataPoints.Len(); x++ {
|
||||
pt := dataPoints.At(x)
|
||||
timestamp := convertTimeStamp(pt.Timestamp())
|
||||
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false)
|
||||
|
||||
createLabels := func(name string, extras ...string) []prompb.Label {
|
||||
extraLabelCount := len(extras) / 2
|
||||
labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name
|
||||
copy(labels, baseLabels)
|
||||
|
||||
for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ {
|
||||
labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]})
|
||||
}
|
||||
|
||||
labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: name})
|
||||
|
||||
return labels
|
||||
}
|
||||
|
||||
// treat sum as a sample in an individual TimeSeries
|
||||
sum := &prompb.Sample{
|
||||
Value: pt.Sum(),
|
||||
Timestamp: timestamp,
|
||||
}
|
||||
if pt.Flags().NoRecordedValue() {
|
||||
sum.Value = math.Float64frombits(value.StaleNaN)
}
// sum and count of the summary should append suffix to baseName
sumlabels := createLabels(baseName + sumStr)
addSample(tsMap, sum, sumlabels, metric.Type().String())

// treat count as a sample in an individual TimeSeries
count := &prompb.Sample{
Value: float64(pt.Count()),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
count.Value = math.Float64frombits(value.StaleNaN)
}
countlabels := createLabels(baseName + countStr)
addSample(tsMap, count, countlabels, metric.Type().String())

// process each percentile/quantile
for i := 0; i < pt.QuantileValues().Len(); i++ {
qt := pt.QuantileValues().At(i)
quantile := &prompb.Sample{
Value: qt.Value(),
// treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{
Value: pt.Sum(),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
quantile.Value = math.Float64frombits(value.StaleNaN)
sum.Value = math.Float64frombits(value.StaleNaN)
}
percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64)
qtlabels := createLabels(baseName, quantileStr, percentileStr)
addSample(tsMap, quantile, qtlabels, metric.Type().String())
}
// sum and count of the summary should append suffix to baseName
sumlabels := createLabels(baseName+sumStr, baseLabels)
c.addSample(sum, sumlabels)

// add _created time series if needed
startTimestamp := pt.StartTimestamp()
if settings.ExportCreatedMetric && startTimestamp != 0 {
createdLabels := createLabels(baseName + createdSuffix)
addCreatedTimeSeriesIfNeeded(tsMap, createdLabels, startTimestamp, pt.Timestamp(), metric.Type().String())
// treat count as a sample in an individual TimeSeries
count := &prompb.Sample{
Value: float64(pt.Count()),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
count.Value = math.Float64frombits(value.StaleNaN)
}
countlabels := createLabels(baseName+countStr, baseLabels)
c.addSample(count, countlabels)

// process each percentile/quantile
for i := 0; i < pt.QuantileValues().Len(); i++ {
qt := pt.QuantileValues().At(i)
quantile := &prompb.Sample{
Value: qt.Value(),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
quantile.Value = math.Float64frombits(value.StaleNaN)
}
percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64)
qtlabels := createLabels(baseName, baseLabels, quantileStr, percentileStr)
c.addSample(quantile, qtlabels)
}

startTimestamp := pt.StartTimestamp()
if settings.ExportCreatedMetric && startTimestamp != 0 {
createdLabels := createLabels(baseName+createdSuffix, baseLabels)
c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp())
}
}
}

// addCreatedTimeSeriesIfNeeded adds {name}_created time series with a single
// sample. If the series exists, then new samples won't be added.
func addCreatedTimeSeriesIfNeeded(
	series map[string]*prompb.TimeSeries,
	labels []prompb.Label,
	startTimestamp pcommon.Timestamp,
	timestamp pcommon.Timestamp,
	metricType string,
) {
	sig := timeSeriesSignature(metricType, labels)
	if _, ok := series[sig]; !ok {
		series[sig] = &prompb.TimeSeries{
			Labels: labels,
			Samples: []prompb.Sample{
				{ // convert ns to ms
					Value: float64(convertTimeStamp(startTimestamp)),
					Timestamp: convertTimeStamp(timestamp),
				},
// createLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name.
// If extras are provided, corresponding label pairs are also added to the returned slice.
// If extras is of uneven length, the last (unpaired) extra will be ignored.
func createLabels(name string, baseLabels []prompb.Label, extras ...string) []prompb.Label {
	extraLabelCount := len(extras) / 2
	labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name
	copy(labels, baseLabels)

	n := len(extras)
	n -= n % 2
	for extrasIdx := 0; extrasIdx < n; extrasIdx += 2 {
		labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]})
	}

	labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: name})
	return labels
}
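
For intuition about what calls like `createLabels(baseName+sumStr, baseLabels)` above produce, here is a minimal, self-contained sketch of the same behaviour; the local `Label` struct stands in for `prompb.Label`, and the metric and label values are made up:

package main

import "fmt"

type Label struct{ Name, Value string }

// mirror of createLabels' behaviour: copy the base labels, append extras
// pairwise (dropping an unpaired trailing extra), then append the metric
// name under __name__.
func createLabels(name string, base []Label, extras ...string) []Label {
	out := make([]Label, len(base), len(base)+len(extras)/2+1)
	copy(out, base)
	for i := 0; i+1 < len(extras); i += 2 {
		out = append(out, Label{extras[i], extras[i+1]})
	}
	return append(out, Label{"__name__", name})
}

func main() {
	base := []Label{{"job", "demo"}}
	fmt.Println(createLabels("http_duration_seconds_sum", base))
	// [{job demo} {__name__ http_duration_seconds_sum}]
	fmt.Println(createLabels("http_duration_seconds", base, "quantile", "0.99"))
	// [{job demo} {quantile 0.99} {__name__ http_duration_seconds}]
}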

// getOrCreateTimeSeries returns the time series corresponding to the label set if it
// already exists, plus false. Otherwise it creates a new one and returns that, plus true.
func (c *prometheusConverter) getOrCreateTimeSeries(lbls []prompb.Label) (*prompb.TimeSeries, bool) {
	h := timeSeriesSignature(lbls)
	ts := c.unique[h]
	if ts != nil {
		if isSameMetric(ts, lbls) {
			// We already have this metric
			return ts, false
		}

		// Look for a matching conflict
		for _, cTS := range c.conflicts[h] {
			if isSameMetric(cTS, lbls) {
				// We already have this metric
				return cTS, false
			}
		}

		// New conflict
		ts = &prompb.TimeSeries{
			Labels: lbls,
		}
		c.conflicts[h] = append(c.conflicts[h], ts)
		return ts, true
	}

	// This metric is new
	ts = &prompb.TimeSeries{
		Labels: lbls,
	}
	c.unique[h] = ts
	return ts, true
}
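
The unique/conflicts pair exists because `timeSeriesSignature` returns a hash, and two distinct label sets can collide on it. A minimal, self-contained sketch of the same collision-tolerant lookup, with simplified types and FNV-1a over a label string (the real code hashes `prompb.Label` pairs):

package main

import (
	"fmt"
	"hash/fnv"
)

type series struct{ labels string }

// index resolves hash collisions the same way getOrCreateTimeSeries does:
// a primary map keyed by hash, plus an overflow slice per hash for colliding
// label sets that are not identical.
type index struct {
	unique    map[uint64]*series
	conflicts map[uint64][]*series
}

func (ix *index) getOrCreate(labels string) (*series, bool) {
	h := fnv.New64a()
	h.Write([]byte(labels))
	sum := h.Sum64()
	if s := ix.unique[sum]; s != nil {
		if s.labels == labels {
			return s, false
		}
		for _, c := range ix.conflicts[sum] {
			if c.labels == labels {
				return c, false
			}
		}
		s2 := &series{labels: labels}
		ix.conflicts[sum] = append(ix.conflicts[sum], s2)
		return s2, true
	}
	s := &series{labels: labels}
	ix.unique[sum] = s
	return s, true
}

func main() {
	ix := &index{unique: map[uint64]*series{}, conflicts: map[uint64][]*series{}}
	_, created := ix.getOrCreate(`{job="a"}`)
	fmt.Println(created) // true: first time this label set is seen
	_, created = ix.getOrCreate(`{job="a"}`)
	fmt.Println(created) // false: existing series is reused
}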

// addTimeSeriesIfNeeded adds a corresponding time series if it doesn't already exist.
// If the time series doesn't already exist, it gets added with startTimestamp for its value and timestamp for its timestamp,
// both converted to milliseconds.
func (c *prometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTimestamp pcommon.Timestamp, timestamp pcommon.Timestamp) {
	ts, created := c.getOrCreateTimeSeries(lbls)
	if created {
		ts.Samples = []prompb.Sample{
			{
				// convert ns to ms
				Value: float64(convertTimeStamp(startTimestamp)),
				Timestamp: convertTimeStamp(timestamp),
			},
		}
	}
}

// addResourceTargetInfo converts the resource to the target info metric
func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timestamp pcommon.Timestamp, tsMap map[string]*prompb.TimeSeries) {
if settings.DisableTargetInfo {
// addResourceTargetInfo converts the resource to the target info metric.
func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timestamp pcommon.Timestamp, converter *prometheusConverter) {
if settings.DisableTargetInfo || timestamp == 0 {
return
}
// Use resource attributes (other than those used for job+instance) as the
// metric labels for the target info metric
attributes := pcommon.NewMap()
resource.Attributes().CopyTo(attributes)
attributes.RemoveIf(func(k string, _ pcommon.Value) bool {
switch k {
case conventions.AttributeServiceName, conventions.AttributeServiceNamespace, conventions.AttributeServiceInstanceID:
// Remove resource attributes used for job + instance
return true
default:
return false

attributes := resource.Attributes()
identifyingAttrs := []string{
conventions.AttributeServiceNamespace,
conventions.AttributeServiceName,
conventions.AttributeServiceInstanceID,
}
nonIdentifyingAttrsCount := attributes.Len()
for _, a := range identifyingAttrs {
_, haveAttr := attributes.Get(a)
if haveAttr {
nonIdentifyingAttrsCount--
}
})
if attributes.Len() == 0 {
}
if nonIdentifyingAttrsCount == 0 {
// If we only have job + instance, then target_info isn't useful, so don't add it.
return
}
// create parameters for addSample

name := targetMetricName
if len(settings.Namespace) > 0 {
name = settings.Namespace + "_" + name
}
labels := createAttributes(resource, attributes, settings.ExternalLabels, model.MetricNameLabel, name)

labels := createAttributes(resource, attributes, settings.ExternalLabels, identifyingAttrs, false, model.MetricNameLabel, name)
haveIdentifier := false
for _, l := range labels {
if l.Name == model.JobLabel || l.Name == model.InstanceLabel {
haveIdentifier = true
break
}
}

if !haveIdentifier {
// We need at least one identifying label to generate target_info.
return
}

sample := &prompb.Sample{
Value: float64(1),
// convert ns to ms
Timestamp: convertTimeStamp(timestamp),
}
addSample(tsMap, sample, labels, infoType)
converter.addSample(sample, labels)
}
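
Read together, the old and new variants implement the same contract: `target_info` carries the non-identifying resource attributes, keyed by at least one of `job`/`instance` derived from the service attributes. In exposition terms the emitted series looks like `target_info{job="my-service", instance="my-host:8080", k8s_cluster="prod"} 1`, where this particular label set is purely illustrative.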

// convertTimeStamp converts an OTLP timestamp in ns to a Prometheus timestamp in ms
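
The helper's body is elided from this hunk; a plausible one-line implementation of the documented conversion, assuming `pcommon.Timestamp` is a Unix time in nanoseconds (a sketch, not necessarily the file's exact code):

// convertTimeStamp turns nanoseconds into the milliseconds remote write expects.
func convertTimeStamp(timestamp pcommon.Timestamp) int64 {
	return int64(timestamp) / 1_000_000
}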

@@ -1,58 +1,59 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/histograms.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
package prometheusremotewrite

import (
"fmt"
"math"

"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"

"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
)

const defaultZeroThreshold = 1e-128

func addSingleExponentialHistogramDataPoint(
metric string,
pt pmetric.ExponentialHistogramDataPoint,
resource pcommon.Resource,
settings Settings,
series map[string]*prompb.TimeSeries,
) error {
labels := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
model.MetricNameLabel,
metric,
)
func (c *prometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice,
resource pcommon.Resource, settings Settings, baseName string) error {
for x := 0; x < dataPoints.Len(); x++ {
pt := dataPoints.At(x)
lbls := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
nil,
true,
model.MetricNameLabel,
baseName,
)
ts, _ := c.getOrCreateTimeSeries(lbls)

sig := timeSeriesSignature(
pmetric.MetricTypeExponentialHistogram.String(),
labels,
)
ts, ok := series[sig]
if !ok {
ts = &prompb.TimeSeries{
Labels: labels,
histogram, err := exponentialToNativeHistogram(pt)
if err != nil {
return err
}
series[sig] = ts
}
ts.Histograms = append(ts.Histograms, histogram)

histogram, err := exponentialToNativeHistogram(pt)
if err != nil {
return err
exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt)
ts.Exemplars = append(ts.Exemplars, exemplars...)
}
ts.Histograms = append(ts.Histograms, histogram)

exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt)
ts.Exemplars = append(ts.Exemplars, exemplars...)

return nil
}
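
The direct OTLP-to-native translation above works because both formats use exponential buckets with base 2^(2^-scale); at scale s, bucket `index` has upper bound base^(index+1). A standalone illustration of that formula (plain arithmetic, not code from the translator):

package main

import (
	"fmt"
	"math"
)

// upperBound returns the upper edge of exponential bucket `index` at `scale`:
// base = 2^(2^-scale), bound = base^(index+1).
func upperBound(scale, index int32) float64 {
	base := math.Exp2(math.Exp2(-float64(scale)))
	return math.Pow(base, float64(index+1))
}

func main() {
	fmt.Println(upperBound(0, 0)) // 2: at scale 0, buckets double each step
	fmt.Println(upperBound(1, 0)) // ~1.4142: each +1 in scale halves the step
}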

@@ -1,19 +1,32 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
package prometheusremotewrite

import (
"errors"
"fmt"
"sort"
"strconv"

"github.com/prometheus/prometheus/prompb"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/multierr"

"github.com/prometheus/prometheus/prompb"
prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)

@@ -27,9 +40,33 @@ type Settings struct {
}

// FromMetrics converts pmetric.Metrics to Prometheus remote write format.
func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*prompb.TimeSeries, errs error) {
tsMap = make(map[string]*prompb.TimeSeries)
func FromMetrics(md pmetric.Metrics, settings Settings) (map[string]*prompb.TimeSeries, error) {
c := newPrometheusConverter()
errs := c.fromMetrics(md, settings)
tss := c.timeSeries()
out := make(map[string]*prompb.TimeSeries, len(tss))
for i := range tss {
out[strconv.Itoa(i)] = &tss[i]
}

return out, errs
}
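
A minimal caller sketch for the new signature; the empty `pmetric.Metrics` and zero-valued `Settings` here are placeholders for real OTLP input and configuration:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
)

func main() {
	md := pmetric.NewMetrics() // would normally come from a decoded OTLP request
	tsMap, err := prometheusremotewrite.FromMetrics(md, prometheusremotewrite.Settings{})
	if err != nil {
		// conversion is best-effort: err aggregates per-metric failures
		fmt.Println("partial conversion error:", err)
	}
	fmt.Println("series converted:", len(tsMap))
}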

// prometheusConverter converts from OTel write format to Prometheus write format.
type prometheusConverter struct {
	unique    map[uint64]*prompb.TimeSeries
	conflicts map[uint64][]*prompb.TimeSeries
}

func newPrometheusConverter() *prometheusConverter {
	return &prometheusConverter{
		unique:    map[uint64]*prompb.TimeSeries{},
		conflicts: map[uint64][]*prompb.TimeSeries{},
	}
}

// fromMetrics converts pmetric.Metrics to Prometheus remote write format.
func (c *prometheusConverter) fromMetrics(md pmetric.Metrics, settings Settings) (errs error) {
	resourceMetricsSlice := md.ResourceMetrics()
	for i := 0; i < resourceMetricsSlice.Len(); i++ {
		resourceMetrics := resourceMetricsSlice.At(i)

@@ -39,8 +76,7 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp
// use with the "target" info metric
var mostRecentTimestamp pcommon.Timestamp
for j := 0; j < scopeMetricsSlice.Len(); j++ {
scopeMetrics := scopeMetricsSlice.At(j)
metricSlice := scopeMetrics.Metrics()
metricSlice := scopeMetricsSlice.At(j).Metrics()

// TODO: decide if instrumentation library information should be exported as labels
for k := 0; k < metricSlice.Len(); k++ {

@@ -54,65 +90,125 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp

promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes)

// handle individual metric based on type
// handle individual metrics based on type
//exhaustive:enforce
switch metric.Type() {
case pmetric.MetricTypeGauge:
dataPoints := metric.Gauge().DataPoints()
if dataPoints.Len() == 0 {
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
for x := 0; x < dataPoints.Len(); x++ {
addSingleGaugeNumberDataPoint(dataPoints.At(x), resource, metric, settings, tsMap, promName)
}
c.addGaugeNumberDataPoints(dataPoints, resource, settings, promName)
case pmetric.MetricTypeSum:
dataPoints := metric.Sum().DataPoints()
if dataPoints.Len() == 0 {
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
for x := 0; x < dataPoints.Len(); x++ {
addSingleSumNumberDataPoint(dataPoints.At(x), resource, metric, settings, tsMap, promName)
}
c.addSumNumberDataPoints(dataPoints, resource, metric, settings, promName)
case pmetric.MetricTypeHistogram:
dataPoints := metric.Histogram().DataPoints()
if dataPoints.Len() == 0 {
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
for x := 0; x < dataPoints.Len(); x++ {
addSingleHistogramDataPoint(dataPoints.At(x), resource, metric, settings, tsMap, promName)
}
c.addHistogramDataPoints(dataPoints, resource, settings, promName)
case pmetric.MetricTypeExponentialHistogram:
dataPoints := metric.ExponentialHistogram().DataPoints()
if dataPoints.Len() == 0 {
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
for x := 0; x < dataPoints.Len(); x++ {
errs = multierr.Append(
errs,
addSingleExponentialHistogramDataPoint(
promName,
dataPoints.At(x),
resource,
settings,
tsMap,
),
)
}
errs = multierr.Append(errs, c.addExponentialHistogramDataPoints(
dataPoints,
resource,
settings,
promName,
))
case pmetric.MetricTypeSummary:
dataPoints := metric.Summary().DataPoints()
if dataPoints.Len() == 0 {
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
for x := 0; x < dataPoints.Len(); x++ {
addSingleSummaryDataPoint(dataPoints.At(x), resource, metric, settings, tsMap, promName)
}
c.addSummaryDataPoints(dataPoints, resource, settings, promName)
default:
errs = multierr.Append(errs, errors.New("unsupported metric type"))
}
}
}
addResourceTargetInfo(resource, settings, mostRecentTimestamp, tsMap)
addResourceTargetInfo(resource, settings, mostRecentTimestamp, c)
}

return
}

// timeSeries returns a slice of the prompb.TimeSeries that were converted from OTel format.
func (c *prometheusConverter) timeSeries() []prompb.TimeSeries {
	conflicts := 0
	for _, ts := range c.conflicts {
		conflicts += len(ts)
	}
	allTS := make([]prompb.TimeSeries, 0, len(c.unique)+conflicts)
	for _, ts := range c.unique {
		allTS = append(allTS, *ts)
	}
	for _, cTS := range c.conflicts {
		for _, ts := range cTS {
			allTS = append(allTS, *ts)
		}
	}

	return allTS
}

func isSameMetric(ts *prompb.TimeSeries, lbls []prompb.Label) bool {
	if len(ts.Labels) != len(lbls) {
		return false
	}
	for i, l := range ts.Labels {
		if l.Name != lbls[i].Name || l.Value != lbls[i].Value {
			return false
		}
	}
	return true
}

// addExemplars adds exemplars for the dataPoint. For each exemplar, if it can find a bucket bound corresponding to its value,
// the exemplar is added to the bucket bound's time series, provided that the time series has samples.
func (c *prometheusConverter) addExemplars(dataPoint pmetric.HistogramDataPoint, bucketBounds []bucketBoundsData) {
	if len(bucketBounds) == 0 {
		return
	}

	exemplars := getPromExemplars(dataPoint)
	if len(exemplars) == 0 {
		return
	}

	sort.Sort(byBucketBoundsData(bucketBounds))
	for _, exemplar := range exemplars {
		for _, bound := range bucketBounds {
			if len(bound.ts.Samples) > 0 && exemplar.Value <= bound.bound {
				bound.ts.Exemplars = append(bound.ts.Exemplars, exemplar)
				break
			}
		}
	}
}
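
The rule above is easiest to see with concrete numbers. A self-contained sketch, with plain float64 slices standing in for the internal bucketBoundsData type and made-up values: after sorting, each exemplar lands in the first bucket whose upper bound is at least its value.

package main

import (
	"fmt"
	"sort"
)

func main() {
	bounds := []float64{1, 0.1, 0.5} // upper bounds of _bucket series, unsorted
	exemplars := []float64{0.3, 0.05, 2}

	sort.Float64s(bounds)
	for _, v := range exemplars {
		assigned := false
		for _, b := range bounds {
			if v <= b { // first bound >= value wins
				fmt.Printf("exemplar %v -> le=%v bucket\n", v, b)
				assigned = true
				break
			}
		}
		if !assigned {
			fmt.Printf("exemplar %v -> beyond all finite bounds (+Inf bucket)\n", v)
		}
	}
}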

// addSample finds a TimeSeries that corresponds to lbls, and adds sample to it.
// If there is no corresponding TimeSeries already, it's created.
// The corresponding TimeSeries is returned.
// If either lbls is nil/empty or sample is nil, nothing is done.
func (c *prometheusConverter) addSample(sample *prompb.Sample, lbls []prompb.Label) *prompb.TimeSeries {
	if sample == nil || len(lbls) == 0 {
		// This shouldn't happen
		return nil
	}

	ts, _ := c.getOrCreateTimeSeries(lbls)
	ts.Samples = append(ts.Samples, *sample)
	return ts
}

@@ -1,106 +1,110 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/number_data_points.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
package prometheusremotewrite

import (
"math"

"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"

"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
)

// addSingleGaugeNumberDataPoint converts the Gauge metric data point to a
// Prometheus time series with samples and labels. The result is stored in the
// series map.
func addSingleGaugeNumberDataPoint(
pt pmetric.NumberDataPoint,
resource pcommon.Resource,
metric pmetric.Metric,
settings Settings,
series map[string]*prompb.TimeSeries,
name string,
) {
labels := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
model.MetricNameLabel,
name,
)
sample := &prompb.Sample{
// convert ns to ms
Timestamp: convertTimeStamp(pt.Timestamp()),
func (c *prometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.NumberDataPointSlice,
resource pcommon.Resource, settings Settings, name string) {
for x := 0; x < dataPoints.Len(); x++ {
pt := dataPoints.At(x)
labels := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
nil,
true,
model.MetricNameLabel,
name,
)
sample := &prompb.Sample{
// convert ns to ms
Timestamp: convertTimeStamp(pt.Timestamp()),
}
switch pt.ValueType() {
case pmetric.NumberDataPointValueTypeInt:
sample.Value = float64(pt.IntValue())
case pmetric.NumberDataPointValueTypeDouble:
sample.Value = pt.DoubleValue()
}
if pt.Flags().NoRecordedValue() {
sample.Value = math.Float64frombits(value.StaleNaN)
}
c.addSample(sample, labels)
}
switch pt.ValueType() {
case pmetric.NumberDataPointValueTypeInt:
sample.Value = float64(pt.IntValue())
case pmetric.NumberDataPointValueTypeDouble:
sample.Value = pt.DoubleValue()
}
if pt.Flags().NoRecordedValue() {
sample.Value = math.Float64frombits(value.StaleNaN)
}
addSample(series, sample, labels, metric.Type().String())
}

// addSingleSumNumberDataPoint converts the Sum metric data point to a Prometheus
// time series with samples, labels and exemplars. The result is stored in the
// series map.
func addSingleSumNumberDataPoint(
pt pmetric.NumberDataPoint,
resource pcommon.Resource,
metric pmetric.Metric,
settings Settings,
series map[string]*prompb.TimeSeries,
name string,
) {
labels := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
model.MetricNameLabel, name,
)
sample := &prompb.Sample{
// convert ns to ms
Timestamp: convertTimeStamp(pt.Timestamp()),
}
switch pt.ValueType() {
case pmetric.NumberDataPointValueTypeInt:
sample.Value = float64(pt.IntValue())
case pmetric.NumberDataPointValueTypeDouble:
sample.Value = pt.DoubleValue()
}
if pt.Flags().NoRecordedValue() {
sample.Value = math.Float64frombits(value.StaleNaN)
}
sig := addSample(series, sample, labels, metric.Type().String())

if ts := series[sig]; sig != "" && ts != nil {
exemplars := getPromExemplars[pmetric.NumberDataPoint](pt)
ts.Exemplars = append(ts.Exemplars, exemplars...)
}

// add _created time series if needed
if settings.ExportCreatedMetric && metric.Sum().IsMonotonic() {
startTimestamp := pt.StartTimestamp()
if startTimestamp == 0 {
return
func (c *prometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDataPointSlice,
resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string) {
for x := 0; x < dataPoints.Len(); x++ {
pt := dataPoints.At(x)
lbls := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
nil,
true,
model.MetricNameLabel,
name,
)
sample := &prompb.Sample{
// convert ns to ms
Timestamp: convertTimeStamp(pt.Timestamp()),
}
switch pt.ValueType() {
case pmetric.NumberDataPointValueTypeInt:
sample.Value = float64(pt.IntValue())
case pmetric.NumberDataPointValueTypeDouble:
sample.Value = pt.DoubleValue()
}
if pt.Flags().NoRecordedValue() {
sample.Value = math.Float64frombits(value.StaleNaN)
}
ts := c.addSample(sample, lbls)
if ts != nil {
exemplars := getPromExemplars[pmetric.NumberDataPoint](pt)
ts.Exemplars = append(ts.Exemplars, exemplars...)
}

createdLabels := make([]prompb.Label, len(labels))
copy(createdLabels, labels)
for i, l := range createdLabels {
if l.Name == model.MetricNameLabel {
createdLabels[i].Value = name + createdSuffix
break
// add created time series if needed
if settings.ExportCreatedMetric && metric.Sum().IsMonotonic() {
startTimestamp := pt.StartTimestamp()
if startTimestamp == 0 {
return
}

createdLabels := make([]prompb.Label, len(lbls))
copy(createdLabels, lbls)
for i, l := range createdLabels {
if l.Name == model.MetricNameLabel {
createdLabels[i].Value = name + createdSuffix
break
}
}
c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp())
}
addCreatedTimeSeriesIfNeeded(series, createdLabels, startTimestamp, pt.Timestamp(), metric.Type().String())
}
}

@@ -1,14 +1,25 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
package prometheusremotewrite

import (
"github.com/prometheus/prometheus/prompb"
"go.opentelemetry.io/collector/pdata/pmetric"

"github.com/prometheus/prometheus/prompb"
prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)

@@ -1,27 +0,0 @@
#!/bin/bash
set -xe

OTEL_VERSION=v0.95.0

git clone https://github.com/open-telemetry/opentelemetry-collector-contrib ./tmp
cd ./tmp
git checkout $OTEL_VERSION
cd ..

rm -rf ./prometheusremotewrite/*
cp -r ./tmp/pkg/translator/prometheusremotewrite/*.go ./prometheusremotewrite
rm -rf ./prometheusremotewrite/*_test.go

rm -rf ./prometheus/*
cp -r ./tmp/pkg/translator/prometheus/*.go ./prometheus
rm -rf ./prometheus/*_test.go

rm -rf ./tmp

case $(sed --help 2>&1) in
*GNU*) set sed -i;;
*) set sed -i '';;
esac

"$@" -e 's#github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus#github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus#g' ./prometheusremotewrite/*.go ./prometheus/*.go
"$@" -e '1s#^#// DO NOT EDIT. COPIED AS-IS. SEE ../README.md\n\n#g' ./prometheusremotewrite/*.go ./prometheus/*.go

@@ -331,7 +331,7 @@ func postingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher)
return nil, err
}

var res []string
res := vals[:0]
for _, val := range vals {
if m.Matches(val) {
res = append(res, val)

@@ -368,7 +368,7 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Ma
return nil, err
}

var res []string
res := vals[:0]
// If the inverse match is ="", we just want all the values.
if m.Type == labels.MatchEqual && m.Value == "" {
res = vals
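
Both hunks replace `var res []string` with `res := vals[:0]`, the standard Go idiom for filtering in place: the result reuses the backing array of `vals` instead of allocating a second slice. A minimal sketch of the idiom (generic strings rather than the tsdb label values):

package main

import "fmt"

func main() {
	vals := []string{"a", "bb", "c", "dd"}
	keep := func(s string) bool { return len(s) == 1 }

	res := vals[:0] // shares vals' backing array; no new allocation
	for _, v := range vals {
		if keep(v) {
			res = append(res, v)
		}
	}
	fmt.Println(res) // [a c]
}

This is safe because each kept element is written at or before the position it was read from, so the write never overtakes the read.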

@@ -251,6 +251,12 @@ describe('analyzeCompletion test', () => {
      pos: 11, // cursor is between the bracket after the string myL
      expectedContext: [{ kind: ContextKind.LabelName }],
    },
    {
      title: 'continue to autocomplete QuotedLabelName in aggregate modifier',
      expr: 'sum by ("myL")',
      pos: 12, // cursor is between the bracket after the string myL
      expectedContext: [{ kind: ContextKind.LabelName }],
    },
    {
      title: 'autocomplete labelName in a list',
      expr: 'sum by (myLabel1,)',

@@ -263,6 +269,12 @@ describe('analyzeCompletion test', () => {
      pos: 23, // cursor is between the bracket after the string myLab
      expectedContext: [{ kind: ContextKind.LabelName }],
    },
    {
      title: 'autocomplete labelName in a list 2',
      expr: 'sum by ("myLabel1", "myLab")',
      pos: 27, // cursor is between the bracket after the string myLab
      expectedContext: [{ kind: ContextKind.LabelName }],
    },
    {
      title: 'autocomplete labelName associated to a metric',
      expr: 'metric_name{}',

@@ -299,6 +311,12 @@ describe('analyzeCompletion test', () => {
      pos: 22, // cursor is between the bracket after the comma
      expectedContext: [{ kind: ContextKind.LabelName, metricName: '' }],
    },
    {
      title: 'continue to autocomplete quoted labelName associated to a metric',
      expr: '{"metric_"}',
      pos: 10, // cursor is between the bracket after the string metric_
      expectedContext: [{ kind: ContextKind.MetricName, metricName: 'metric_' }],
    },
    {
      title: 'autocomplete the labelValue with metricName + labelName',
      expr: 'metric_name{labelName=""}',

@@ -342,6 +360,30 @@ describe('analyzeCompletion test', () => {
        },
      ],
    },
    {
      title: 'autocomplete the labelValue with metricName + quoted labelName',
      expr: 'metric_name{labelName="labelValue", "labelName"!=""}',
      pos: 50, // cursor is between the quotes
      expectedContext: [
        {
          kind: ContextKind.LabelValue,
          metricName: 'metric_name',
          labelName: 'labelName',
          matchers: [
            {
              name: 'labelName',
              type: Neq,
              value: '',
            },
            {
              name: 'labelName',
              type: EqlSingle,
              value: 'labelValue',
            },
          ],
        },
      ],
    },
    {
      title: 'autocomplete the labelValue associated to a labelName',
      expr: '{labelName=""}',

@@ -427,6 +469,12 @@ describe('analyzeCompletion test', () => {
      pos: 22, // cursor is after '!'
      expectedContext: [{ kind: ContextKind.MatchOp }],
    },
    {
      title: 'autocomplete matchOp 3',
      expr: 'metric_name{"labelName"!}',
      pos: 24, // cursor is after '!'
      expectedContext: [{ kind: ContextKind.BinOp }],
    },
    {
      title: 'autocomplete duration with offset',
      expr: 'http_requests_total offset 5',

@@ -29,7 +29,6 @@ import {
  GroupingLabels,
  Gte,
  Gtr,
  LabelMatcher,
  LabelMatchers,
  LabelName,
  Lss,

@@ -52,6 +51,9 @@ import {
  SubqueryExpr,
  Unless,
  VectorSelector,
  UnquotedLabelMatcher,
  QuotedLabelMatcher,
  QuotedLabelName,
} from '@prometheus-io/lezer-promql';
import { Completion, CompletionContext, CompletionResult } from '@codemirror/autocomplete';
import { EditorState } from '@codemirror/state';

@@ -181,7 +183,10 @@ export function computeStartCompletePosition(node: SyntaxNode, pos: number): num
  let start = node.from;
  if (node.type.id === LabelMatchers || node.type.id === GroupingLabels) {
    start = computeStartCompleteLabelPositionInLabelMatcherOrInGroupingLabel(node, pos);
  } else if (node.type.id === FunctionCallBody || (node.type.id === StringLiteral && node.parent?.type.id === LabelMatcher)) {
  } else if (
    node.type.id === FunctionCallBody ||
    (node.type.id === StringLiteral && (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher))
  ) {
    // When the cursor is between bracket and quote, we need to increment the starting position to avoid considering the open bracket / first string.
    start++;
  } else if (

@@ -212,7 +217,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
      result.push({ kind: ContextKind.Duration });
      break;
    }
    if (node.parent?.type.id === LabelMatcher) {
    if (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher) {
      // In this case the current token is not itself a valid match op yet:
      // metric_name{labelName!}
      result.push({ kind: ContextKind.MatchOp });

@@ -380,7 +385,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
      // sum by (myL)
      // So we have to continue to autocomplete any kind of labelName
      result.push({ kind: ContextKind.LabelName });
    } else if (node.parent?.type.id === LabelMatcher) {
    } else if (node.parent?.type.id === UnquotedLabelMatcher) {
      // In that case we are in the given situation:
      // metric_name{myL} or {myL}
      // so we have to either continue to autocomplete any kind of labelName or

@@ -389,9 +394,9 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
      }
      break;
    case StringLiteral:
      if (node.parent?.type.id === LabelMatcher) {
      if (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher) {
        // In this case we are in the given situation:
        // metric_name{labelName=""}
        // metric_name{labelName=""} or metric_name{"labelName"=""}
        // So we can autocomplete the labelValue

        // Get the labelName.

@@ -399,18 +404,34 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
        let labelName = '';
        if (node.parent.firstChild?.type.id === LabelName) {
          labelName = state.sliceDoc(node.parent.firstChild.from, node.parent.firstChild.to);
        } else if (node.parent.firstChild?.type.id === QuotedLabelName) {
          labelName = state.sliceDoc(node.parent.firstChild.from, node.parent.firstChild.to).slice(1, -1);
        }
        // then find the metricName if it exists
        const metricName = getMetricNameInVectorSelector(node, state);
        // finally get the full matcher available
        const matcherNode = walkBackward(node, LabelMatchers);
        const labelMatchers = buildLabelMatchers(matcherNode ? matcherNode.getChildren(LabelMatcher) : [], state);
        const labelMatcherOpts = [QuotedLabelName, QuotedLabelMatcher, UnquotedLabelMatcher];
        let labelMatchers: Matcher[] = [];
        for (const labelMatcherOpt of labelMatcherOpts) {
          labelMatchers = labelMatchers.concat(buildLabelMatchers(matcherNode ? matcherNode.getChildren(labelMatcherOpt) : [], state));
        }
        result.push({
          kind: ContextKind.LabelValue,
          metricName: metricName,
          labelName: labelName,
          matchers: labelMatchers,
        });
      } else if (node.parent?.parent?.type.id === GroupingLabels) {
        // In this case we are in the given situation:
        // sum by ("myL")
        // So we have to continue to autocomplete any kind of labelName
        result.push({ kind: ContextKind.LabelName });
      } else if (node.parent?.parent?.type.id === LabelMatchers) {
        // In that case we are in the given situation:
        // {""} or {"metric_"}
        // since this is for the QuotedMetricName we need to continue to autocomplete for the metric names
        result.push({ kind: ContextKind.MetricName, metricName: state.sliceDoc(node.from, node.to).slice(1, -1) });
      }
      break;
    case NumberLiteral:

@@ -12,33 +12,75 @@
// limitations under the License.

import { SyntaxNode } from '@lezer/common';
import { EqlRegex, EqlSingle, LabelName, MatchOp, Neq, NeqRegex, StringLiteral } from '@prometheus-io/lezer-promql';
import {
  EqlRegex,
  EqlSingle,
  LabelName,
  MatchOp,
  Neq,
  NeqRegex,
  StringLiteral,
  UnquotedLabelMatcher,
  QuotedLabelMatcher,
  QuotedLabelName,
} from '@prometheus-io/lezer-promql';
import { EditorState } from '@codemirror/state';
import { Matcher } from '../types';

function createMatcher(labelMatcher: SyntaxNode, state: EditorState): Matcher {
  const matcher = new Matcher(0, '', '');
  const cursor = labelMatcher.cursor();
  if (!cursor.next()) {
    // weird case, that would mean the labelMatcher doesn't have any child.
    return matcher;
  }
  do {
    switch (cursor.type.id) {
      case LabelName:
        matcher.name = state.sliceDoc(cursor.from, cursor.to);
        break;
      case MatchOp:
        const ope = cursor.node.firstChild;
        if (ope) {
          matcher.type = ope.type.id;
  switch (cursor.type.id) {
    case QuotedLabelMatcher:
      if (!cursor.next()) {
        // weird case, that would mean the QuotedLabelMatcher doesn't have any child.
        return matcher;
      }
      do {
        switch (cursor.type.id) {
          case QuotedLabelName:
            matcher.name = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
            break;
          case MatchOp:
            const ope = cursor.node.firstChild;
            if (ope) {
              matcher.type = ope.type.id;
            }
            break;
          case StringLiteral:
            matcher.value = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
            break;
        }
        break;
      case StringLiteral:
        matcher.value = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
        break;
    }
  } while (cursor.nextSibling());
      } while (cursor.nextSibling());
      break;
    case UnquotedLabelMatcher:
      if (!cursor.next()) {
        // weird case, that would mean the UnquotedLabelMatcher doesn't have any child.
        return matcher;
      }
      do {
        switch (cursor.type.id) {
          case LabelName:
            matcher.name = state.sliceDoc(cursor.from, cursor.to);
            break;
          case MatchOp:
            const ope = cursor.node.firstChild;
            if (ope) {
              matcher.type = ope.type.id;
            }
            break;
          case StringLiteral:
            matcher.value = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
            break;
        }
      } while (cursor.nextSibling());
      break;
    case QuotedLabelName:
      matcher.name = '__name__';
      matcher.value = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
      matcher.type = EqlSingle;
      break;
  }
  return matcher;
}

@@ -204,6 +204,11 @@ describe('promql operations', () => {
      expectedValueType: ValueType.vector,
      expectedDiag: [] as Diagnostic[],
    },
    {
      expr: 'foo and on(test,"blub") bar',
      expectedValueType: ValueType.vector,
      expectedDiag: [] as Diagnostic[],
    },
    {
      expr: 'foo and on() bar',
      expectedValueType: ValueType.vector,

@@ -214,6 +219,11 @@ describe('promql operations', () => {
      expectedValueType: ValueType.vector,
      expectedDiag: [] as Diagnostic[],
    },
    {
      expr: 'foo and ignoring(test,"blub") bar',
      expectedValueType: ValueType.vector,
      expectedDiag: [] as Diagnostic[],
    },
    {
      expr: 'foo and ignoring() bar',
      expectedValueType: ValueType.vector,

@@ -229,6 +239,11 @@ describe('promql operations', () => {
      expectedValueType: ValueType.vector,
      expectedDiag: [] as Diagnostic[],
    },
    {
      expr: 'foo / on(test,blub) group_left("bar") bar',
      expectedValueType: ValueType.vector,
      expectedDiag: [] as Diagnostic[],
    },
    {
      expr: 'foo / ignoring(test,blub) group_left(blub) bar',
      expectedValueType: ValueType.vector,

@@ -825,6 +840,134 @@ describe('promql operations', () => {
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      expr: '{"foo"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      // with metric name in the middle
      expr: '{a="b","foo",c=~"d"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      expr: '{"foo", a="bc"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      expr: '{"colon:in:the:middle"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      expr: '{"dot.in.the.middle"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      expr: '{"😀 in metric name"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      // quotes with escape
      expr: '{"this is \"foo\" metric"}', // eslint-disable-line
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      expr: '{"foo","colon:in:the:middle"="val"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      expr: '{"foo","dot.in.the.middle"="val"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      expr: '{"foo","😀 in label name"="val"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      // quotes with escape
      expr: '{"foo","this is \"bar\" label"="val"}', // eslint-disable-line
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      expr: 'foo{"bar"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [
        {
          from: 0,
          message: 'metric name must not be set twice: foo or bar',
          severity: 'error',
          to: 10,
        },
      ],
    },
    {
      expr: '{"foo", __name__="bar"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [
        {
          from: 0,
          message: 'metric name must not be set twice: foo or bar',
          severity: 'error',
          to: 23,
        },
      ],
    },
    {
      expr: '{"foo", "__name__"="bar"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [
        {
          from: 0,
          message: 'metric name must not be set twice: foo or bar',
          severity: 'error',
          to: 25,
        },
      ],
    },
    {
      expr: '{"__name__"="foo", __name__="bar"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [
        {
          from: 0,
          message: 'metric name must not be set twice: foo or bar',
          severity: 'error',
          to: 34,
        },
      ],
    },
    {
      expr: '{"foo", "bar"}',
      expectedValueType: ValueType.vector,
      expectedDiag: [
        {
          from: 0,
          to: 14,
          message: 'metric name must not be set twice: foo or bar',
          severity: 'error',
        },
      ],
    },
    {
      expr: `{'foo\`metric':'bar'}`, // eslint-disable-line
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
    {
      expr: '{`foo\"metric`=`bar`}', // eslint-disable-line
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
  ];
  testCases.forEach((value) => {
    const state = createEditorState(value.expr);

@@ -27,7 +27,6 @@ import {
  Gte,
  Gtr,
  Identifier,
  LabelMatcher,
  LabelMatchers,
  Lss,
  Lte,

@@ -36,11 +35,14 @@ import {
  Or,
  ParenExpr,
  Quantile,
  QuotedLabelMatcher,
  QuotedLabelName,
  StepInvariantExpr,
  SubqueryExpr,
  Topk,
  UnaryExpr,
  Unless,
  UnquotedLabelMatcher,
  VectorSelector,
} from '@prometheus-io/lezer-promql';
import { containsAtLeastOneChild } from './path-finder';

@@ -282,7 +284,7 @@ export class Parser {

  private checkVectorSelector(node: SyntaxNode): void {
    const matchList = node.getChild(LabelMatchers);
    const labelMatchers = buildLabelMatchers(matchList ? matchList.getChildren(LabelMatcher) : [], this.state);
    const labelMatcherOpts = [QuotedLabelName, QuotedLabelMatcher, UnquotedLabelMatcher];
    let labelMatchers: Matcher[] = [];
    for (const labelMatcherOpt of labelMatcherOpts) {
      labelMatchers = labelMatchers.concat(buildLabelMatchers(matchList ? matchList.getChildren(labelMatcherOpt) : [], this.state));
    }
    let vectorSelectorName = '';
    // VectorSelector ( Identifier )
    // https://github.com/promlabs/lezer-promql/blob/71e2f9fa5ae6f5c5547d5738966cd2512e6b99a8/src/promql.grammar#L200

@@ -301,6 +307,14 @@ export class Parser {
      // adding the metric name as a Matcher to avoid a false positive for this kind of expression:
      // foo{bare=''}
      labelMatchers.push(new Matcher(EqlSingle, '__name__', vectorSelectorName));
    } else {
      // In this case the metric name is not set outside the braces,
      // so check whether the metric name is set twice, as in:
      // {__name__:"foo", "foo"}, {"foo", "bar"}
      const labelMatchersMetricName = labelMatchers.filter((lm) => lm.name === '__name__');
      if (labelMatchersMetricName.length > 1) {
        this.addDiagnostic(node, `metric name must not be set twice: ${labelMatchersMetricName[0].value} or ${labelMatchersMetricName[1].value}`);
      }
    }

    // A Vector selector must contain at least one non-empty matcher to prevent

@@ -97,7 +97,7 @@ binModifiers {
}

GroupingLabels {
"(" (LabelName ("," LabelName)* ","?)? ")"
"(" ((LabelName | QuotedLabelName) ("," (LabelName | QuotedLabelName))* ","?)? ")"
}

FunctionCall {

@@ -220,7 +220,7 @@ VectorSelector {
}

LabelMatchers {
"{" (LabelMatcher ("," LabelMatcher)* ","?)? "}"
"{" ((UnquotedLabelMatcher | QuotedLabelMatcher | QuotedLabelName) ("," (UnquotedLabelMatcher | QuotedLabelMatcher | QuotedLabelName))* ","?)? "}"
}

MatchOp {

@@ -230,8 +230,16 @@ MatchOp {
NeqRegex
}

LabelMatcher {
LabelName MatchOp StringLiteral
UnquotedLabelMatcher {
LabelName MatchOp StringLiteral
}

QuotedLabelMatcher {
QuotedLabelName MatchOp StringLiteral
}

QuotedLabelName {
StringLiteral
}
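
Taken together, the new rules split the old single LabelMatcher into UnquotedLabelMatcher, QuotedLabelMatcher, and a bare QuotedLabelName, so selectors such as `{"metric_name"}` and `{"foo"="bar"}` now parse; the grammar test expectations below exercise exactly these forms.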

StepInvariantExpr {

@@ -112,6 +112,54 @@ PromQL(
  )
)

# Quoted label name in grouping labels

sum by("job", mode) (test_metric) / on("job") group_left sum by("job")(test_metric)

==>

PromQL(
  BinaryExpr(
    AggregateExpr(
      AggregateOp(Sum),
      AggregateModifier(
        By,
        GroupingLabels(
          QuotedLabelName(StringLiteral),
          LabelName
        )
      ),
      FunctionCallBody(
        VectorSelector(
          Identifier
        )
      )
    ),
    Div,
    MatchingModifierClause(
      On,
      GroupingLabels(
        QuotedLabelName(StringLiteral)
      )
      GroupLeft
    ),
    AggregateExpr(
      AggregateOp(Sum),
      AggregateModifier(
        By,
        GroupingLabels(
          QuotedLabelName(StringLiteral)
        )
      ),
      FunctionCallBody(
        VectorSelector(
          Identifier
        )
      )
    )
  )
)

# Case insensitivity for aggregations and binop modifiers.

SuM BY(testlabel1) (testmetric1) / IGNOring(testlabel2) AVG withOUT(testlabel3) (testmetric2)

@@ -226,25 +274,25 @@ PromQL(
VectorSelector(
Identifier,
LabelMatchers(
LabelMatcher(
LabelName,
MatchOp(EqlSingle),
StringLiteral
UnquotedLabelMatcher(
LabelName,
MatchOp(EqlSingle),
StringLiteral
),
LabelMatcher(
LabelName,
MatchOp(Neq),
StringLiteral
UnquotedLabelMatcher(
LabelName,
MatchOp(Neq),
StringLiteral
),
LabelMatcher(
LabelName,
MatchOp(EqlRegex),
StringLiteral
UnquotedLabelMatcher(
LabelName,
MatchOp(EqlRegex),
StringLiteral
),
LabelMatcher(
LabelName,
MatchOp(NeqRegex),
StringLiteral
UnquotedLabelMatcher(
LabelName,
MatchOp(NeqRegex),
StringLiteral
)
)
)

@@ -571,14 +619,14 @@ PromQL(NumberLiteral)
NaN{foo="bar"}

==>
PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(LabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))
PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(UnquotedLabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))

# Trying to illegally use Inf as a metric name.

Inf{foo="bar"}

==>
PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(LabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))
PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(UnquotedLabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))

# Negative offset

@@ -614,3 +662,24 @@ MetricName(Identifier)

==>
PromQL(BinaryExpr(NumberLiteral,Add,BinaryExpr(VectorSelector(Identifier),Atan2,VectorSelector(Identifier))))

# Testing quoted metric name

{"metric_name"}

==>
PromQL(VectorSelector(LabelMatchers(QuotedLabelName(StringLiteral))))

# Testing quoted label name

{"foo"="bar"}

==>
PromQL(VectorSelector(LabelMatchers(QuotedLabelMatcher(QuotedLabelName(StringLiteral), MatchOp(EqlSingle), StringLiteral))))

# Testing quoted metric name and label name

{"metric_name", "foo"="bar"}

==>
PromQL(VectorSelector(LabelMatchers(QuotedLabelName(StringLiteral), QuotedLabelMatcher(QuotedLabelName(StringLiteral), MatchOp(EqlSingle), StringLiteral))))