Bump OTel Collector dependency to v0.88.0

I initially didn't copy the otlptranslator/prometheus folder because I
assumed it wouldn't receive changes. It did, so this PR copies it over
and updates the Collector version.

Supersedes: https://github.com/prometheus/prometheus/pull/12809

Signed-off-by: Goutham <gouthamve@gmail.com>
This commit is contained in:
Goutham 2023-11-15 15:09:15 +01:00
parent cf528bef03
commit a99f48cc9f
No known key found for this signature in database
GPG key ID: F1C217E8E9023CAD
13 changed files with 308 additions and 339 deletions

1
go.mod
View file

@ -55,6 +55,7 @@ require (
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.8.4 github.com/stretchr/testify v1.8.4
github.com/vultr/govultr/v2 v2.17.2 github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/featuregate v0.77.0
go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 go.opentelemetry.io/collector/pdata v1.0.0-rcv0017
go.opentelemetry.io/collector/semconv v0.88.0 go.opentelemetry.io/collector/semconv v0.88.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0

2
go.sum
View file

@ -760,6 +760,8 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/featuregate v0.77.0 h1:m1/IzaXoQh6SgF6CM80vrBOCf5zSJ2GVISfA27fYzGU=
go.opentelemetry.io/collector/featuregate v0.77.0/go.mod h1:/kVAsGUCyJXIDSgHftCN63QiwAEVHRLX2Kh/S+dqgHY=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 h1:AgALhc2VenoA5l1DvTdg7mkzaBGqoTSuMkAtjsttBFo= go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 h1:AgALhc2VenoA5l1DvTdg7mkzaBGqoTSuMkAtjsttBFo=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0017/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4= go.opentelemetry.io/collector/pdata v1.0.0-rcv0017/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4=
go.opentelemetry.io/collector/semconv v0.88.0 h1:8TVP4hYaUC87S6CCLKNoSxsUE0ChldE4vqotvNHHUnE= go.opentelemetry.io/collector/semconv v0.88.0 h1:8TVP4hYaUC87S6CCLKNoSxsUE0ChldE4vqotvNHHUnE=

View file

@ -1,21 +1,31 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package normalize package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus"
import ( import (
"strings" "strings"
"unicode" "unicode"
"go.opentelemetry.io/collector/featuregate"
) )
// Normalizes the specified label to follow Prometheus label names standard. var dropSanitizationGate = featuregate.GlobalRegistry().MustRegister(
"pkg.translator.prometheus.PermissiveLabelSanitization",
featuregate.StageAlpha,
featuregate.WithRegisterDescription("Controls whether to change labels starting with '_' to 'key_'."),
featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8950"),
)
// Normalizes the specified label to follow Prometheus label names standard
// //
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
// //
// Labels that start with non-letter rune will be prefixed with "key_". // Labels that start with non-letter rune will be prefixed with "key_"
// //
// Exception is made for double-underscores which are allowed. // Exception is made for double-underscores which are allowed
func NormalizeLabel(label string) string { func NormalizeLabel(label string) string {
// Trivial case // Trivial case
if len(label) == 0 { if len(label) == 0 {
return label return label
@ -27,12 +37,14 @@ func NormalizeLabel(label string) string {
// If label starts with a number, prepend with "key_" // If label starts with a number, prepend with "key_"
if unicode.IsDigit(rune(label[0])) { if unicode.IsDigit(rune(label[0])) {
label = "key_" + label label = "key_" + label
} else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") && !dropSanitizationGate.IsEnabled() {
label = "key" + label
} }
return label return label
} }
// Return '_' for anything non-alphanumeric. // Return '_' for anything non-alphanumeric
func sanitizeRune(r rune) rune { func sanitizeRune(r rune) rune {
if unicode.IsLetter(r) || unicode.IsDigit(r) { if unicode.IsLetter(r) || unicode.IsDigit(r) {
return r return r

View file

@ -1,19 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package normalize

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestSanitizeDropSanitization exercises NormalizeLabel on edge cases:
// empty input, leading underscore, leading digit, forbidden runes, and a
// double-underscore prefix.
//
// NOTE(review): "_test" is expected to pass through unchanged here, which
// matches the behavior when the PermissiveLabelSanitization feature gate is
// enabled — presumably that gate is active in this test context; verify.
func TestSanitizeDropSanitization(t *testing.T) {
	require.Equal(t, "", NormalizeLabel(""))
	require.Equal(t, "_test", NormalizeLabel("_test"))
	require.Equal(t, "key_0test", NormalizeLabel("0test"))
	require.Equal(t, "test", NormalizeLabel("test"))
	require.Equal(t, "test__", NormalizeLabel("test_/"))
	require.Equal(t, "__test", NormalizeLabel("__test"))
}

View file

@ -1,21 +1,23 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package normalize package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus"
import ( import (
"strings" "strings"
"unicode" "unicode"
"go.opentelemetry.io/collector/featuregate"
"go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric"
) )
// The map to translate OTLP units to Prometheus units. // The map to translate OTLP units to Prometheus units
// OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html // OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html
// (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units) // (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units)
// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units // Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units
// OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units // OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units
var unitMap = map[string]string{ var unitMap = map[string]string{
// Time // Time
"d": "days", "d": "days",
"h": "hours", "h": "hours",
@ -35,11 +37,6 @@ var unitMap = map[string]string{
"MBy": "megabytes", "MBy": "megabytes",
"GBy": "gigabytes", "GBy": "gigabytes",
"TBy": "terabytes", "TBy": "terabytes",
"B": "bytes",
"KB": "kilobytes",
"MB": "megabytes",
"GB": "gigabytes",
"TB": "terabytes",
// SI // SI
"m": "meters", "m": "meters",
@ -54,11 +51,10 @@ var unitMap = map[string]string{
"Hz": "hertz", "Hz": "hertz",
"1": "", "1": "",
"%": "percent", "%": "percent",
"$": "dollars",
} }
// The map that translates the "per" unit. // The map that translates the "per" unit
// Example: s => per second (singular). // Example: s => per second (singular)
var perUnitMap = map[string]string{ var perUnitMap = map[string]string{
"s": "second", "s": "second",
"m": "minute", "m": "minute",
@ -69,7 +65,14 @@ var perUnitMap = map[string]string{
"y": "year", "y": "year",
} }
// Build a Prometheus-compliant metric name for the specified metric. var normalizeNameGate = featuregate.GlobalRegistry().MustRegister(
"pkg.translator.prometheus.NormalizeName",
featuregate.StageBeta,
featuregate.WithRegisterDescription("Controls whether metrics names are automatically normalized to follow Prometheus naming convention"),
featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8950"),
)
// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric
// //
// Metric name is prefixed with specified namespace and underscore (if any). // Metric name is prefixed with specified namespace and underscore (if any).
// Namespace is not cleaned up. Make sure specified namespace follows Prometheus // Namespace is not cleaned up. Make sure specified namespace follows Prometheus
@ -77,7 +80,33 @@ var perUnitMap = map[string]string{
// //
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
// and https://prometheus.io/docs/practices/naming/#metric-and-label-naming // and https://prometheus.io/docs/practices/naming/#metric-and-label-naming
func BuildPromCompliantName(metric pmetric.Metric, namespace string) string { func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string {
var metricName string
// Full normalization following standard Prometheus naming conventions
if addMetricSuffixes && normalizeNameGate.IsEnabled() {
return normalizeName(metric, namespace)
}
// Simple case (no full normalization, no units, etc.), we simply trim out forbidden chars
metricName = RemovePromForbiddenRunes(metric.Name())
// Namespace?
if namespace != "" {
return namespace + "_" + metricName
}
// Metric name starts with a digit? Prefix it with an underscore
if metricName != "" && unicode.IsDigit(rune(metricName[0])) {
metricName = "_" + metricName
}
return metricName
}
// Build a normalized name for the specified metric
func normalizeName(metric pmetric.Metric, namespace string) string {
// Split metric name in "tokens" (remove all non-alphanumeric) // Split metric name in "tokens" (remove all non-alphanumeric)
nameTokens := strings.FieldsFunc( nameTokens := strings.FieldsFunc(
metric.Name(), metric.Name(),
@ -202,7 +231,7 @@ func removeSuffix(tokens []string, suffix string) []string {
return tokens return tokens
} }
// Clean up specified string so it's Prometheus compliant. // Clean up specified string so it's Prometheus compliant
func CleanUpString(s string) string { func CleanUpString(s string) string {
return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_") return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_")
} }
@ -211,8 +240,8 @@ func RemovePromForbiddenRunes(s string) string {
return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_") return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_")
} }
// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit. // Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit
// Returns the specified unit if not found in unitMap. // Returns the specified unit if not found in unitMap
func unitMapGetOrDefault(unit string) string { func unitMapGetOrDefault(unit string) string {
if promUnit, ok := unitMap[unit]; ok { if promUnit, ok := unitMap[unit]; ok {
return promUnit return promUnit
@ -220,8 +249,8 @@ func unitMapGetOrDefault(unit string) string {
return unit return unit
} }
// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit. // Retrieve the Prometheus "per" unit corresponding to the specified "per" unit
// Returns the specified unit if not found in perUnitMap. // Returns the specified unit if not found in perUnitMap
func perUnitMapGetOrDefault(perUnit string) string { func perUnitMapGetOrDefault(perUnit string) string {
if promPerUnit, ok := perUnitMap[perUnit]; ok { if promPerUnit, ok := perUnitMap[perUnit]; ok {
return promPerUnit return promPerUnit
@ -229,7 +258,7 @@ func perUnitMapGetOrDefault(perUnit string) string {
return perUnit return perUnit
} }
// Returns whether the slice contains the specified value. // Returns whether the slice contains the specified value
func contains(slice []string, value string) bool { func contains(slice []string, value string) bool {
for _, sliceEntry := range slice { for _, sliceEntry := range slice {
if sliceEntry == value { if sliceEntry == value {
@ -239,7 +268,7 @@ func contains(slice []string, value string) bool {
return false return false
} }
// Remove the specified value from the slice. // Remove the specified value from the slice
func removeItem(slice []string, value string) []string { func removeItem(slice []string, value string) []string {
newSlice := make([]string, 0, len(slice)) newSlice := make([]string, 0, len(slice))
for _, sliceEntry := range slice { for _, sliceEntry := range slice {

View file

@ -1,180 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Tests for BuildPromCompliantName, TrimPromSuffixes, and the unit-mapping
// helpers. Metrics are built with the createGauge/createCounter test helpers.
package normalize

import (
	"testing"

	"github.com/stretchr/testify/require"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

// "By" maps to a "bytes" suffix on gauges.
func TestByte(t *testing.T) {
	require.Equal(t, "system_filesystem_usage_bytes", BuildPromCompliantName(createGauge("system.filesystem.usage", "By"), ""))
}

// Counters get "_total" appended after the unit suffix; an existing
// suffix is not duplicated.
func TestByteCounter(t *testing.T) {
	require.Equal(t, "system_io_bytes_total", BuildPromCompliantName(createCounter("system.io", "By"), ""))
	require.Equal(t, "network_transmitted_bytes_total", BuildPromCompliantName(createCounter("network_transmitted_bytes_total", "By"), ""))
}

// Leading/trailing whitespace in name and unit is stripped.
func TestWhiteSpaces(t *testing.T) {
	require.Equal(t, "system_filesystem_usage_bytes", BuildPromCompliantName(createGauge("\t system.filesystem.usage ", " By\t"), ""))
}

// Annotation-style units like "{packets}" produce no suffix.
func TestNonStandardUnit(t *testing.T) {
	require.Equal(t, "system_network_dropped", BuildPromCompliantName(createGauge("system.network.dropped", "{packets}"), ""))
}

func TestNonStandardUnitCounter(t *testing.T) {
	require.Equal(t, "system_network_dropped_total", BuildPromCompliantName(createCounter("system.network.dropped", "{packets}"), ""))
}

// Unknown units are appended verbatim, without duplicating a token
// already present in the name.
func TestBrokenUnit(t *testing.T) {
	require.Equal(t, "system_network_dropped_packets", BuildPromCompliantName(createGauge("system.network.dropped", "packets"), ""))
	require.Equal(t, "system_network_packets_dropped", BuildPromCompliantName(createGauge("system.network.packets.dropped", "packets"), ""))
	require.Equal(t, "system_network_packets", BuildPromCompliantName(createGauge("system.network.packets", "packets"), ""))
}

func TestBrokenUnitCounter(t *testing.T) {
	require.Equal(t, "system_network_dropped_packets_total", BuildPromCompliantName(createCounter("system.network.dropped", "packets"), ""))
	require.Equal(t, "system_network_packets_dropped_total", BuildPromCompliantName(createCounter("system.network.packets.dropped", "packets"), ""))
	require.Equal(t, "system_network_packets_total", BuildPromCompliantName(createCounter("system.network.packets", "packets"), ""))
}

// Unit "1" becomes "_ratio" on gauges, but not on counters.
func TestRatio(t *testing.T) {
	require.Equal(t, "hw_gpu_memory_utilization_ratio", BuildPromCompliantName(createGauge("hw.gpu.memory.utilization", "1"), ""))
	require.Equal(t, "hw_fan_speed_ratio", BuildPromCompliantName(createGauge("hw.fan.speed_ratio", "1"), ""))
	require.Equal(t, "objects_total", BuildPromCompliantName(createCounter("objects", "1"), ""))
}

func TestHertz(t *testing.T) {
	require.Equal(t, "hw_cpu_speed_limit_hertz", BuildPromCompliantName(createGauge("hw.cpu.speed_limit", "Hz"), ""))
}

// Rate units such as "km/h" expand to "<unit>_per_<per-unit>".
func TestPer(t *testing.T) {
	require.Equal(t, "broken_metric_speed_km_per_hour", BuildPromCompliantName(createGauge("broken.metric.speed", "km/h"), ""))
	require.Equal(t, "astro_light_speed_limit_meters_per_second", BuildPromCompliantName(createGauge("astro.light.speed_limit", "m/s"), ""))
}

func TestPercent(t *testing.T) {
	require.Equal(t, "broken_metric_success_ratio_percent", BuildPromCompliantName(createGauge("broken.metric.success_ratio", "%"), ""))
	require.Equal(t, "broken_metric_success_percent", BuildPromCompliantName(createGauge("broken.metric.success_percent", "%"), ""))
}

func TestDollar(t *testing.T) {
	require.Equal(t, "crypto_bitcoin_value_dollars", BuildPromCompliantName(createGauge("crypto.bitcoin.value", "$"), ""))
	require.Equal(t, "crypto_bitcoin_value_dollars", BuildPromCompliantName(createGauge("crypto.bitcoin.value.dollars", "$"), ""))
}

// An empty or whitespace-only unit adds no suffix.
func TestEmpty(t *testing.T) {
	require.Equal(t, "test_metric_no_unit", BuildPromCompliantName(createGauge("test.metric.no_unit", ""), ""))
	require.Equal(t, "test_metric_spaces", BuildPromCompliantName(createGauge("test.metric.spaces", " \t "), ""))
}

// Non-alphanumeric runes in units are dropped; surviving tokens remain.
func TestUnsupportedRunes(t *testing.T) {
	require.Equal(t, "unsupported_metric_temperature_F", BuildPromCompliantName(createGauge("unsupported.metric.temperature", "°F"), ""))
	require.Equal(t, "unsupported_metric_weird", BuildPromCompliantName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), ""))
	require.Equal(t, "unsupported_metric_redundant_test_per_C", BuildPromCompliantName(createGauge("unsupported.metric.redundant", "__test $/°C"), ""))
}

// Realistic metric name/unit pairs taken from OTel Collector receivers.
func TestOtelReceivers(t *testing.T) {
	require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", BuildPromCompliantName(createCounter("active_directory.ds.replication.network.io", "By"), ""))
	require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", BuildPromCompliantName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), ""))
	require.Equal(t, "active_directory_ds_replication_object_rate_per_second", BuildPromCompliantName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), ""))
	require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", BuildPromCompliantName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), ""))
	require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", BuildPromCompliantName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), ""))
	require.Equal(t, "apache_current_connections", BuildPromCompliantName(createGauge("apache.current_connections", "connections"), ""))
	require.Equal(t, "apache_workers_connections", BuildPromCompliantName(createGauge("apache.workers", "connections"), ""))
	require.Equal(t, "apache_requests_total", BuildPromCompliantName(createCounter("apache.requests", "1"), ""))
	require.Equal(t, "bigip_virtual_server_request_count_total", BuildPromCompliantName(createCounter("bigip.virtual_server.request.count", "{requests}"), ""))
	require.Equal(t, "system_cpu_utilization_ratio", BuildPromCompliantName(createGauge("system.cpu.utilization", "1"), ""))
	require.Equal(t, "system_disk_operation_time_seconds_total", BuildPromCompliantName(createCounter("system.disk.operation_time", "s"), ""))
	require.Equal(t, "system_cpu_load_average_15m_ratio", BuildPromCompliantName(createGauge("system.cpu.load_average.15m", "1"), ""))
	require.Equal(t, "memcached_operation_hit_ratio_percent", BuildPromCompliantName(createGauge("memcached.operation_hit_ratio", "%"), ""))
	require.Equal(t, "mongodbatlas_process_asserts_per_second", BuildPromCompliantName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), ""))
	require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", BuildPromCompliantName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), ""))
	require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", BuildPromCompliantName(createGauge("mongodbatlas.process.network.io", "By/s"), ""))
	require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", BuildPromCompliantName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), ""))
	require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", BuildPromCompliantName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), ""))
	require.Equal(t, "nginx_requests", BuildPromCompliantName(createGauge("nginx.requests", "requests"), ""))
	require.Equal(t, "nginx_connections_accepted", BuildPromCompliantName(createGauge("nginx.connections_accepted", "connections"), ""))
	require.Equal(t, "nsxt_node_memory_usage_kilobytes", BuildPromCompliantName(createGauge("nsxt.node.memory.usage", "KBy"), ""))
	require.Equal(t, "redis_latest_fork_microseconds", BuildPromCompliantName(createGauge("redis.latest_fork", "us"), ""))
}

// TrimPromSuffixes removes unit/type suffixes only when they exactly match
// the metric's unit and type.
func TestTrimPromSuffixes(t *testing.T) {
	require.Equal(t, "active_directory_ds_replication_network_io", TrimPromSuffixes("active_directory_ds_replication_network_io_bytes_total", pmetric.MetricTypeSum, "bytes"))
	require.Equal(t, "active_directory_ds_name_cache_hit_rate", TrimPromSuffixes("active_directory_ds_name_cache_hit_rate_percent", pmetric.MetricTypeGauge, "percent"))
	require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time", TrimPromSuffixes("active_directory_ds_ldap_bind_last_successful_time_milliseconds", pmetric.MetricTypeGauge, "milliseconds"))
	require.Equal(t, "apache_requests", TrimPromSuffixes("apache_requests_total", pmetric.MetricTypeSum, "1"))
	require.Equal(t, "system_cpu_utilization", TrimPromSuffixes("system_cpu_utilization_ratio", pmetric.MetricTypeGauge, "ratio"))
	require.Equal(t, "mongodbatlas_process_journaling_data_files", TrimPromSuffixes("mongodbatlas_process_journaling_data_files_mebibytes", pmetric.MetricTypeGauge, "mebibytes"))
	require.Equal(t, "mongodbatlas_process_network_io", TrimPromSuffixes("mongodbatlas_process_network_io_bytes_per_second", pmetric.MetricTypeGauge, "bytes_per_second"))
	require.Equal(t, "mongodbatlas_process_oplog_rate", TrimPromSuffixes("mongodbatlas_process_oplog_rate_gibibytes_per_hour", pmetric.MetricTypeGauge, "gibibytes_per_hour"))
	require.Equal(t, "nsxt_node_memory_usage", TrimPromSuffixes("nsxt_node_memory_usage_kilobytes", pmetric.MetricTypeGauge, "kilobytes"))
	require.Equal(t, "redis_latest_fork", TrimPromSuffixes("redis_latest_fork_microseconds", pmetric.MetricTypeGauge, "microseconds"))
	require.Equal(t, "up", TrimPromSuffixes("up", pmetric.MetricTypeGauge, ""))

	// These are not necessarily valid OM units, only tested for the sake of completeness.
	require.Equal(t, "active_directory_ds_replication_sync_object_pending", TrimPromSuffixes("active_directory_ds_replication_sync_object_pending_total", pmetric.MetricTypeSum, "{objects}"))
	require.Equal(t, "apache_current", TrimPromSuffixes("apache_current_connections", pmetric.MetricTypeGauge, "connections"))
	require.Equal(t, "bigip_virtual_server_request_count", TrimPromSuffixes("bigip_virtual_server_request_count_total", pmetric.MetricTypeSum, "{requests}"))
	require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", TrimPromSuffixes("mongodbatlas_process_db_query_targeting_scanned_per_returned", pmetric.MetricTypeGauge, "{scanned}/{returned}"))
	require.Equal(t, "nginx_connections_accepted", TrimPromSuffixes("nginx_connections_accepted", pmetric.MetricTypeGauge, "connections"))
	require.Equal(t, "apache_workers", TrimPromSuffixes("apache_workers_connections", pmetric.MetricTypeGauge, "connections"))
	require.Equal(t, "nginx", TrimPromSuffixes("nginx_requests", pmetric.MetricTypeGauge, "requests"))

	// Units shouldn't be trimmed if the unit is not a direct match with the suffix, i.e, a suffix "_seconds" shouldn't be removed if unit is "sec" or "s"
	require.Equal(t, "system_cpu_load_average_15m_ratio", TrimPromSuffixes("system_cpu_load_average_15m_ratio", pmetric.MetricTypeGauge, "1"))
	require.Equal(t, "mongodbatlas_process_asserts_per_second", TrimPromSuffixes("mongodbatlas_process_asserts_per_second", pmetric.MetricTypeGauge, "{assertions}/s"))
	require.Equal(t, "memcached_operation_hit_ratio_percent", TrimPromSuffixes("memcached_operation_hit_ratio_percent", pmetric.MetricTypeGauge, "%"))
	require.Equal(t, "active_directory_ds_replication_object_rate_per_second", TrimPromSuffixes("active_directory_ds_replication_object_rate_per_second", pmetric.MetricTypeGauge, "{objects}/s"))
	require.Equal(t, "system_disk_operation_time_seconds", TrimPromSuffixes("system_disk_operation_time_seconds_total", pmetric.MetricTypeSum, "s"))
}

// The namespace is prefixed verbatim; forbidden runes in the name are dropped.
func TestNamespace(t *testing.T) {
	require.Equal(t, "space_test", BuildPromCompliantName(createGauge("test", ""), "space"))
	require.Equal(t, "space_test", BuildPromCompliantName(createGauge("#test", ""), "space"))
}

func TestCleanUpString(t *testing.T) {
	require.Equal(t, "", CleanUpString(""))
	require.Equal(t, "a_b", CleanUpString("a b"))
	require.Equal(t, "hello_world", CleanUpString("hello, world!"))
	require.Equal(t, "hello_you_2", CleanUpString("hello you 2"))
	require.Equal(t, "1000", CleanUpString("$1000"))
	require.Equal(t, "", CleanUpString("*+$^=)"))
}

func TestUnitMapGetOrDefault(t *testing.T) {
	require.Equal(t, "", unitMapGetOrDefault(""))
	require.Equal(t, "seconds", unitMapGetOrDefault("s"))
	require.Equal(t, "invalid", unitMapGetOrDefault("invalid"))
}

func TestPerUnitMapGetOrDefault(t *testing.T) {
	require.Equal(t, "", perUnitMapGetOrDefault(""))
	require.Equal(t, "second", perUnitMapGetOrDefault("s"))
	require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid"))
}

// removeItem removes every occurrence of the value and never returns nil.
func TestRemoveItem(t *testing.T) {
	require.Equal(t, []string{}, removeItem([]string{}, "test"))
	require.Equal(t, []string{}, removeItem([]string{}, ""))
	require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d"))
	require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, ""))
	require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c"))
	require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b"))
	require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a"))
}

// End-to-end checks: namespaces, leading digits, collapsed underscores,
// and colon handling.
func TestBuildPromCompliantName(t *testing.T) {
	require.Equal(t, "system_io_bytes_total", BuildPromCompliantName(createCounter("system.io", "By"), ""))
	require.Equal(t, "system_network_io_bytes_total", BuildPromCompliantName(createCounter("network.io", "By"), "system"))
	require.Equal(t, "_3_14_digits", BuildPromCompliantName(createGauge("3.14 digits", ""), ""))
	require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildPromCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), ""))
	require.Equal(t, "foo_bar", BuildPromCompliantName(createGauge(":foo::bar", ""), ""))
	require.Equal(t, "foo_bar_total", BuildPromCompliantName(createCounter(":foo::bar", ""), ""))
}

View file

@ -1,34 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Test helpers for constructing pmetric metrics used by the normalize tests.
package normalize

import (
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// ilm is the shared ScopeMetrics container that createGauge and
// createCounter append new test metrics into.
var ilm pmetric.ScopeMetrics

// init builds the pmetric scaffolding (Metrics -> ResourceMetrics ->
// ScopeMetrics) once, so each helper call only appends a metric.
func init() {
	metrics := pmetric.NewMetrics()
	resourceMetrics := metrics.ResourceMetrics().AppendEmpty()
	ilm = resourceMetrics.ScopeMetrics().AppendEmpty()
}

// Returns a new Metric of type "Gauge" with specified name and unit.
func createGauge(name, unit string) pmetric.Metric {
	gauge := ilm.Metrics().AppendEmpty()
	gauge.SetName(name)
	gauge.SetUnit(unit)
	gauge.SetEmptyGauge()
	return gauge
}

// Returns a new Metric of type Monotonic Sum (a counter) with specified
// name and unit.
func createCounter(name, unit string) pmetric.Metric {
	counter := ilm.Metrics().AppendEmpty()
	// Monotonic sums are what Prometheus treats as counters.
	counter.SetEmptySum().SetIsMonotonic(true)
	counter.SetName(name)
	counter.SetUnit(unit)
	return counter
}

View file

@ -0,0 +1,90 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus"
import "strings"
// wordToUCUM maps English unit words (as they appear in Prometheus-style
// metric name suffixes) back to their UCUM symbols.
var wordToUCUM = map[string]string{

	// Time
	"days":         "d",
	"hours":        "h",
	"minutes":      "min",
	"seconds":      "s",
	"milliseconds": "ms",
	"microseconds": "us",
	"nanoseconds":  "ns",

	// Bytes
	"bytes":     "By",
	"kibibytes": "KiBy",
	"mebibytes": "MiBy",
	"gibibytes": "GiBy",
	"tibibytes": "TiBy",
	"kilobytes": "KBy",
	"megabytes": "MBy",
	"gigabytes": "GBy",
	"terabytes": "TBy",

	// SI
	"meters":  "m",
	"volts":   "V",
	"amperes": "A",
	"joules":  "J",
	"watts":   "W",
	"grams":   "g",

	// Misc
	"celsius": "Cel",
	"hertz":   "Hz",
	"ratio":   "1",
	"percent": "%",
}

// perWordToUCUM maps the English "per"-denominator word to its UCUM symbol.
// Example: per_second (singular) => /s
var perWordToUCUM = map[string]string{
	"second": "s",
	"minute": "m",
	"hour":   "h",
	"day":    "d",
	"week":   "w",
	"month":  "mo",
	"year":   "y",
}

// UnitWordToUCUM converts english unit words to UCUM units:
// https://ucum.org/ucum#section-Alphabetic-Index-By-Symbol
// It also handles rates, such as meters_per_second, by translating the first
// word to UCUM, and the "per" word to UCUM. It joins them with a "/" between.
// Words not found in the maps are passed through unchanged.
func UnitWordToUCUM(unit string) string {
	// SplitN with a non-empty separator always returns at least one element
	// (even for the empty string), so no empty-slice guard is needed.
	unitTokens := strings.SplitN(unit, "_per_", 2)
	ucumUnit := wordToUCUMOrDefault(unitTokens[0])
	if len(unitTokens) > 1 && unitTokens[1] != "" {
		ucumUnit += "/" + perWordToUCUMOrDefault(unitTokens[1])
	}
	return ucumUnit
}

// wordToUCUMOrDefault retrieves the UCUM unit corresponding to the specified
// "basic" unit word. Returns the specified unit unchanged if not found in
// wordToUCUM.
func wordToUCUMOrDefault(unit string) string {
	if promUnit, ok := wordToUCUM[unit]; ok {
		return promUnit
	}
	return unit
}

// perWordToUCUMOrDefault retrieves the UCUM unit corresponding to the
// specified "per" unit word. Returns the specified unit unchanged if not
// found in perWordToUCUM.
func perWordToUCUMOrDefault(perUnit string) string {
	if promPerUnit, ok := perWordToUCUM[perUnit]; ok {
		return promPerUnit
	}
	return perUnit
}

View file

@ -71,8 +71,8 @@ func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// creates a new TimeSeries in the map if not found and returns the time series signature. // creates a new TimeSeries in the map if not found and returns the time series signature.
// tsMap will be unmodified if either labels or sample is nil, but can still be modified if the exemplar is nil. // tsMap will be unmodified if either labels or sample is nil, but can still be modified if the exemplar is nil.
func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label, func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label,
datatype string, datatype string) string {
) string {
if sample == nil || labels == nil || tsMap == nil { if sample == nil || labels == nil || tsMap == nil {
return "" return ""
} }
@ -132,7 +132,14 @@ func addExemplar(tsMap map[string]*prompb.TimeSeries, bucketBounds []bucketBound
// the label slice should not contain duplicate label names; this method sorts the slice by label name before creating // the label slice should not contain duplicate label names; this method sorts the slice by label name before creating
// the signature. // the signature.
func timeSeriesSignature(datatype string, labels *[]prompb.Label) string { func timeSeriesSignature(datatype string, labels *[]prompb.Label) string {
length := len(datatype)
for _, lb := range *labels {
length += 2 + len(lb.GetName()) + len(lb.GetValue())
}
b := strings.Builder{} b := strings.Builder{}
b.Grow(length)
b.WriteString(datatype) b.WriteString(datatype)
sort.Sort(ByLabelName(*labels)) sort.Sort(ByLabelName(*labels))
@ -151,8 +158,22 @@ func timeSeriesSignature(datatype string, labels *[]prompb.Label) string {
// Unpaired string value is ignored. String pairs overwrites OTLP labels if collision happens, and the overwrite is // Unpaired string value is ignored. String pairs overwrites OTLP labels if collision happens, and the overwrite is
// logged. Resultant label names are sanitized. // logged. Resultant label names are sanitized.
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, extras ...string) []prompb.Label { func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, extras ...string) []prompb.Label {
serviceName, haveServiceName := resource.Attributes().Get(conventions.AttributeServiceName)
instance, haveInstanceID := resource.Attributes().Get(conventions.AttributeServiceInstanceID)
// Calculate the maximum possible number of labels we could return so we can preallocate l
maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2
if haveServiceName {
maxLabelCount++
}
if haveInstanceID {
maxLabelCount++
}
// map ensures no duplicate label name // map ensures no duplicate label name
l := map[string]prompb.Label{} l := make(map[string]string, maxLabelCount)
// Ensure attributes are sorted by key for consistent merging of keys which // Ensure attributes are sorted by key for consistent merging of keys which
// collide when sanitized. // collide when sanitized.
@ -164,35 +185,25 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
sort.Stable(ByLabelName(labels)) sort.Stable(ByLabelName(labels))
for _, label := range labels { for _, label := range labels {
finalKey := prometheustranslator.NormalizeLabel(label.Name) var finalKey = prometheustranslator.NormalizeLabel(label.Name)
if existingLabel, alreadyExists := l[finalKey]; alreadyExists { if existingLabel, alreadyExists := l[finalKey]; alreadyExists {
existingLabel.Value = existingLabel.Value + ";" + label.Value l[finalKey] = existingLabel + ";" + label.Value
l[finalKey] = existingLabel
} else { } else {
l[finalKey] = prompb.Label{ l[finalKey] = label.Value
Name: finalKey,
Value: label.Value,
}
} }
} }
// Map service.name + service.namespace to job // Map service.name + service.namespace to job
if serviceName, ok := resource.Attributes().Get(conventions.AttributeServiceName); ok { if haveServiceName {
val := serviceName.AsString() val := serviceName.AsString()
if serviceNamespace, ok := resource.Attributes().Get(conventions.AttributeServiceNamespace); ok { if serviceNamespace, ok := resource.Attributes().Get(conventions.AttributeServiceNamespace); ok {
val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val) val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val)
} }
l[model.JobLabel] = prompb.Label{ l[model.JobLabel] = val
Name: model.JobLabel,
Value: val,
}
} }
// Map service.instance.id to instance // Map service.instance.id to instance
if instance, ok := resource.Attributes().Get(conventions.AttributeServiceInstanceID); ok { if haveInstanceID {
l[model.InstanceLabel] = prompb.Label{ l[model.InstanceLabel] = instance.AsString()
Name: model.InstanceLabel,
Value: instance.AsString(),
}
} }
for key, value := range externalLabels { for key, value := range externalLabels {
// External labels have already been sanitized // External labels have already been sanitized
@ -200,10 +211,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
// Skip external labels if they are overridden by metric attributes // Skip external labels if they are overridden by metric attributes
continue continue
} }
l[key] = prompb.Label{ l[key] = value
Name: key,
Value: value,
}
} }
for i := 0; i < len(extras); i += 2 { for i := 0; i < len(extras); i += 2 {
@ -219,15 +227,12 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
name = prometheustranslator.NormalizeLabel(name) name = prometheustranslator.NormalizeLabel(name)
} }
l[name] = prompb.Label{ l[name] = extras[i+1]
Name: name,
Value: extras[i+1],
}
} }
s := make([]prompb.Label, 0, len(l)) s := make([]prompb.Label, 0, len(l))
for _, lb := range l { for k, v := range l {
s = append(s, lb) s = append(s, prompb.Label{Name: k, Value: v})
} }
return s return s
@ -236,6 +241,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
// isValidAggregationTemporality checks whether an OTel metric has a valid // isValidAggregationTemporality checks whether an OTel metric has a valid
// aggregation temporality for conversion to a Prometheus metric. // aggregation temporality for conversion to a Prometheus metric.
func isValidAggregationTemporality(metric pmetric.Metric) bool { func isValidAggregationTemporality(metric pmetric.Metric) bool {
//exhaustive:enforce
switch metric.Type() { switch metric.Type() {
case pmetric.MetricTypeGauge, pmetric.MetricTypeSummary: case pmetric.MetricTypeGauge, pmetric.MetricTypeSummary:
return true return true
@ -254,7 +260,22 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool {
func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries) { func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries) {
timestamp := convertTimeStamp(pt.Timestamp()) timestamp := convertTimeStamp(pt.Timestamp())
// sum, count, and buckets of the histogram should append suffix to baseName // sum, count, and buckets of the histogram should append suffix to baseName
baseName := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) baseName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes)
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels)
createLabels := func(nameSuffix string, extras ...string) []prompb.Label {
extraLabelCount := len(extras) / 2
labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name
copy(labels, baseLabels)
for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ {
labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]})
}
labels = append(labels, prompb.Label{Name: nameStr, Value: baseName + nameSuffix})
return labels
}
// If the sum is unset, it indicates the _sum metric point should be // If the sum is unset, it indicates the _sum metric point should be
// omitted // omitted
@ -268,7 +289,7 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon
sum.Value = math.Float64frombits(value.StaleNaN) sum.Value = math.Float64frombits(value.StaleNaN)
} }
sumlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+sumStr) sumlabels := createLabels(sumStr)
addSample(tsMap, sum, sumlabels, metric.Type().String()) addSample(tsMap, sum, sumlabels, metric.Type().String())
} }
@ -282,7 +303,7 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon
count.Value = math.Float64frombits(value.StaleNaN) count.Value = math.Float64frombits(value.StaleNaN)
} }
countlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+countStr) countlabels := createLabels(countStr)
addSample(tsMap, count, countlabels, metric.Type().String()) addSample(tsMap, count, countlabels, metric.Type().String())
// cumulative count for conversion to cumulative histogram // cumulative count for conversion to cumulative histogram
@ -304,7 +325,7 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon
bucket.Value = math.Float64frombits(value.StaleNaN) bucket.Value = math.Float64frombits(value.StaleNaN)
} }
boundStr := strconv.FormatFloat(bound, 'f', -1, 64) boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
labels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+bucketStr, leStr, boundStr) labels := createLabels(bucketStr, leStr, boundStr)
sig := addSample(tsMap, bucket, labels, metric.Type().String()) sig := addSample(tsMap, bucket, labels, metric.Type().String())
bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: bound}) bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: bound})
@ -318,7 +339,7 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon
} else { } else {
infBucket.Value = float64(pt.Count()) infBucket.Value = float64(pt.Count())
} }
infLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+bucketStr, leStr, pInfStr) infLabels := createLabels(bucketStr, leStr, pInfStr)
sig := addSample(tsMap, infBucket, infLabels, metric.Type().String()) sig := addSample(tsMap, infBucket, infLabels, metric.Type().String())
bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: math.Inf(1)}) bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: math.Inf(1)})
@ -327,14 +348,8 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon
// add _created time series if needed // add _created time series if needed
startTimestamp := pt.StartTimestamp() startTimestamp := pt.StartTimestamp()
if settings.ExportCreatedMetric && startTimestamp != 0 { if settings.ExportCreatedMetric && startTimestamp != 0 {
createdLabels := createAttributes( labels := createLabels(createdSuffix)
resource, addCreatedTimeSeriesIfNeeded(tsMap, labels, startTimestamp, metric.Type().String())
pt.Attributes(),
settings.ExternalLabels,
nameStr,
baseName+createdSuffix,
)
addCreatedTimeSeriesIfNeeded(tsMap, createdLabels, startTimestamp, metric.Type().String())
} }
} }
@ -402,6 +417,7 @@ func getPromExemplars[T exemplarType](pt T) []prompb.Exemplar {
func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp {
var ts pcommon.Timestamp var ts pcommon.Timestamp
// handle individual metric based on type // handle individual metric based on type
//exhaustive:enforce
switch metric.Type() { switch metric.Type() {
case pmetric.MetricTypeGauge: case pmetric.MetricTypeGauge:
dataPoints := metric.Gauge().DataPoints() dataPoints := metric.Gauge().DataPoints()
@ -441,11 +457,26 @@ func maxTimestamp(a, b pcommon.Timestamp) pcommon.Timestamp {
// addSingleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples. // addSingleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples.
func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings,
tsMap map[string]*prompb.TimeSeries, tsMap map[string]*prompb.TimeSeries) {
) {
timestamp := convertTimeStamp(pt.Timestamp()) timestamp := convertTimeStamp(pt.Timestamp())
// sum and count of the summary should append suffix to baseName // sum and count of the summary should append suffix to baseName
baseName := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) baseName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes)
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels)
createLabels := func(name string, extras ...string) []prompb.Label {
extraLabelCount := len(extras) / 2
labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name
copy(labels, baseLabels)
for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ {
labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]})
}
labels = append(labels, prompb.Label{Name: nameStr, Value: name})
return labels
}
// treat sum as a sample in an individual TimeSeries // treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{ sum := &prompb.Sample{
Value: pt.Sum(), Value: pt.Sum(),
@ -454,7 +485,7 @@ func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Res
if pt.Flags().NoRecordedValue() { if pt.Flags().NoRecordedValue() {
sum.Value = math.Float64frombits(value.StaleNaN) sum.Value = math.Float64frombits(value.StaleNaN)
} }
sumlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+sumStr) sumlabels := createLabels(baseName + sumStr)
addSample(tsMap, sum, sumlabels, metric.Type().String()) addSample(tsMap, sum, sumlabels, metric.Type().String())
// treat count as a sample in an individual TimeSeries // treat count as a sample in an individual TimeSeries
@ -465,7 +496,7 @@ func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Res
if pt.Flags().NoRecordedValue() { if pt.Flags().NoRecordedValue() {
count.Value = math.Float64frombits(value.StaleNaN) count.Value = math.Float64frombits(value.StaleNaN)
} }
countlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+countStr) countlabels := createLabels(baseName + countStr)
addSample(tsMap, count, countlabels, metric.Type().String()) addSample(tsMap, count, countlabels, metric.Type().String())
// process each percentile/quantile // process each percentile/quantile
@ -479,20 +510,14 @@ func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Res
quantile.Value = math.Float64frombits(value.StaleNaN) quantile.Value = math.Float64frombits(value.StaleNaN)
} }
percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64) percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64)
qtlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName, quantileStr, percentileStr) qtlabels := createLabels(baseName, quantileStr, percentileStr)
addSample(tsMap, quantile, qtlabels, metric.Type().String()) addSample(tsMap, quantile, qtlabels, metric.Type().String())
} }
// add _created time series if needed // add _created time series if needed
startTimestamp := pt.StartTimestamp() startTimestamp := pt.StartTimestamp()
if settings.ExportCreatedMetric && startTimestamp != 0 { if settings.ExportCreatedMetric && startTimestamp != 0 {
createdLabels := createAttributes( createdLabels := createLabels(baseName + createdSuffix)
resource,
pt.Attributes(),
settings.ExternalLabels,
nameStr,
baseName+createdSuffix,
)
addCreatedTimeSeriesIfNeeded(tsMap, createdLabels, startTimestamp, metric.Type().String()) addCreatedTimeSeriesIfNeeded(tsMap, createdLabels, startTimestamp, metric.Type().String())
} }
} }

View file

@ -60,15 +60,20 @@ func addSingleExponentialHistogramDataPoint(
// to Prometheus Native Histogram. // to Prometheus Native Histogram.
func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, error) { func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, error) {
scale := p.Scale() scale := p.Scale()
if scale < -4 || scale > 8 { if scale < -4 {
return prompb.Histogram{}, return prompb.Histogram{},
fmt.Errorf("cannot convert exponential to native histogram."+ fmt.Errorf("cannot convert exponential to native histogram."+
" Scale must be <= 8 and >= -4, was %d", scale) " Scale must be >= -4, was %d", scale)
// TODO: downscale to 8 if scale > 8
} }
pSpans, pDeltas := convertBucketsLayout(p.Positive()) var scaleDown int32
nSpans, nDeltas := convertBucketsLayout(p.Negative()) if scale > 8 {
scaleDown = scale - 8
scale = 8
}
pSpans, pDeltas := convertBucketsLayout(p.Positive(), scaleDown)
nSpans, nDeltas := convertBucketsLayout(p.Negative(), scaleDown)
h := prompb.Histogram{ h := prompb.Histogram{
Schema: scale, Schema: scale,
@ -106,17 +111,19 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prom
// The bucket indexes conversion was adjusted, since OTel exp. histogram bucket // The bucket indexes conversion was adjusted, since OTel exp. histogram bucket
// index 0 corresponds to the range (1, base] while Prometheus bucket index 0 // index 0 corresponds to the range (1, base] while Prometheus bucket index 0
// to the range (base 1]. // to the range (base 1].
func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets) ([]prompb.BucketSpan, []int64) { //
// scaleDown is the factor by which the buckets are scaled down. In other words 2^scaleDown buckets will be merged into one.
func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, scaleDown int32) ([]prompb.BucketSpan, []int64) {
bucketCounts := buckets.BucketCounts() bucketCounts := buckets.BucketCounts()
if bucketCounts.Len() == 0 { if bucketCounts.Len() == 0 {
return nil, nil return nil, nil
} }
var ( var (
spans []prompb.BucketSpan spans []prompb.BucketSpan
deltas []int64 deltas []int64
prevCount int64 count int64
nextBucketIdx int32 prevCount int64
) )
appendDelta := func(count int64) { appendDelta := func(count int64) {
@ -125,34 +132,67 @@ func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets)
prevCount = count prevCount = count
} }
for i := 0; i < bucketCounts.Len(); i++ { // Let the compiler figure out that this is const during this function by
count := int64(bucketCounts.At(i)) // moving it into a local variable.
numBuckets := bucketCounts.Len()
// The offset is scaled and adjusted by 1 as described above.
bucketIdx := buckets.Offset()>>scaleDown + 1
spans = append(spans, prompb.BucketSpan{
Offset: bucketIdx,
Length: 0,
})
for i := 0; i < numBuckets; i++ {
// The offset is scaled and adjusted by 1 as described above.
nextBucketIdx := (int32(i)+buckets.Offset())>>scaleDown + 1
if bucketIdx == nextBucketIdx { // We have not collected enough buckets to merge yet.
count += int64(bucketCounts.At(i))
continue
}
if count == 0 { if count == 0 {
count = int64(bucketCounts.At(i))
continue continue
} }
// The offset is adjusted by 1 as described above. gap := nextBucketIdx - bucketIdx - 1
bucketIdx := int32(i) + buckets.Offset() + 1 if gap > 2 {
delta := bucketIdx - nextBucketIdx // We have to create a new span, because we have found a gap
if i == 0 || delta > 2 {
// We have to create a new span, either because we are
// at the very beginning, or because we have found a gap
// of more than two buckets. The constant 2 is copied from the logic in // of more than two buckets. The constant 2 is copied from the logic in
// https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296
spans = append(spans, prompb.BucketSpan{ spans = append(spans, prompb.BucketSpan{
Offset: delta, Offset: gap,
Length: 0, Length: 0,
}) })
} else { } else {
// We have found a small gap (or no gap at all). // We have found a small gap (or no gap at all).
// Insert empty buckets as needed. // Insert empty buckets as needed.
for j := int32(0); j < delta; j++ { for j := int32(0); j < gap; j++ {
appendDelta(0) appendDelta(0)
} }
} }
appendDelta(count) appendDelta(count)
nextBucketIdx = bucketIdx + 1 count = int64(bucketCounts.At(i))
bucketIdx = nextBucketIdx
} }
// Need to use the last item's index. The offset is scaled and adjusted by 1 as described above.
gap := (int32(numBuckets)+buckets.Offset()-1)>>scaleDown + 1 - bucketIdx
if gap > 2 {
// We have to create a new span, because we have found a gap
// of more than two buckets. The constant 2 is copied from the logic in
// https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296
spans = append(spans, prompb.BucketSpan{
Offset: gap,
Length: 0,
})
} else {
// We have found a small gap (or no gap at all).
// Insert empty buckets as needed.
for j := int32(0); j < gap; j++ {
appendDelta(0)
}
}
appendDelta(count)
return spans, deltas return spans, deltas
} }

View file

@ -22,6 +22,7 @@ type Settings struct {
ExternalLabels map[string]string ExternalLabels map[string]string
DisableTargetInfo bool DisableTargetInfo bool
ExportCreatedMetric bool ExportCreatedMetric bool
AddMetricSuffixes bool
} }
// FromMetrics converts pmetric.Metrics to prometheus remote write format. // FromMetrics converts pmetric.Metrics to prometheus remote write format.
@ -51,6 +52,7 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp
} }
// handle individual metric based on type // handle individual metric based on type
//exhaustive:enforce
switch metric.Type() { switch metric.Type() {
case pmetric.MetricTypeGauge: case pmetric.MetricTypeGauge:
dataPoints := metric.Gauge().DataPoints() dataPoints := metric.Gauge().DataPoints()
@ -81,7 +83,7 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp
if dataPoints.Len() == 0 { if dataPoints.Len() == 0 {
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
} }
name := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) name := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes)
for x := 0; x < dataPoints.Len(); x++ { for x := 0; x < dataPoints.Len(); x++ {
errs = multierr.Append( errs = multierr.Append(
errs, errs,

View file

@ -27,7 +27,7 @@ func addSingleGaugeNumberDataPoint(
settings Settings, settings Settings,
series map[string]*prompb.TimeSeries, series map[string]*prompb.TimeSeries,
) { ) {
name := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) name := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes)
labels := createAttributes( labels := createAttributes(
resource, resource,
pt.Attributes(), pt.Attributes(),
@ -60,7 +60,7 @@ func addSingleSumNumberDataPoint(
settings Settings, settings Settings,
series map[string]*prompb.TimeSeries, series map[string]*prompb.TimeSeries,
) { ) {
name := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) name := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes)
labels := createAttributes( labels := createAttributes(
resource, resource,
pt.Attributes(), pt.Attributes(),

View file

@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
OTEL_VERSION=v0.81.0 OTEL_VERSION=v0.88.0
git clone https://github.com/open-telemetry/opentelemetry-collector-contrib ./tmp git clone https://github.com/open-telemetry/opentelemetry-collector-contrib ./tmp
cd ./tmp cd ./tmp
@ -8,7 +8,8 @@ git checkout $OTEL_VERSION
cd .. cd ..
rm -rf ./prometheusremotewrite/* rm -rf ./prometheusremotewrite/*
cp -r ./tmp/pkg/translator/prometheusremotewrite/*.go ./prometheusremotewrite cp -r ./tmp/pkg/translator/prometheusremotewrite/*.go ./prometheusremotewrite
rm -rf ./prometheusremotewrite/*_test.go cp -r ./tmp/pkg/translator/prometheus/*.go ./prometheus
rm -rf ./prometheus/*_test.go
rm -rf ./tmp rm -rf ./tmp
sed -i '' 's#github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus#github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus#g' ./prometheusremotewrite/*.go sed -i '' 's#github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus#github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus#g' ./prometheusremotewrite/*.go