mirror of
https://github.com/prometheus/prometheus.git
synced 2024-11-09 23:24:05 -08:00
Merge branch 'main' into sum-and-avg-over-mixed-custom-exponential-histograms
# Conflicts: # promql/promqltest/testdata/native_histograms.test
This commit is contained in:
commit
52818a97e2
2
.github/workflows/buf-lint.yml
vendored
2
.github/workflows/buf-lint.yml
vendored
|
@ -13,7 +13,7 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
|
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
|
||||||
- uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
|
- uses: bufbuild/buf-setup-action@aceb106d2419c4cff48863df90161d92decb8591 # v1.35.1
|
||||||
with:
|
with:
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
|
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
|
||||||
|
|
2
.github/workflows/buf.yml
vendored
2
.github/workflows/buf.yml
vendored
|
@ -13,7 +13,7 @@ jobs:
|
||||||
if: github.repository_owner == 'prometheus'
|
if: github.repository_owner == 'prometheus'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
|
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
|
||||||
- uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
|
- uses: bufbuild/buf-setup-action@aceb106d2419c4cff48863df90161d92decb8591 # v1.35.1
|
||||||
with:
|
with:
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
|
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
|
||||||
|
|
2
.github/workflows/fuzzing.yml
vendored
2
.github/workflows/fuzzing.yml
vendored
|
@ -21,7 +21,7 @@ jobs:
|
||||||
fuzz-seconds: 600
|
fuzz-seconds: 600
|
||||||
dry-run: false
|
dry-run: false
|
||||||
- name: Upload Crash
|
- name: Upload Crash
|
||||||
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
|
uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
|
||||||
if: failure() && steps.build.outcome == 'success'
|
if: failure() && steps.build.outcome == 'success'
|
||||||
with:
|
with:
|
||||||
name: artifacts
|
name: artifacts
|
||||||
|
|
2
.github/workflows/scorecards.yml
vendored
2
.github/workflows/scorecards.yml
vendored
|
@ -37,7 +37,7 @@ jobs:
|
||||||
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
|
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
|
||||||
# format to the repository Actions tab.
|
# format to the repository Actions tab.
|
||||||
- name: "Upload artifact"
|
- name: "Upload artifact"
|
||||||
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
|
uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # tag=v4.3.4
|
||||||
with:
|
with:
|
||||||
name: SARIF file
|
name: SARIF file
|
||||||
path: results.sarif
|
path: results.sarif
|
||||||
|
|
2
go.mod
2
go.mod
|
@ -13,7 +13,7 @@ require (
|
||||||
github.com/KimMachineGun/automemlimit v0.6.1
|
github.com/KimMachineGun/automemlimit v0.6.1
|
||||||
github.com/alecthomas/kingpin/v2 v2.4.0
|
github.com/alecthomas/kingpin/v2 v2.4.0
|
||||||
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
|
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
|
||||||
github.com/aws/aws-sdk-go v1.54.19
|
github.com/aws/aws-sdk-go v1.55.5
|
||||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
|
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
|
||||||
github.com/cespare/xxhash/v2 v2.3.0
|
github.com/cespare/xxhash/v2 v2.3.0
|
||||||
github.com/dennwc/varint v1.0.0
|
github.com/dennwc/varint v1.0.0
|
||||||
|
|
4
go.sum
4
go.sum
|
@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
|
||||||
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
|
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
|
||||||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||||
github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI=
|
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
|
||||||
github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
|
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
|
||||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
|
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
|
||||||
|
|
|
@ -1057,7 +1057,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp
|
||||||
buf := make([]byte, 64<<10)
|
buf := make([]byte, 64<<10)
|
||||||
buf = buf[:runtime.Stack(buf, false)]
|
buf = buf[:runtime.Stack(buf, false)]
|
||||||
|
|
||||||
level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf))
|
level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf))
|
||||||
*errp = fmt.Errorf("unexpected error: %w", err)
|
*errp = fmt.Errorf("unexpected error: %w", err)
|
||||||
case errWithWarnings:
|
case errWithWarnings:
|
||||||
*errp = err.err
|
*errp = err.err
|
||||||
|
@ -2357,7 +2357,7 @@ loop:
|
||||||
histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}})
|
histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}})
|
||||||
}
|
}
|
||||||
if histograms[n].H == nil {
|
if histograms[n].H == nil {
|
||||||
// Make sure to pass non zero H to AtFloatHistogram so that it does a deep-copy.
|
// Make sure to pass non-nil H to AtFloatHistogram so that it does a deep-copy.
|
||||||
// Not an issue in the loop above since that uses an intermediate buffer.
|
// Not an issue in the loop above since that uses an intermediate buffer.
|
||||||
histograms[n].H = &histogram.FloatHistogram{}
|
histograms[n].H = &histogram.FloatHistogram{}
|
||||||
}
|
}
|
||||||
|
|
26
promql/promqltest/testdata/histograms.test
vendored
26
promql/promqltest/testdata/histograms.test
vendored
|
@ -482,3 +482,29 @@ load_with_nhcb 5m
|
||||||
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})
|
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})
|
||||||
|
|
||||||
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})
|
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})
|
||||||
|
|
||||||
|
# Histogram with constant buckets.
|
||||||
|
load_with_nhcb 1m
|
||||||
|
const_histogram_bucket{le="0.0"} 1 1 1 1 1
|
||||||
|
const_histogram_bucket{le="1.0"} 1 1 1 1 1
|
||||||
|
const_histogram_bucket{le="2.0"} 1 1 1 1 1
|
||||||
|
const_histogram_bucket{le="+Inf"} 1 1 1 1 1
|
||||||
|
|
||||||
|
# There is no change to the bucket count over time, thus rate is 0 in each bucket.
|
||||||
|
eval instant at 5m rate(const_histogram_bucket[5m])
|
||||||
|
{le="0.0"} 0
|
||||||
|
{le="1.0"} 0
|
||||||
|
{le="2.0"} 0
|
||||||
|
{le="+Inf"} 0
|
||||||
|
|
||||||
|
# Native histograms do not represent empty buckets, so here the zeros are implicit.
|
||||||
|
eval instant at 5m rate(const_histogram[5m])
|
||||||
|
{} {{schema:-53 sum:0 count:0 custom_values:[0.0 1.0 2.0]}}
|
||||||
|
|
||||||
|
# Zero buckets mean no observations, so there is no value that observations fall below,
|
||||||
|
# which means that any quantile is a NaN.
|
||||||
|
eval instant at 5m histogram_quantile(1.0, sum by (le) (rate(const_histogram_bucket[5m])))
|
||||||
|
{} NaN
|
||||||
|
|
||||||
|
eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m])))
|
||||||
|
{} NaN
|
||||||
|
|
|
@ -787,6 +787,52 @@ eval_warn instant at 30s rate(some_metric[30s])
|
||||||
|
|
||||||
clear
|
clear
|
||||||
|
|
||||||
|
# Histogram with constant buckets.
|
||||||
|
load 1m
|
||||||
|
const_histogram {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}}
|
||||||
|
|
||||||
|
# There is no change to the bucket count over time, thus rate is 0 in each bucket.
|
||||||
|
# However native histograms do not represent empty buckets, so here the zeros are implicit.
|
||||||
|
eval instant at 5m rate(const_histogram[5m])
|
||||||
|
{} {{schema:0 sum:0 count:0}}
|
||||||
|
|
||||||
|
# Zero buckets mean no observations, thus the denominator in the average is 0
|
||||||
|
# leading to 0/0, which is NaN.
|
||||||
|
eval instant at 5m histogram_avg(rate(const_histogram[5m]))
|
||||||
|
{} NaN
|
||||||
|
|
||||||
|
# Zero buckets mean no observations, so count is 0.
|
||||||
|
eval instant at 5m histogram_count(rate(const_histogram[5m]))
|
||||||
|
{} 0.0
|
||||||
|
|
||||||
|
# Zero buckets mean no observations and empty histogram has a sum of 0 by definition.
|
||||||
|
eval instant at 5m histogram_sum(rate(const_histogram[5m]))
|
||||||
|
{} 0.0
|
||||||
|
|
||||||
|
# Zero buckets mean no observations, thus the denominator in the fraction is 0,
|
||||||
|
# leading to 0/0, which is NaN.
|
||||||
|
eval instant at 5m histogram_fraction(0.0, 1.0, rate(const_histogram[5m]))
|
||||||
|
{} NaN
|
||||||
|
|
||||||
|
# Workaround to calculate the observation count corresponding to NaN fraction.
|
||||||
|
eval instant at 5m histogram_count(rate(const_histogram[5m])) == 0.0 or histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) * histogram_count(rate(const_histogram[5m]))
|
||||||
|
{} 0.0
|
||||||
|
|
||||||
|
# Zero buckets mean no observations, so there is no value that observations fall below,
|
||||||
|
# which means that any quantile is a NaN.
|
||||||
|
eval instant at 5m histogram_quantile(1.0, rate(const_histogram[5m]))
|
||||||
|
{} NaN
|
||||||
|
|
||||||
|
# Zero buckets mean no observations, so there is no standard deviation.
|
||||||
|
eval instant at 5m histogram_stddev(rate(const_histogram[5m]))
|
||||||
|
{} NaN
|
||||||
|
|
||||||
|
# Zero buckets mean no observations, so there is no standard variance.
|
||||||
|
eval instant at 5m histogram_stdvar(rate(const_histogram[5m]))
|
||||||
|
{} NaN
|
||||||
|
|
||||||
|
clear
|
||||||
|
|
||||||
# Test mixing exponential and custom buckets.
|
# Test mixing exponential and custom buckets.
|
||||||
load 6m
|
load 6m
|
||||||
metric{series="exponential"} {{sum:4 count:3 buckets:[1 2 1]}} _ {{sum:4 count:3 buckets:[1 2 1]}}
|
metric{series="exponential"} {{sum:4 count:3 buckets:[1 2 1]}} _ {{sum:4 count:3 buckets:[1 2 1]}}
|
||||||
|
|
|
@ -26,7 +26,7 @@ jobs:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||||
- name: Install Go
|
- name: Install Go
|
||||||
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
|
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
|
||||||
with:
|
with:
|
||||||
go-version: 1.22.x
|
go-version: 1.22.x
|
||||||
- name: Install snmp_exporter/generator dependencies
|
- name: Install snmp_exporter/generator dependencies
|
||||||
|
|
|
@ -21,15 +21,14 @@ import (
|
||||||
"unicode"
|
"unicode"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Normalizes the specified label to follow Prometheus label names standard
|
// Normalizes the specified label to follow Prometheus label names standard.
|
||||||
//
|
//
|
||||||
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
|
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels.
|
||||||
//
|
//
|
||||||
// Labels that start with non-letter rune will be prefixed with "key_"
|
// Labels that start with non-letter rune will be prefixed with "key_".
|
||||||
//
|
//
|
||||||
// Exception is made for double-underscores which are allowed
|
// An exception is made for double-underscores which are allowed.
|
||||||
func NormalizeLabel(label string) string {
|
func NormalizeLabel(label string) string {
|
||||||
|
|
||||||
// Trivial case
|
// Trivial case
|
||||||
if len(label) == 0 {
|
if len(label) == 0 {
|
||||||
return label
|
return label
|
||||||
|
@ -48,7 +47,7 @@ func NormalizeLabel(label string) string {
|
||||||
return label
|
return label
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return '_' for anything non-alphanumeric
|
// Return '_' for anything non-alphanumeric.
|
||||||
func sanitizeRune(r rune) rune {
|
func sanitizeRune(r rune) rune {
|
||||||
if unicode.IsLetter(r) || unicode.IsDigit(r) {
|
if unicode.IsLetter(r) || unicode.IsDigit(r) {
|
||||||
return r
|
return r
|
||||||
|
|
|
@ -76,14 +76,15 @@ var perUnitMap = map[string]string{
|
||||||
"y": "year",
|
"y": "year",
|
||||||
}
|
}
|
||||||
|
|
||||||
// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric
|
// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric.
|
||||||
//
|
//
|
||||||
// Metric name is prefixed with specified namespace and underscore (if any).
|
// Metric name is prefixed with specified namespace and underscore (if any).
|
||||||
// Namespace is not cleaned up. Make sure specified namespace follows Prometheus
|
// Namespace is not cleaned up. Make sure specified namespace follows Prometheus
|
||||||
// naming convention.
|
// naming convention.
|
||||||
//
|
//
|
||||||
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
|
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels,
|
||||||
// and https://prometheus.io/docs/practices/naming/#metric-and-label-naming
|
// https://prometheus.io/docs/practices/naming/#metric-and-label-naming
|
||||||
|
// and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus.
|
||||||
func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string {
|
func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string {
|
||||||
var metricName string
|
var metricName string
|
||||||
|
|
||||||
|
@ -110,7 +111,7 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix
|
||||||
|
|
||||||
// Build a normalized name for the specified metric
|
// Build a normalized name for the specified metric
|
||||||
func normalizeName(metric pmetric.Metric, namespace string) string {
|
func normalizeName(metric pmetric.Metric, namespace string) string {
|
||||||
// Split metric name in "tokens" (remove all non-alphanumeric)
|
// Split metric name into "tokens" (remove all non-alphanumerics)
|
||||||
nameTokens := strings.FieldsFunc(
|
nameTokens := strings.FieldsFunc(
|
||||||
metric.Name(),
|
metric.Name(),
|
||||||
func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) },
|
func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) },
|
||||||
|
@ -122,9 +123,9 @@ func normalizeName(metric pmetric.Metric, namespace string) string {
|
||||||
// Main unit
|
// Main unit
|
||||||
// Append if not blank, doesn't contain '{}', and is not present in metric name already
|
// Append if not blank, doesn't contain '{}', and is not present in metric name already
|
||||||
if len(unitTokens) > 0 {
|
if len(unitTokens) > 0 {
|
||||||
mainUnitOtel := strings.TrimSpace(unitTokens[0])
|
mainUnitOTel := strings.TrimSpace(unitTokens[0])
|
||||||
if mainUnitOtel != "" && !strings.ContainsAny(mainUnitOtel, "{}") {
|
if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") {
|
||||||
mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOtel))
|
mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOTel))
|
||||||
if mainUnitProm != "" && !contains(nameTokens, mainUnitProm) {
|
if mainUnitProm != "" && !contains(nameTokens, mainUnitProm) {
|
||||||
nameTokens = append(nameTokens, mainUnitProm)
|
nameTokens = append(nameTokens, mainUnitProm)
|
||||||
}
|
}
|
||||||
|
@ -133,11 +134,11 @@ func normalizeName(metric pmetric.Metric, namespace string) string {
|
||||||
// Per unit
|
// Per unit
|
||||||
// Append if not blank, doesn't contain '{}', and is not present in metric name already
|
// Append if not blank, doesn't contain '{}', and is not present in metric name already
|
||||||
if len(unitTokens) > 1 && unitTokens[1] != "" {
|
if len(unitTokens) > 1 && unitTokens[1] != "" {
|
||||||
perUnitOtel := strings.TrimSpace(unitTokens[1])
|
perUnitOTel := strings.TrimSpace(unitTokens[1])
|
||||||
if perUnitOtel != "" && !strings.ContainsAny(perUnitOtel, "{}") {
|
if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") {
|
||||||
perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOtel))
|
perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOTel))
|
||||||
if perUnitProm != "" && !contains(nameTokens, perUnitProm) {
|
if perUnitProm != "" && !contains(nameTokens, perUnitProm) {
|
||||||
nameTokens = append(append(nameTokens, "per"), perUnitProm)
|
nameTokens = append(nameTokens, "per", perUnitProm)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -150,7 +151,7 @@ func normalizeName(metric pmetric.Metric, namespace string) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Append _ratio for metrics with unit "1"
|
// Append _ratio for metrics with unit "1"
|
||||||
// Some Otel receivers improperly use unit "1" for counters of objects
|
// Some OTel receivers improperly use unit "1" for counters of objects
|
||||||
// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions
|
// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions
|
||||||
// Until these issues have been fixed, we're appending `_ratio` for gauges ONLY
|
// Until these issues have been fixed, we're appending `_ratio` for gauges ONLY
|
||||||
// Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons)
|
// Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons)
|
||||||
|
|
205
storage/remote/otlptranslator/prometheus/normalize_name_test.go
Normal file
205
storage/remote/otlptranslator/prometheus/normalize_name_test.go
Normal file
|
@ -0,0 +1,205 @@
|
||||||
|
// Copyright 2024 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name_test.go
|
||||||
|
// Provenance-includes-license: Apache-2.0
|
||||||
|
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.opentelemetry.io/collector/pdata/pmetric"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestByte(t *testing.T) {
|
||||||
|
require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestByteCounter(t *testing.T) {
|
||||||
|
require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), ""))
|
||||||
|
require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWhiteSpaces(t *testing.T) {
|
||||||
|
require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNonStandardUnit(t *testing.T) {
|
||||||
|
require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNonStandardUnitCounter(t *testing.T) {
|
||||||
|
require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBrokenUnit(t *testing.T) {
|
||||||
|
require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), ""))
|
||||||
|
require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), ""))
|
||||||
|
require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBrokenUnitCounter(t *testing.T) {
|
||||||
|
require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), ""))
|
||||||
|
require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), ""))
|
||||||
|
require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRatio(t *testing.T) {
|
||||||
|
require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), ""))
|
||||||
|
require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), ""))
|
||||||
|
require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHertz(t *testing.T) {
|
||||||
|
require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPer(t *testing.T) {
|
||||||
|
require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), ""))
|
||||||
|
require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPercent(t *testing.T) {
|
||||||
|
require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), ""))
|
||||||
|
require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEmpty(t *testing.T) {
|
||||||
|
require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), ""))
|
||||||
|
require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnsupportedRunes(t *testing.T) {
|
||||||
|
require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), ""))
|
||||||
|
require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), ""))
|
||||||
|
require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOTelReceivers(t *testing.T) {
|
||||||
|
require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), ""))
|
||||||
|
require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), ""))
|
||||||
|
require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), ""))
|
||||||
|
require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), ""))
|
||||||
|
require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), ""))
|
||||||
|
require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), ""))
|
||||||
|
require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), ""))
|
||||||
|
require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), ""))
|
||||||
|
require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), ""))
|
||||||
|
require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), ""))
|
||||||
|
require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), ""))
|
||||||
|
require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), ""))
|
||||||
|
require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), ""))
|
||||||
|
require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), ""))
|
||||||
|
require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), ""))
|
||||||
|
require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), ""))
|
||||||
|
require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), ""))
|
||||||
|
require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), ""))
|
||||||
|
require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), ""))
|
||||||
|
require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), ""))
|
||||||
|
require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), ""))
|
||||||
|
require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTrimPromSuffixes(t *testing.T) {
|
||||||
|
assert.Equal(t, "active_directory_ds_replication_network_io", TrimPromSuffixes("active_directory_ds_replication_network_io_bytes_total", pmetric.MetricTypeSum, "bytes"))
|
||||||
|
assert.Equal(t, "active_directory_ds_name_cache_hit_rate", TrimPromSuffixes("active_directory_ds_name_cache_hit_rate_percent", pmetric.MetricTypeGauge, "percent"))
|
||||||
|
assert.Equal(t, "active_directory_ds_ldap_bind_last_successful_time", TrimPromSuffixes("active_directory_ds_ldap_bind_last_successful_time_milliseconds", pmetric.MetricTypeGauge, "milliseconds"))
|
||||||
|
assert.Equal(t, "apache_requests", TrimPromSuffixes("apache_requests_total", pmetric.MetricTypeSum, "1"))
|
||||||
|
assert.Equal(t, "system_cpu_utilization", TrimPromSuffixes("system_cpu_utilization_ratio", pmetric.MetricTypeGauge, "ratio"))
|
||||||
|
assert.Equal(t, "mongodbatlas_process_journaling_data_files", TrimPromSuffixes("mongodbatlas_process_journaling_data_files_mebibytes", pmetric.MetricTypeGauge, "mebibytes"))
|
||||||
|
assert.Equal(t, "mongodbatlas_process_network_io", TrimPromSuffixes("mongodbatlas_process_network_io_bytes_per_second", pmetric.MetricTypeGauge, "bytes_per_second"))
|
||||||
|
assert.Equal(t, "mongodbatlas_process_oplog_rate", TrimPromSuffixes("mongodbatlas_process_oplog_rate_gibibytes_per_hour", pmetric.MetricTypeGauge, "gibibytes_per_hour"))
|
||||||
|
assert.Equal(t, "nsxt_node_memory_usage", TrimPromSuffixes("nsxt_node_memory_usage_kilobytes", pmetric.MetricTypeGauge, "kilobytes"))
|
||||||
|
assert.Equal(t, "redis_latest_fork", TrimPromSuffixes("redis_latest_fork_microseconds", pmetric.MetricTypeGauge, "microseconds"))
|
||||||
|
assert.Equal(t, "up", TrimPromSuffixes("up", pmetric.MetricTypeGauge, ""))
|
||||||
|
|
||||||
|
// These are not necessarily valid OM units, only tested for the sake of completeness.
|
||||||
|
assert.Equal(t, "active_directory_ds_replication_sync_object_pending", TrimPromSuffixes("active_directory_ds_replication_sync_object_pending_total", pmetric.MetricTypeSum, "{objects}"))
|
||||||
|
assert.Equal(t, "apache_current", TrimPromSuffixes("apache_current_connections", pmetric.MetricTypeGauge, "connections"))
|
||||||
|
assert.Equal(t, "bigip_virtual_server_request_count", TrimPromSuffixes("bigip_virtual_server_request_count_total", pmetric.MetricTypeSum, "{requests}"))
|
||||||
|
assert.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", TrimPromSuffixes("mongodbatlas_process_db_query_targeting_scanned_per_returned", pmetric.MetricTypeGauge, "{scanned}/{returned}"))
|
||||||
|
assert.Equal(t, "nginx_connections_accepted", TrimPromSuffixes("nginx_connections_accepted", pmetric.MetricTypeGauge, "connections"))
|
||||||
|
assert.Equal(t, "apache_workers", TrimPromSuffixes("apache_workers_connections", pmetric.MetricTypeGauge, "connections"))
|
||||||
|
assert.Equal(t, "nginx", TrimPromSuffixes("nginx_requests", pmetric.MetricTypeGauge, "requests"))
|
||||||
|
|
||||||
|
// Units shouldn't be trimmed if the unit is not a direct match with the suffix, i.e, a suffix "_seconds" shouldn't be removed if unit is "sec" or "s"
|
||||||
|
assert.Equal(t, "system_cpu_load_average_15m_ratio", TrimPromSuffixes("system_cpu_load_average_15m_ratio", pmetric.MetricTypeGauge, "1"))
|
||||||
|
assert.Equal(t, "mongodbatlas_process_asserts_per_second", TrimPromSuffixes("mongodbatlas_process_asserts_per_second", pmetric.MetricTypeGauge, "{assertions}/s"))
|
||||||
|
assert.Equal(t, "memcached_operation_hit_ratio_percent", TrimPromSuffixes("memcached_operation_hit_ratio_percent", pmetric.MetricTypeGauge, "%"))
|
||||||
|
assert.Equal(t, "active_directory_ds_replication_object_rate_per_second", TrimPromSuffixes("active_directory_ds_replication_object_rate_per_second", pmetric.MetricTypeGauge, "{objects}/s"))
|
||||||
|
assert.Equal(t, "system_disk_operation_time_seconds", TrimPromSuffixes("system_disk_operation_time_seconds_total", pmetric.MetricTypeSum, "s"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNamespace(t *testing.T) {
|
||||||
|
require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space"))
|
||||||
|
require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCleanUpString(t *testing.T) {
|
||||||
|
require.Equal(t, "", CleanUpString(""))
|
||||||
|
require.Equal(t, "a_b", CleanUpString("a b"))
|
||||||
|
require.Equal(t, "hello_world", CleanUpString("hello, world!"))
|
||||||
|
require.Equal(t, "hello_you_2", CleanUpString("hello you 2"))
|
||||||
|
require.Equal(t, "1000", CleanUpString("$1000"))
|
||||||
|
require.Equal(t, "", CleanUpString("*+$^=)"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnitMapGetOrDefault(t *testing.T) {
|
||||||
|
require.Equal(t, "", unitMapGetOrDefault(""))
|
||||||
|
require.Equal(t, "seconds", unitMapGetOrDefault("s"))
|
||||||
|
require.Equal(t, "invalid", unitMapGetOrDefault("invalid"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPerUnitMapGetOrDefault(t *testing.T) {
|
||||||
|
require.Equal(t, "", perUnitMapGetOrDefault(""))
|
||||||
|
require.Equal(t, "second", perUnitMapGetOrDefault("s"))
|
||||||
|
require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRemoveItem(t *testing.T) {
|
||||||
|
require.Equal(t, []string{}, removeItem([]string{}, "test"))
|
||||||
|
require.Equal(t, []string{}, removeItem([]string{}, ""))
|
||||||
|
require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d"))
|
||||||
|
require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, ""))
|
||||||
|
require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c"))
|
||||||
|
require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b"))
|
||||||
|
require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuildCompliantNameWithNormalize(t *testing.T) {
|
||||||
|
require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true))
|
||||||
|
require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true))
|
||||||
|
require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true))
|
||||||
|
require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true))
|
||||||
|
require.Equal(t, "foo_bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true))
|
||||||
|
require.Equal(t, "foo_bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true))
|
||||||
|
// Gauges with unit 1 are considered ratios.
|
||||||
|
require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true))
|
||||||
|
// Slashes in units are converted.
|
||||||
|
require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuildCompliantNameWithoutSuffixes(t *testing.T) {
|
||||||
|
require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false))
|
||||||
|
require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false))
|
||||||
|
require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false))
|
||||||
|
require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false))
|
||||||
|
require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false))
|
||||||
|
require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false))
|
||||||
|
require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false))
|
||||||
|
require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false))
|
||||||
|
require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false))
|
||||||
|
}
|
49
storage/remote/otlptranslator/prometheus/testutils_test.go
Normal file
49
storage/remote/otlptranslator/prometheus/testutils_test.go
Normal file
|
@ -0,0 +1,49 @@
|
||||||
|
// Copyright 2024 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/testutils_test.go
|
||||||
|
// Provenance-includes-license: Apache-2.0
|
||||||
|
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.opentelemetry.io/collector/pdata/pmetric"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ilm is the shared ScopeMetrics that createGauge and createCounter append
// test metrics into; it is initialized once in init below.
var ilm pmetric.ScopeMetrics
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
|
||||||
|
metrics := pmetric.NewMetrics()
|
||||||
|
resourceMetrics := metrics.ResourceMetrics().AppendEmpty()
|
||||||
|
ilm = resourceMetrics.ScopeMetrics().AppendEmpty()
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns a new Metric of type "Gauge" with specified name and unit
|
||||||
|
func createGauge(name string, unit string) pmetric.Metric {
|
||||||
|
gauge := ilm.Metrics().AppendEmpty()
|
||||||
|
gauge.SetName(name)
|
||||||
|
gauge.SetUnit(unit)
|
||||||
|
gauge.SetEmptyGauge()
|
||||||
|
return gauge
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns a new Metric of type Monotonic Sum with specified name and unit
|
||||||
|
func createCounter(name string, unit string) pmetric.Metric {
|
||||||
|
counter := ilm.Metrics().AppendEmpty()
|
||||||
|
counter.SetEmptySum().SetIsMonotonic(true)
|
||||||
|
counter.SetName(name)
|
||||||
|
counter.SetUnit(unit)
|
||||||
|
return counter
|
||||||
|
}
|
|
@ -689,10 +689,11 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("unable to open segment: %w", err)
|
return fmt.Errorf("unable to open segment: %w", err)
|
||||||
}
|
}
|
||||||
defer sr.Close()
|
|
||||||
|
|
||||||
r := NewLiveReader(w.logger, w.readerMetrics, sr)
|
r := NewLiveReader(w.logger, w.readerMetrics, sr)
|
||||||
if err := readFn(w, r, index, false); err != nil && !errors.Is(err, io.EOF) {
|
err = readFn(w, r, index, false)
|
||||||
|
sr.Close()
|
||||||
|
if err != nil && !errors.Is(err, io.EOF) {
|
||||||
return fmt.Errorf("readSegment: %w", err)
|
return fmt.Errorf("readSegment: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -230,11 +230,11 @@ func TestTailSamples(t *testing.T) {
|
||||||
for i := first; i <= last; i++ {
|
for i := first; i <= last; i++ {
|
||||||
segment, err := OpenReadSegment(SegmentName(watcher.walDir, i))
|
segment, err := OpenReadSegment(SegmentName(watcher.walDir, i))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer segment.Close()
|
|
||||||
|
|
||||||
reader := NewLiveReader(nil, NewLiveReaderMetrics(nil), segment)
|
reader := NewLiveReader(nil, NewLiveReaderMetrics(nil), segment)
|
||||||
// Use tail true so we can ensure we got the right number of samples.
|
// Use tail true so we can ensure we got the right number of samples.
|
||||||
watcher.readSegment(reader, i, true)
|
watcher.readSegment(reader, i, true)
|
||||||
|
require.NoError(t, segment.Close())
|
||||||
}
|
}
|
||||||
|
|
||||||
expectedSeries := seriesCount
|
expectedSeries := seriesCount
|
||||||
|
|
Loading…
Reference in a new issue