Merge remote-tracking branch 'prometheus/main' into arve/wlog-histograms

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Commit b5d13a1ab5 by Arve Knudsen, 2024-08-14 19:04:53 +01:00
107 changed files with 4920 additions and 1709 deletions


@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
- uses: bufbuild/buf-setup-action@aceb106d2419c4cff48863df90161d92decb8591 # v1.35.1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1


@@ -13,7 +13,7 @@ jobs:
if: github.repository_owner == 'prometheus'
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
- uses: bufbuild/buf-setup-action@aceb106d2419c4cff48863df90161d92decb8591 # v1.35.1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1


@@ -75,7 +75,7 @@ jobs:
runs-on: windows-latest
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: 1.22.x
- run: |
@@ -162,7 +162,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Install Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
cache: false
go-version: 1.22.x
@@ -175,14 +175,14 @@ jobs:
- name: Checkout repository
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Install Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: 1.22.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
with:
args: --verbose
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.


@@ -27,12 +27,12 @@ jobs:
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Initialize CodeQL
uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
uses: github/codeql-action/init@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
uses: github/codeql-action/autobuild@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
uses: github/codeql-action/analyze@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15


@@ -21,7 +21,7 @@ jobs:
fuzz-seconds: 600
dry-run: false
- name: Upload Crash
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts


@@ -26,7 +26,7 @@ jobs:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3
uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # tag=v2.4.0
with:
results_file: results.sarif
results_format: sarif
@@ -37,7 +37,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # tag=v4.3.4
with:
name: SARIF file
path: results.sarif
@@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11
uses: github/codeql-action/upload-sarif@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # tag=v3.25.15
with:
sarif_file: results.sarif


@@ -3,10 +3,50 @@
## unreleased
* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
* [FEATURE] Remote-Write: Add sender and receiver support for [Remote Write 2.0-rc.2](https://prometheus.io/docs/specs/remote_write_spec_2_0/) specification #14395 #14427 #14444
* [ENHANCEMENT] Remote-Write: 1.x messages against Remote Write 2.x Receivers will now have correct values for `prometheus_storage_<samples|histograms|exemplar>_failed_total` in case of partial errors. #14444
* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042
## 2.54.0-rc.1 / 2024-08-05
Release 2.54 brings a release candidate of a major new version of [Remote Write: 2.0](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
This is experimental at this time and may still change.
Remote-write v2 is enabled by default, but can be disabled via the `web.remote-write-receiver.accepted-protobuf-messages` feature flag.
* [CHANGE] Remote-Write: `highest_timestamp_in_seconds` and `queue_highest_sent_timestamp_seconds` metrics now initialized to 0. #14437
* [CHANGE] API: Split warnings from info annotations in API response. #14327
* [FEATURE] Remote-Write: Version 2.0 experimental, plus metadata in WAL via feature flag `metadata-wal-records` (defaults on). #14395,#14427,#14444
* [FEATURE] PromQL: add limitk() and limit_ratio() aggregation operators. #12503
* [ENHANCEMENT] PromQL: Accept underscores in literal numbers, e.g. 1_000_000 for 1 million. #12821
* [ENHANCEMENT] PromQL: float literal numbers and durations are now interchangeable (experimental). Example: `time() - my_timestamp > 10m`. #9138
* [ENHANCEMENT] PromQL: use Kahan summation for sum(). #14074,#14362
* [ENHANCEMENT] PromQL (experimental native histograms): Optimize `histogram_count` and `histogram_sum` functions. #14097
* [ENHANCEMENT] TSDB: Better support for out-of-order experimental native histogram samples. #14438
* [ENHANCEMENT] TSDB: Optimise seek within index. #14393
* [ENHANCEMENT] TSDB: Optimise deletion of stale series. #14307
* [ENHANCEMENT] TSDB: Reduce locking to optimise adding and removing series. #13286,#14286
* [ENHANCEMENT] TSDB: Small optimisation: streamline special handling for out-of-order data. #14396,#14584
* [ENHANCEMENT] Regexps: Optimize patterns with multiple prefixes. #13843,#14368
* [ENHANCEMENT] Regexps: Optimize patterns containing multiple literal strings. #14173
* [ENHANCEMENT] AWS SD: expose Primary IPv6 addresses as __meta_ec2_primary_ipv6_addresses. #14156
* [ENHANCEMENT] Docker SD: add MatchFirstNetwork for containers with multiple networks. #10490
* [ENHANCEMENT] OpenStack SD: Use `flavor.original_name` if available. #14312
* [ENHANCEMENT] UI (experimental native histograms): more accurate representation. #13680,#14430
* [ENHANCEMENT] Agent: `out_of_order_time_window` config option now applies to agent. #14094
* [ENHANCEMENT] Notifier: Send any outstanding Alertmanager notifications when shutting down. #14290
* [ENHANCEMENT] Rules: Add label-matcher support to Rules API. #10194
* [ENHANCEMENT] HTTP API: Add url to message logged on error while sending response. #14209
* [BUGFIX] CLI: escape `|` characters when generating docs. #14420
* [BUGFIX] PromQL (experimental native histograms): Fix some binary operators between native histogram values. #14454
* [BUGFIX] TSDB: LabelNames API could fail during compaction. #14279
* [BUGFIX] TSDB: Fix rare issue where pending OOO read can be left dangling if creating querier fails. #14341
* [BUGFIX] TSDB: fix check for context cancellation in LabelNamesFor. #14302
* [BUGFIX] Rules: Fix rare panic on reload. #14366
* [BUGFIX] Config: In YAML marshalling, do not output a regexp field if it was never set. #14004
* [BUGFIX] Remote-Write: reject samples with future timestamps. #14304
* [BUGFIX] Remote-Write: Fix data corruption in remote write if max_sample_age is applied. #14078
* [BUGFIX] Notifier: Fix Alertmanager discovery not updating under heavy load. #14174
* [BUGFIX] Regexes: some Unicode characters were not matched by case-insensitive comparison. #14170,#14299
* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515
## 2.53.1 / 2024-07-10
Fix a bug which would drop samples in remote-write if the sending flow stalled


@@ -12,9 +12,10 @@ examples and guides.</p>
[![Docker Pulls](https://img.shields.io/docker/pulls/prom/prometheus.svg?maxAge=604800)][hub]
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
[![CLOMonitor](https://img.shields.io/endpoint?url=https://clomonitor.io/api/projects/cncf/prometheus/badge)](https://clomonitor.io/projects/cncf/prometheus)
[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
</div>

SECURITY-INSIGHTS.yml (new file, 48 lines)

@@ -0,0 +1,48 @@
header:
schema-version: '1.0.0'
expiration-date: '2025-07-30T01:00:00.000Z'
last-updated: '2024-07-30'
last-reviewed: '2024-07-30'
project-url: https://github.com/prometheus/prometheus
changelog: https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md
license: https://github.com/prometheus/prometheus/blob/main/LICENSE
project-lifecycle:
status: active
bug-fixes-only: false
core-maintainers:
- https://github.com/prometheus/prometheus/blob/main/MAINTAINERS.md
contribution-policy:
accepts-pull-requests: true
accepts-automated-pull-requests: true
dependencies:
third-party-packages: true
dependencies-lists:
- https://github.com/prometheus/prometheus/blob/main/go.mod
- https://github.com/prometheus/prometheus/blob/main/web/ui/package.json
env-dependencies-policy:
policy-url: https://github.com/prometheus/prometheus/blob/main/CONTRIBUTING.md#dependency-management
distribution-points:
- https://github.com/prometheus/prometheus/releases
documentation:
- https://prometheus.io/docs/introduction/overview/
security-contacts:
- type: email
value: prometheus-team@googlegroups.com
security-testing:
- tool-type: sca
tool-name: Dependabot
tool-version: latest
integration:
ad-hoc: false
ci: true
before-release: true
- tool-type: sast
tool-name: CodeQL
tool-version: latest
integration:
ad-hoc: false
ci: true
before-release: true
vulnerability-reporting:
accepts-vulnerability-reports: true
security-policy: https://github.com/prometheus/prometheus/security/policy


@@ -1 +1 @@
2.53.1
2.54.0-rc.1


@@ -234,6 +234,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
case "delayed-compaction":
c.tsdb.EnableDelayedCompaction = true
level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.")
case "":
continue
case "promql-at-modifier", "promql-negative-offset":
@@ -381,6 +384,9 @@ func main() {
serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "[DEPRECATED] This flag has no effect. Overlapping blocks are enabled by default now.").
Default("true").Hidden().BoolVar(&b)
serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks.").
Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction)
serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
@@ -475,7 +481,7 @@ func main() {
a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)
promlogflag.AddFlags(a, &cfg.promlogConfig)
@@ -1715,6 +1721,8 @@ type tsdbOptions struct {
MaxExemplars int64
EnableMemorySnapshotOnShutdown bool
EnableNativeHistograms bool
EnableDelayedCompaction bool
EnableOverlappingCompaction bool
}
func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
@@ -1735,7 +1743,8 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown,
EnableNativeHistograms: opts.EnableNativeHistograms,
OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow,
EnableOverlappingCompaction: true,
EnableDelayedCompaction: opts.EnableDelayedCompaction,
EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
}
}


@@ -204,6 +204,7 @@ func main() {
pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap()
testCmd := app.Command("test", "Unit testing.")
junitOutFile := testCmd.Flag("junit", "File path to store JUnit XML test results.").OpenFile(os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
testRulesCmd := testCmd.Command("rules", "Unit tests for rules.")
testRulesRun := testRulesCmd.Flag("run", "If set, will only run test groups whose names match the regular expression. Can be specified multiple times.").Strings()
testRulesFiles := testRulesCmd.Arg(
@@ -378,7 +379,11 @@ func main() {
os.Exit(QueryLabels(serverURL, httpRoundTripper, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p))
case testRulesCmd.FullCommand():
os.Exit(RulesUnitTest(
results := io.Discard
if *junitOutFile != nil {
results = *junitOutFile
}
os.Exit(RulesUnitTestResult(results,
promqltest.LazyLoaderOpts{
EnableAtModifier: true,
EnableNegativeOffset: true,


@@ -20,6 +20,7 @@ import (
"math"
"os"
"runtime"
"slices"
"strings"
"testing"
"time"
@@ -152,12 +153,18 @@ func TestTSDBDump(t *testing.T) {
expectedMetrics, err := os.ReadFile(tt.expectedDump)
require.NoError(t, err)
expectedMetrics = normalizeNewLine(expectedMetrics)
// even though in case of one matcher samples are not sorted, the order in the cases above should stay the same.
require.Equal(t, string(expectedMetrics), dumpedMetrics)
// Sort both, because Prometheus does not guarantee the output order.
require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
})
}
}
func sortLines(buf string) string {
lines := strings.Split(buf, "\n")
slices.Sort(lines)
return strings.Join(lines, "\n")
}
func TestTSDBDumpOpenMetrics(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
@@ -169,7 +176,7 @@ func TestTSDBDumpOpenMetrics(t *testing.T) {
require.NoError(t, err)
expectedMetrics = normalizeNewLine(expectedMetrics)
dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
require.Equal(t, string(expectedMetrics), dumpedMetrics)
require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
}
func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) {


@@ -18,6 +18,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sort"
@@ -29,9 +30,10 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/grafana/regexp"
"github.com/nsf/jsondiff"
"github.com/prometheus/common/model"
"gopkg.in/yaml.v2"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
@@ -39,12 +41,18 @@ import (
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/junitxml"
)
// RulesUnitTest does unit testing of rules based on the unit testing files provided.
// More info about the file format can be found in the docs.
func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, files...)
}
func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
failed := false
junit := &junitxml.JUnitXML{}
var run *regexp.Regexp
if runStrings != nil {
@@ -52,7 +60,7 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif
}
for _, f := range files {
if errs := ruleUnitTest(f, queryOpts, run, diffFlag); errs != nil {
if errs := ruleUnitTest(f, queryOpts, run, diffFlag, junit.Suite(f)); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
@@ -64,25 +72,30 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif
}
fmt.Println()
}
err := junit.WriteXML(results)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to write JUnit XML: %s\n", err)
}
if failed {
return failureExitCode
}
return successExitCode
}
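A minimal caller sketch (hypothetical, not part of this diff) of the new entry point, streaming the JUnit report to a file; `junit.xml` and `rules_test.yml` are placeholder names:

```go
// Hypothetical usage of RulesUnitTestResult: run rule unit tests and
// write the JUnit XML report to a file instead of discarding it.
f, err := os.Create("junit.xml")
if err != nil {
	log.Fatal(err)
}
code := RulesUnitTestResult(f,
	promqltest.LazyLoaderOpts{EnableAtModifier: true, EnableNegativeOffset: true},
	nil,   // run all test groups
	false, // no diff output on failures
	"rules_test.yml",
)
f.Close()
os.Exit(code)
```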
func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
fmt.Println("Unit Testing: ", filename)
func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool, ts *junitxml.TestSuite) []error {
b, err := os.ReadFile(filename)
if err != nil {
ts.Abort(err)
return []error{err}
}
var unitTestInp unitTestFile
if err := yaml.UnmarshalStrict(b, &unitTestInp); err != nil {
ts.Abort(err)
return []error{err}
}
if err := resolveAndGlobFilepaths(filepath.Dir(filename), &unitTestInp); err != nil {
ts.Abort(err)
return []error{err}
}
@@ -91,29 +104,38 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg
}
evalInterval := time.Duration(unitTestInp.EvaluationInterval)
ts.Settime(time.Now().Format("2006-01-02T15:04:05"))
// Giving number for groups mentioned in the file for ordering.
// Lower number group should be evaluated before higher number group.
groupOrderMap := make(map[string]int)
for i, gn := range unitTestInp.GroupEvalOrder {
if _, ok := groupOrderMap[gn]; ok {
return []error{fmt.Errorf("group name repeated in evaluation order: %s", gn)}
err := fmt.Errorf("group name repeated in evaluation order: %s", gn)
ts.Abort(err)
return []error{err}
}
groupOrderMap[gn] = i
}
// Testing.
var errs []error
for _, t := range unitTestInp.Tests {
for i, t := range unitTestInp.Tests {
if !matchesRun(t.TestGroupName, run) {
continue
}
testname := t.TestGroupName
if testname == "" {
testname = fmt.Sprintf("unnamed#%d", i)
}
tc := ts.Case(testname)
if t.Interval == 0 {
t.Interval = unitTestInp.EvaluationInterval
}
ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...)
if ers != nil {
for _, e := range ers {
tc.Fail(e.Error())
}
errs = append(errs, ers...)
}
}


@@ -14,11 +14,15 @@
package main
import (
"bytes"
"encoding/xml"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/util/junitxml"
)
func TestRulesUnitTest(t *testing.T) {
@@ -125,13 +129,59 @@ func TestRulesUnitTest(t *testing.T) {
want: 0,
},
}
reuseFiles := []string{}
reuseCount := [2]int{}
for _, tt := range tests {
if (tt.queryOpts == promqltest.LazyLoaderOpts{
EnableNegativeOffset: true,
} || tt.queryOpts == promqltest.LazyLoaderOpts{
EnableAtModifier: true,
}) {
reuseFiles = append(reuseFiles, tt.args.files...)
reuseCount[tt.want] += len(tt.args.files)
}
t.Run(tt.name, func(t *testing.T) {
if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want {
t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
}
})
}
t.Run("JUnit XML output", func(t *testing.T) {
var buf bytes.Buffer
if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, reuseFiles...); got != 1 {
t.Errorf("RulesUnitTestResult() = %v, want 1", got)
}
var test junitxml.JUnitXML
output := buf.Bytes()
err := xml.Unmarshal(output, &test)
if err != nil {
t.Errorf("error in decoding XML: %s", err)
return
}
var total int
var passes int
var failures int
var cases int
total = len(test.Suites)
if total != len(reuseFiles) {
t.Errorf("JUnit output had %d testsuite elements; expected %d\n", total, len(reuseFiles))
}
for _, i := range test.Suites {
if i.FailureCount == 0 {
passes++
} else {
failures++
}
cases += len(i.Cases)
}
if total != passes+failures {
t.Errorf("JUnit output mismatch: Total testsuites (%d) does not equal the sum of passes (%d) and failures (%d).", total, passes, failures)
}
if cases < total {
t.Errorf("JUnit output had %d suites without test cases\n", total-cases)
}
})
}
func TestRulesUnitTestRun(t *testing.T) {


@@ -37,6 +37,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/storage/remote/azuread"
"github.com/prometheus/prometheus/storage/remote/googleiam"
)
var (
@@ -1089,8 +1090,9 @@ func (m RemoteWriteProtoMsgs) String() string {
}
var (
// RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf
// message introduced in the https://prometheus.io/docs/specs/remote_write_spec/.
// RemoteWriteProtoMsgV1 represents the `prometheus.WriteRequest` protobuf
// message introduced in the https://prometheus.io/docs/specs/remote_write_spec/,
// which will eventually be deprecated.
//
// NOTE: This string is used for both HTTP header values and config value, so don't change
// this reference.
@@ -1123,6 +1125,7 @@ type RemoteWriteConfig struct {
MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"`
SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"`
AzureADConfig *azuread.AzureADConfig `yaml:"azuread,omitempty"`
GoogleIAMConfig *googleiam.Config `yaml:"google_iam,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
@@ -1160,17 +1163,33 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
return err
}
httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
return validateAuthConfigs(c)
}
if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) {
return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
// validateAuthConfigs validates that at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam is configured.
func validateAuthConfigs(c *RemoteWriteConfig) error {
var authConfigured []string
if c.HTTPClientConfig.BasicAuth != nil {
authConfigured = append(authConfigured, "basic_auth")
}
if c.SigV4Config != nil && c.AzureADConfig != nil {
return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
if c.HTTPClientConfig.Authorization != nil {
authConfigured = append(authConfigured, "authorization")
}
if c.HTTPClientConfig.OAuth2 != nil {
authConfigured = append(authConfigured, "oauth2")
}
if c.SigV4Config != nil {
authConfigured = append(authConfigured, "sigv4")
}
if c.AzureADConfig != nil {
authConfigured = append(authConfigured, "azuread")
}
if c.GoogleIAMConfig != nil {
authConfigured = append(authConfigured, "google_iam")
}
if len(authConfigured) > 1 {
return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured. Currently configured: %v", authConfigured)
}
return nil
}
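A quick illustration (hypothetical snippet, using only the fields shown above) of how a config with two mechanisms set is rejected, with the error naming the offenders:

```go
// Hypothetical: sigv4 and azuread configured at the same time.
cfg := &RemoteWriteConfig{
	SigV4Config:   &sigv4.SigV4Config{},
	AzureADConfig: &azuread.AzureADConfig{},
}
err := validateAuthConfigs(cfg)
// err: "at most one of basic_auth, authorization, oauth2, sigv4, azuread or
// google_iam must be configured. Currently configured: [sigv4 azuread]"
```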
@@ -1189,7 +1208,7 @@ func validateHeadersForTracing(headers map[string]string) error {
func validateHeaders(headers map[string]string) error {
for header := range headers {
if strings.ToLower(header) == "authorization" {
return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter")
return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter")
}
if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
return fmt.Errorf("%s is a reserved header. It must not be changed", header)


@@ -1826,7 +1826,7 @@ var expectedErrors = []struct {
},
{
filename: "remote_write_authorization_header.bad.yml",
errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter`,
},
{
filename: "remote_write_wrong_msg.bad.yml",


@@ -1090,7 +1090,6 @@ func TestCoordinationWithReceiver(t *testing.T) {
}
for _, tc := range testCases {
tc := tc
t.Run(tc.title, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()


@@ -56,7 +56,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |


@@ -442,6 +442,15 @@ Unit testing.
#### Flags
| Flag | Description |
| --- | --- |
| <code class="text-nowrap">--junit</code> | File path to store JUnit XML test results. |
##### `promtool test rules`
Unit tests for rules.
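For example (invocation shape inferred from the flag definition above; file names are placeholders): `promtool test --junit=junit.xml rules tests.yml`.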


@@ -3401,8 +3401,8 @@ authorization:
# It is mutually exclusive with `credentials`.
[ credentials_file: <filename> ]
# Optionally configures AWS's Signature Verification 4 signing process to
# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2.
# Optionally configures AWS's Signature Verification 4 signing process to sign requests.
# Cannot be set at the same time as basic_auth, authorization, oauth2, azuread or google_iam.
# To use the default credentials from the AWS SDK, use `sigv4: {}`.
sigv4:
# The AWS region. If blank, the region from the default credentials chain
@@ -3655,12 +3655,12 @@ sigv4:
[ role_arn: <string> ]
# Optional OAuth 2.0 configuration.
# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread.
# Cannot be used at the same time as basic_auth, authorization, sigv4, azuread or google_iam.
oauth2:
[ <oauth2> ]
# Optional AzureAD configuration.
# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4.
# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or google_iam.
azuread:
# The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'.
[ cloud: <string> | default = AzurePublic ]
@@ -3680,6 +3680,14 @@ azuread:
[ sdk:
[ tenant_id: <string> ] ]
# WARNING: Remote write is NOT SUPPORTED by Google Cloud. This configuration is reserved for future use.
# Optional Google Cloud Monitoring configuration.
# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread.
# To use the default credentials from the Google Cloud SDK, use `google_iam: {}`.
google_iam:
# Service account key with monitoring write permissions.
credentials_file: <file_name>
# Configures the remote write request's TLS settings.
tls_config:
[ <tls_config> ]


@@ -92,7 +92,7 @@ series: <string>
#
# Native histogram notation:
# Native histograms can be used instead of floating point numbers using the following notation:
# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}
# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}
# Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'.
# All properties are optional and default to 0. The order is not important. The following properties are supported:
# - schema (int):
@@ -119,6 +119,8 @@ series: <string>
# Observation counts in negative buckets. Each represents an absolute count.
# - n_offset (int):
# The starting index of the first entry in the negative buckets.
# - counter_reset_hint (one of 'unknown', 'reset', 'not_reset' or 'gauge'):
# The counter reset hint associated with this histogram. Defaults to 'unknown' if not set.
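# Example (hypothetical): a gauge native histogram with an explicit counter
# reset hint, repeated three times via the expanding notation:
#   '{{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}}x3'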
values: <string>
```


@@ -200,8 +200,9 @@ won't work when you push OTLP metrics.
`--enable-feature=promql-experimental-functions`
Enables PromQL functions that are considered experimental and whose name or
semantics could change.
Enables PromQL functions that are considered experimental. These functions
might change their name, syntax, or semantics. They might also get removed
entirely.
## Created Timestamps Zero Injection
@@ -234,3 +235,17 @@ metadata changes as WAL records on a per-series basis.
This must be used if
you are also using remote write 2.0 as it will only gather metadata from the WAL.
## Delay compaction start time
`--enable-feature=delayed-compaction`
A random offset, up to `10%` of the chunk range, is added to the Head compaction start time. This assists Prometheus instances in avoiding simultaneous compactions and reduces the load on shared resources.
Only auto Head compactions and the operations directly resulting from them are subject to this delay.
In the event of multiple consecutive Head compactions being possible, only the first compaction experiences this delay.
Note that during this delay, the Head continues its usual operations, which include serving and appending series.
Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay were not in place.
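The offset computation itself is simple to picture; a minimal sketch (not the actual Prometheus implementation) of deriving a delayed start time:

```go
package main

import (
	"math/rand"
	"time"
)

// delayedCompactionStart is a sketch only: it adds a random delay of up to
// 10% of the chunk range to the nominal Head compaction start time, so that
// instances sharing resources do not all compact at the same moment.
func delayedCompactionStart(nominal time.Time, chunkRange time.Duration) time.Time {
	maxDelay := chunkRange / 10 // 10% of the chunk range; assumed positive
	return nominal.Add(time.Duration(rand.Int63n(int64(maxDelay))))
}
```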


@@ -617,7 +617,7 @@ Like `sort`, `sort_desc` only affects the results of instant queries, as range q
## `sort_by_label()`
**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.**
**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted in ascending order by their label values, falling back to the sample value when label values are equal.
@@ -627,7 +627,7 @@ This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_so
## `sort_by_label_desc()`
**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.**
**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
Same as `sort_by_label`, but sorts in descending order.
@@ -676,7 +676,7 @@ over time and return an instant vector with per-series aggregation results:
* `last_over_time(range-vector)`: the most recent point value in the specified interval.
* `present_over_time(range-vector)`: the value 1 for any series in the specified interval.
If the [feature flag](../feature_flags/)
If the [feature flag](../feature_flags.md#experimental-promql-functions)
`--enable-feature=promql-experimental-functions` is set, the following
additional functions are available:


@@ -19,7 +19,7 @@ remote_write:
protobuf_message: "io.prometheus.write.v2.Request"
```
or for deprecated Remote Write 1.0 message:
or for the eventually deprecated Remote Write 1.0 message:
```yaml
remote_write:


@@ -16,8 +16,8 @@ require (
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/aws/aws-sdk-go v1.53.16 // indirect
@@ -36,7 +36,6 @@ require (
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect


@@ -1,9 +1,9 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
@@ -39,7 +39,6 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -50,8 +49,6 @@ github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LO
github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -279,8 +276,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc=
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=

go.mod (30 changed lines)

@@ -13,12 +13,12 @@ require (
github.com/KimMachineGun/automemlimit v0.6.1
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
github.com/aws/aws-sdk-go v1.54.19
github.com/aws/aws-sdk-go v1.55.5
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
github.com/digitalocean/godo v1.118.0
github.com/docker/docker v27.0.3+incompatible
github.com/digitalocean/godo v1.119.0
github.com/docker/docker v27.1.1+incompatible
github.com/edsrzf/mmap-go v1.1.0
github.com/envoyproxy/go-control-plane v0.12.0
github.com/envoyproxy/protoc-gen-validate v1.0.4
@@ -33,17 +33,17 @@ require (
github.com/google/go-cmp v0.6.0
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da
github.com/google/uuid v1.6.0
github.com/gophercloud/gophercloud v1.13.0
github.com/gophercloud/gophercloud v1.14.0
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.29.2
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
github.com/hetznercloud/hcloud-go/v2 v2.10.2
github.com/ionos-cloud/sdk-go/v6 v6.1.11
github.com/hetznercloud/hcloud-go/v2 v2.12.0
github.com/ionos-cloud/sdk-go/v6 v6.2.0
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.9
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
github.com/linode/linodego v1.37.0
github.com/linode/linodego v1.38.0
github.com/miekg/dns v1.1.61
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@@ -82,8 +82,8 @@ require (
golang.org/x/text v0.16.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.23.0
google.golang.org/api v0.188.0
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
google.golang.org/api v0.190.0
google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f
google.golang.org/grpc v1.65.0
google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v2 v2.4.0
@@ -96,9 +96,9 @@ require (
)
require (
cloud.google.com/go/auth v0.7.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
cloud.google.com/go/compute/metadata v0.4.0 // indirect
cloud.google.com/go/auth v0.7.3 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
cloud.google.com/go/compute/metadata v0.5.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
@@ -140,9 +140,9 @@ require (
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/s2a-go v0.1.8 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.5 // indirect
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
@@ -191,7 +191,7 @@ require (
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.19.0 // indirect
golang.org/x/term v0.22.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.0.3 // indirect

go.sum (60 changed lines)

@@ -12,18 +12,18 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts=
cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw=
cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY=
cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA=
cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute/metadata v0.4.0 h1:vHzJCWaM4g8XIcm8kopr3XmDA4Gy/lblD3EhhSux05c=
cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M=
cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI=
github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
@@ -143,14 +143,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4=
github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
github.com/digitalocean/godo v1.119.0 h1:dmFNQwSIAcH3z+FVovHLkazKDC2uA8oOlGvg5+H4vRw=
github.com/digitalocean/godo v1.119.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -322,8 +322,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -332,10 +332,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0=
github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8=
github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -414,8 +414,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtx
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I=
github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8=
github.com/hetznercloud/hcloud-go/v2 v2.12.0 h1:nOgfNTo0gyXZJJdM8mo/XH5MO/e80wAEpldRzdWayhY=
github.com/hetznercloud/hcloud-go/v2 v2.12.0/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -423,8 +423,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/ionos-cloud/sdk-go/v6 v6.2.0 h1:qX7gachC0wJSmFfVRnd+DHmz9AStvVraKcwQ/JokIB4=
github.com/ionos-cloud/sdk-go/v6 v6.2.0/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso=
github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
github.com/linode/linodego v1.38.0 h1:wP3oW9OhGc6vhze8NPf2knbwH4TzSbrjzuCd9okjbTY=
github.com/linode/linodego v1.38.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -1047,8 +1047,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw=
google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag=
google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q=
google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1085,10 +1085,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b h1:04+jVzTs2XBnOZcPsLnmrTGqltqJbZQ1Ey26hjYdQQ0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk=
google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=

View file

@ -213,6 +213,10 @@ func (re Regexp) IsZero() bool {
// String returns the original string used to compile the regular expression.
func (re Regexp) String() string {
if re.Regexp == nil {
return ""
}
str := re.Regexp.String()
// Trim the anchor `^(?:` prefix and `)$` suffix.
return str[4 : len(str)-2]
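
For context, these relabel regexps are anchored at compile time, which is why `String()` can safely trim a fixed prefix and suffix; a minimal round-trip sketch, assuming the package's exported `NewRegexp`:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	// NewRegexp compiles "foo.*" as ^(?:foo.*)$; String() trims the anchor
	// back off, and the nil check above makes the zero value print "".
	re, err := relabel.NewRegexp("foo.*")
	if err != nil {
		panic(err)
	}
	fmt.Println(re.String()) // foo.*
}
```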

View file

@ -900,3 +900,16 @@ action: replace
})
}
}
func TestRegexp_ShouldMarshalAndUnmarshalZeroValue(t *testing.T) {
var zero Regexp
marshalled, err := yaml.Marshal(&zero)
require.NoError(t, err)
require.Equal(t, "null\n", string(marshalled))
var unmarshalled Regexp
err = yaml.Unmarshal(marshalled, &unmarshalled)
require.NoError(t, err)
require.Nil(t, unmarshalled.Regexp)
}

View file

@ -94,16 +94,46 @@ type OpenMetricsParser struct {
exemplarVal float64
exemplarTs int64
hasExemplarTs bool
skipCTSeries bool
}
// NewOpenMetricsParser returns a new parser of the byte slice.
func NewOpenMetricsParser(b []byte, st *labels.SymbolTable) Parser {
return &OpenMetricsParser{
l: &openMetricsLexer{b: b},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
type openMetricsParserOptions struct {
SkipCTSeries bool
}
type OpenMetricsOption func(*openMetricsParserOptions)
// WithOMParserCTSeriesSkipped stops exposing _created lines as series,
// so that they are only used to resolve the created timestamp via the
// `CreatedTimestamp` method.
//
// It's recommended to use this option so that _created lines are not used
// for anything other than the created timestamp, but it is left off by
// default for best-effort compatibility.
func WithOMParserCTSeriesSkipped() OpenMetricsOption {
return func(o *openMetricsParserOptions) {
o.SkipCTSeries = true
}
}
// NewOpenMetricsParser returns a new parser for the byte slice with an option to skip parsing _created (CT) series.
func NewOpenMetricsParser(b []byte, st *labels.SymbolTable, opts ...OpenMetricsOption) Parser {
options := &openMetricsParserOptions{}
for _, opt := range opts {
opt(options)
}
parser := &OpenMetricsParser{
l: &openMetricsLexer{b: b},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
skipCTSeries: options.SkipCTSeries,
}
return parser
}
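
A brief usage sketch of the functional option (`data` is an assumed `[]byte` of OpenMetrics text):

```go
// Skip _created series in the parser output while still letting
// CreatedTimestamp() resolve them.
p := NewOpenMetricsParser(data, labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
```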
// Series returns the bytes of the series, the timestamp if set, and the value
// of the current sample.
func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
@ -219,10 +249,90 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
return true
}
// CreatedTimestamp returns nil as it's not implemented yet.
// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980
// CreatedTimestamp returns the created timestamp for the current Metric if one exists, or nil otherwise.
// NOTE(Maniktherana): This might use additional CPU/memory resources, because the OM 1.0 specification puts the created timestamp on a separate _created series, so finding it requires a deep copy of the parser for peeking ahead.
func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
return nil
if !TypeRequiresCT(p.mtype) {
// Not a CT supported metric type, fast path.
return nil
}
var (
currLset labels.Labels
buf []byte
peekWithoutNameLsetHash uint64
)
p.Metric(&currLset)
currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
// Search for the _created line matching currFamilyLsetHash using an ephemeral
// parser until we see EOF or a new metric family. We have to do this because
// we don't know where (or whether) that CT line is.
// TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
peek := deepCopy(p)
for {
eType, err := peek.Next()
if err != nil {
// This means peek will also fail later on, so there is definitely no CT line to be found.
// This might result in a partial scrape with a wrong or missing CT, but only a
// spec improvement would help.
// TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
return nil
}
if eType != EntrySeries {
// Assume we hit different family, no CT line found.
return nil
}
var peekedLset labels.Labels
peek.Metric(&peekedLset)
peekedName := peekedLset.Get(model.MetricNameLabel)
if !strings.HasSuffix(peekedName, "_created") {
// Not a CT line, search more.
continue
}
// We got a CT line here, but we still have to check whether the CT line is actually for our series; this is an edge case.
peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
if peekWithoutNameLsetHash != currFamilyLsetHash {
// The CT line belongs to a different series, so ours has no CT.
return nil
}
ct := int64(peek.val)
return &ct
}
}
// TypeRequiresCT returns true if the metric type requires a _created timestamp.
func TypeRequiresCT(t model.MetricType) bool {
switch t {
case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram:
return true
default:
return false
}
}
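
Putting the two together, a caller would typically drain the parser like this (sketch; `data` is again an assumed payload):

```go
p := NewOpenMetricsParser(data, labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
for {
	entry, err := p.Next()
	if errors.Is(err, io.EOF) {
		break
	}
	if err != nil {
		return err
	}
	if entry != EntrySeries {
		continue
	}
	if ct := p.CreatedTimestamp(); ct != nil {
		// *ct is the created timestamp (in milliseconds) of the current sample.
	}
}
```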
// deepCopy creates a copy of a parser without re-using the slices' original memory addresses.
func deepCopy(p *OpenMetricsParser) OpenMetricsParser {
newB := make([]byte, len(p.l.b))
copy(newB, p.l.b)
newLexer := &openMetricsLexer{
b: newB,
i: p.l.i,
start: p.l.start,
err: p.l.err,
state: p.l.state,
}
newParser := OpenMetricsParser{
l: newLexer,
builder: p.builder,
mtype: p.mtype,
val: p.val,
skipCTSeries: false,
}
return newParser
}
// nextToken returns the next token from the openMetricsLexer.
@ -337,7 +447,13 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
}
p.series = p.l.b[p.start:p.l.i]
return p.parseMetricSuffix(p.nextToken())
if err := p.parseSeriesEndOfLine(p.nextToken()); err != nil {
return EntryInvalid, err
}
if p.skipCTSeries && p.isCreatedSeries() {
return p.Next()
}
return EntrySeries, nil
case tMName:
p.offsets = append(p.offsets, p.start, p.l.i)
p.series = p.l.b[p.start:p.l.i]
@ -351,8 +467,14 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
p.series = p.l.b[p.start:p.l.i]
t2 = p.nextToken()
}
return p.parseMetricSuffix(t2)
if err := p.parseSeriesEndOfLine(t2); err != nil {
return EntryInvalid, err
}
if p.skipCTSeries && p.isCreatedSeries() {
return p.Next()
}
return EntrySeries, nil
default:
err = p.parseError("expected a valid start token", t)
}
@ -467,51 +589,64 @@ func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, e
}
}
// parseMetricSuffix parses the end of the line after the metric name and
// labels. It starts parsing with the provided token.
func (p *OpenMetricsParser) parseMetricSuffix(t token) (Entry, error) {
// isCreatedSeries returns true if the current series is a _created series.
func (p *OpenMetricsParser) isCreatedSeries() bool {
var newLbs labels.Labels
p.Metric(&newLbs)
name := newLbs.Get(model.MetricNameLabel)
return TypeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created")
}
// parseSeriesEndOfLine parses the rest of a series line (value, optional
// timestamp, optional comment) after the metric name and labels.
// It starts parsing with the provided token.
func (p *OpenMetricsParser) parseSeriesEndOfLine(t token) error {
if p.offsets[0] == -1 {
return EntryInvalid, fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i])
return fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i])
}
var err error
p.val, err = p.getFloatValue(t, "metric")
if err != nil {
return EntryInvalid, err
return err
}
p.hasTS = false
switch t2 := p.nextToken(); t2 {
case tEOF:
return EntryInvalid, errors.New("data does not end with # EOF")
return errors.New("data does not end with # EOF")
case tLinebreak:
break
case tComment:
if err := p.parseComment(); err != nil {
return EntryInvalid, err
return err
}
case tTimestamp:
p.hasTS = true
var ts float64
// A float is enough to hold what we need for millisecond resolution.
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if math.IsNaN(ts) || math.IsInf(ts, 0) {
return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts)
return fmt.Errorf("invalid timestamp %f", ts)
}
p.ts = int64(ts * 1000)
switch t3 := p.nextToken(); t3 {
case tLinebreak:
case tComment:
if err := p.parseComment(); err != nil {
return EntryInvalid, err
return err
}
default:
return EntryInvalid, p.parseError("expected next entry after timestamp", t3)
return p.parseError("expected next entry after timestamp", t3)
}
}
return EntrySeries, nil
return nil
}
func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) {

View file

@ -14,6 +14,7 @@
package textparse
import (
"errors"
"io"
"testing"
@ -24,6 +25,8 @@ import (
"github.com/prometheus/prometheus/model/labels"
)
func int64p(x int64) *int64 { return &x }
func TestOpenMetricsParse(t *testing.T) {
input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
@ -63,15 +66,34 @@ ss{A="a"} 0
_metric_starting_with_underscore 1
testmetric{_label_starting_with_underscore="foo"} 1
testmetric{label="\"bar\""} 1
# HELP foo Counter with and without labels to certify CT is parsed for both cases
# TYPE foo counter
foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
foo_total 17.0 1520879607.789 # {id="counter-test"} 5
foo_created 1000
foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5
foo_created{a="b"} 1000
# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines away
# TYPE bar summary
bar_count 17.0
bar_sum 324789.3
bar{quantile="0.95"} 123.7
bar{quantile="0.99"} 150.0
bar_created 1520430000
# HELP baz Histogram with the same objective as the summary above
# TYPE baz histogram
baz_bucket{le="0.0"} 0
baz_bucket{le="+Inf"} 17
baz_count 17
baz_sum 324789.3
baz_created 1520430000
# HELP fizz_created Gauge which shouldn't be parsed as CT
# TYPE fizz_created gauge
fizz_created 17.0`
input += "\n# HELP metric foo\x00bar"
input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
input += "\n# EOF\n"
int64p := func(x int64) *int64 { return &x }
exp := []expectedParse{
{
m: "go_gc_duration_seconds",
@ -216,6 +238,9 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: "foo",
help: "Counter with and without labels to certify CT is parsed for both cases",
}, {
m: "foo",
typ: model.MetricTypeCounter,
@ -225,6 +250,76 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
lset: labels.FromStrings("__name__", "foo_total"),
t: int64p(1520879607789),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
ct: int64p(1000),
}, {
m: `foo_total{a="b"}`,
v: 17.0,
lset: labels.FromStrings("__name__", "foo_total", "a", "b"),
t: int64p(1520879607789),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
ct: int64p(1000),
}, {
m: "bar",
help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",
}, {
m: "bar",
typ: model.MetricTypeSummary,
}, {
m: "bar_count",
v: 17.0,
lset: labels.FromStrings("__name__", "bar_count"),
ct: int64p(1520430000),
}, {
m: "bar_sum",
v: 324789.3,
lset: labels.FromStrings("__name__", "bar_sum"),
ct: int64p(1520430000),
}, {
m: `bar{quantile="0.95"}`,
v: 123.7,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"),
ct: int64p(1520430000),
}, {
m: `bar{quantile="0.99"}`,
v: 150.0,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"),
ct: int64p(1520430000),
}, {
m: "baz",
help: "Histogram with the same objective as above's summary",
}, {
m: "baz",
typ: model.MetricTypeHistogram,
}, {
m: `baz_bucket{le="0.0"}`,
v: 0,
lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"),
ct: int64p(1520430000),
}, {
m: `baz_bucket{le="+Inf"}`,
v: 17,
lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"),
ct: int64p(1520430000),
}, {
m: `baz_count`,
v: 17,
lset: labels.FromStrings("__name__", "baz_count"),
ct: int64p(1520430000),
}, {
m: `baz_sum`,
v: 324789.3,
lset: labels.FromStrings("__name__", "baz_sum"),
ct: int64p(1520430000),
}, {
m: "fizz_created",
help: "Gauge which shouldn't be parsed as CT",
}, {
m: "fizz_created",
typ: model.MetricTypeGauge,
}, {
m: `fizz_created`,
v: 17,
lset: labels.FromStrings("__name__", "fizz_created"),
}, {
m: "metric",
help: "foo\x00bar",
@ -235,8 +330,8 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
},
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable())
checkParseResults(t, p, exp)
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
checkParseResultsWithCT(t, p, exp, true)
}
func TestUTF8OpenMetricsParse(t *testing.T) {
@ -251,6 +346,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
# UNIT "go.gc_duration_seconds" seconds
{"go.gc_duration_seconds",quantile="0"} 4.9351e-05
{"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05
{"go.gc_duration_seconds_created"} 12313
{"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05
{"http.status",q="0.9",a="b"} 8.3835e-05
{"http.status",q="0.9",a="b"} 8.3835e-05
@ -274,10 +370,12 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
m: `{"go.gc_duration_seconds",quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"),
ct: int64p(12313),
}, {
m: `{"go.gc_duration_seconds",quantile="0.25"}`,
v: 7.424100000000001e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"),
ct: int64p(12313),
}, {
m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`,
v: 8.3835e-05,
@ -306,8 +404,8 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"),
},
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable())
checkParseResults(t, p, exp)
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
checkParseResultsWithCT(t, p, exp, true)
}
func TestOpenMetricsParseErrors(t *testing.T) {
@ -598,10 +696,6 @@ func TestOpenMetricsParseErrors(t *testing.T) {
input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf",
err: `invalid exemplar timestamp -Inf`,
},
{
input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf",
err: `invalid exemplar timestamp +Inf`,
},
}
for i, c := range cases {
@ -684,3 +778,217 @@ func TestOMNullByteHandling(t *testing.T) {
require.Equal(t, c.err, err.Error(), "test %d", i)
}
}
// While not desirable, there are cases where CT fails to parse, and
// these tests document them.
// TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
func TestCTParseFailures(t *testing.T) {
input := `# HELP something Histogram with _created between sum and buckets
# TYPE something histogram
something_count 17
something_sum 324789.3
something_created 1520430001
something_bucket{le="0.0"} 0
something_bucket{le="+Inf"} 17
# HELP thing Histogram with _created as first line
# TYPE thing histogram
thing_created 1520430002
thing_count 17
thing_sum 324789.3
thing_bucket{le="0.0"} 0
thing_bucket{le="+Inf"} 17
# HELP yum Summary with _created between sum and quantiles
# TYPE yum summary
yum_count 17.0
yum_sum 324789.3
yum_created 1520430003
yum{quantile="0.95"} 123.7
yum{quantile="0.99"} 150.0
# HELP foobar Summary with _created as the first line
# TYPE foobar summary
foobar_created 1520430004
foobar_count 17.0
foobar_sum 324789.3
foobar{quantile="0.95"} 123.7
foobar{quantile="0.99"} 150.0`
input += "\n# EOF\n"
int64p := func(x int64) *int64 { return &x }
type expectCT struct {
m string
ct *int64
typ model.MetricType
help string
isErr bool
}
exp := []expectCT{
{
m: "something",
help: "Histogram with _created between buckets and summary",
isErr: false,
}, {
m: "something",
typ: model.MetricTypeHistogram,
isErr: false,
}, {
m: `something_count`,
ct: int64p(1520430001),
isErr: false,
}, {
m: `something_sum`,
ct: int64p(1520430001),
isErr: false,
}, {
m: `something_bucket{le="0.0"}`,
ct: int64p(1520430001),
isErr: true,
}, {
m: `something_bucket{le="+Inf"}`,
ct: int64p(1520430001),
isErr: true,
}, {
m: "thing",
help: "Histogram with _created as first line",
isErr: false,
}, {
m: "thing",
typ: model.MetricTypeHistogram,
isErr: false,
}, {
m: `thing_count`,
ct: int64p(1520430002),
isErr: true,
}, {
m: `thing_sum`,
ct: int64p(1520430002),
isErr: true,
}, {
m: `thing_bucket{le="0.0"}`,
ct: int64p(1520430002),
isErr: true,
}, {
m: `thing_bucket{le="+Inf"}`,
ct: int64p(1520430002),
isErr: true,
}, {
m: "yum",
help: "Summary with _created between summary and quantiles",
isErr: false,
}, {
m: "yum",
typ: model.MetricTypeSummary,
isErr: false,
}, {
m: "yum_count",
ct: int64p(1520430003),
isErr: false,
}, {
m: "yum_sum",
ct: int64p(1520430003),
isErr: false,
}, {
m: `yum{quantile="0.95"}`,
ct: int64p(1520430003),
isErr: true,
}, {
m: `yum{quantile="0.99"}`,
ct: int64p(1520430003),
isErr: true,
}, {
m: "foobar",
help: "Summary with _created as the first line",
isErr: false,
}, {
m: "foobar",
typ: model.MetricTypeSummary,
isErr: false,
}, {
m: "foobar_count",
ct: int64p(1520430004),
isErr: true,
}, {
m: "foobar_sum",
ct: int64p(1520430004),
isErr: true,
}, {
m: `foobar{quantile="0.95"}`,
ct: int64p(1520430004),
isErr: true,
}, {
m: `foobar{quantile="0.99"}`,
ct: int64p(1520430004),
isErr: true,
},
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
i := 0
var res labels.Labels
for {
et, err := p.Next()
if errors.Is(err, io.EOF) {
break
}
require.NoError(t, err)
switch et {
case EntrySeries:
p.Metric(&res)
if ct := p.CreatedTimestamp(); exp[i].isErr {
require.Nil(t, ct)
} else {
require.Equal(t, *exp[i].ct, *ct)
}
default:
i++
continue
}
i++
}
}
func TestDeepCopy(t *testing.T) {
input := []byte(`# HELP go_goroutines A gauge of goroutines.
# TYPE go_goroutines gauge
go_goroutines 33 123.123
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds
go_gc_duration_seconds_created`)
st := labels.NewSymbolTable()
parser := NewOpenMetricsParser(input, st, WithOMParserCTSeriesSkipped()).(*OpenMetricsParser)
// Modify the original parser state
_, err := parser.Next()
require.NoError(t, err)
require.Equal(t, "go_goroutines", string(parser.l.b[parser.offsets[0]:parser.offsets[1]]))
require.True(t, parser.skipCTSeries)
// Create a deep copy of the parser
copyParser := deepCopy(parser)
etype, err := copyParser.Next()
require.NoError(t, err)
require.Equal(t, EntryType, etype)
require.True(t, parser.skipCTSeries)
require.False(t, copyParser.skipCTSeries)
// Modify the original parser further
parser.Next()
parser.Next()
parser.Next()
require.Equal(t, "go_gc_duration_seconds", string(parser.l.b[parser.offsets[0]:parser.offsets[1]]))
require.Equal(t, "summary", string(parser.mtype))
require.False(t, copyParser.skipCTSeries)
require.True(t, parser.skipCTSeries)
// Ensure the copy remains unchanged
copyParser.Next()
copyParser.Next()
require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]]))
require.False(t, copyParser.skipCTSeries)
}

View file

@ -18,6 +18,7 @@ import (
"errors"
"io"
"os"
"strings"
"testing"
"github.com/klauspost/compress/gzip"
@ -41,6 +42,7 @@ type expectedParse struct {
unit string
comment string
e *exemplar.Exemplar
ct *int64
}
func TestPromParse(t *testing.T) {
@ -188,6 +190,10 @@ testmetric{label="\"bar\""} 1`
}
func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
checkParseResultsWithCT(t, p, exp, false)
}
func checkParseResultsWithCT(t *testing.T, p Parser, exp []expectedParse, ctLinesRemoved bool) {
i := 0
var res labels.Labels
@ -205,6 +211,14 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
p.Metric(&res)
if ctLinesRemoved {
// Are CT series skipped?
_, typ := p.Type()
if TypeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") {
t.Fatalf("we exped created lines skipped")
}
}
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].t, ts)
require.Equal(t, exp[i].v, v)
@ -218,6 +232,11 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
require.True(t, found)
testutil.RequireEqual(t, *exp[i].e, e)
}
if ct := p.CreatedTimestamp(); ct != nil {
require.Equal(t, *exp[i].ct, *ct)
} else {
require.Nil(t, exp[i].ct)
}
case EntryType:
m, typ := p.Type()
@ -475,8 +494,10 @@ const (
func BenchmarkParse(b *testing.B) {
for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{
"prometheus": NewPromParser,
"openmetrics": NewOpenMetricsParser,
"prometheus": NewPromParser,
"openmetrics": func(b []byte, st *labels.SymbolTable) Parser {
return NewOpenMetricsParser(b, st)
},
} {
for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
f, err := os.Open(fn)

View file

@ -302,15 +302,10 @@ type Exemplar struct {
// value represents an exact example value. This can be useful when the exemplar
// is attached to a histogram, which only gives an estimated value through buckets.
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
// timestamp represents an optional timestamp of the sample in ms.
// timestamp represents the timestamp of the exemplar in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
//
// Note that the "optional" keyword is omitted due to
// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
// Zero value means value not set. If you need to use exactly zero value for
// the timestamp, use 1 millisecond before or after.
Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`

View file

@ -107,15 +107,10 @@ message Exemplar {
// value represents an exact example value. This can be useful when the exemplar
// is attached to a histogram, which only gives an estimated value through buckets.
double value = 2;
// timestamp represents an optional timestamp of the sample in ms.
// timestamp represents the timestamp of the exemplar in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
//
// Note that the "optional" keyword is omitted due to
// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
// Zero value means value not set. If you need to use exactly zero value for
// the timestamp, use 1 millisecond before or after.
int64 timestamp = 3;
}

View file

@ -165,6 +165,9 @@ func rangeQueryCases() []benchCase {
{
expr: "sum(a_X)",
},
{
expr: "avg(a_X)",
},
{
expr: "sum without (l)(h_X)",
},

View file

@ -1057,7 +1057,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf))
level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf))
*errp = fmt.Errorf("unexpected error: %w", err)
case errWithWarnings:
*errp = err.err
@ -2356,6 +2356,11 @@ loop:
} else {
histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}})
}
if histograms[n].H == nil {
// Make sure to pass non-nil H to AtFloatHistogram so that it does a deep-copy.
// Not an issue in the loop above since that uses an intermediate buffer.
histograms[n].H = &histogram.FloatHistogram{}
}
histograms[n].T, histograms[n].H = it.AtFloatHistogram(histograms[n].H)
if value.IsStaleNaN(histograms[n].H.Sum) {
histograms = histograms[:n]
@ -2773,15 +2778,20 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
}
type groupedAggregation struct {
seen bool // Was this output group seen in the input at this timestamp.
hasFloat bool // Has at least 1 float64 sample aggregated.
hasHistogram bool // Has at least 1 histogram sample aggregated.
floatValue float64
histogramValue *histogram.FloatHistogram
floatMean float64 // Mean, or "compensating value" for Kahan summation.
groupCount int
groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group
heap vectorByValueHeap
floatValue float64
histogramValue *histogram.FloatHistogram
floatMean float64
floatKahanC float64 // "Compensating value" for Kahan summation.
groupCount float64
heap vectorByValueHeap
// All bools together for better packing within the struct.
seen bool // Was this output group seen in the input at this timestamp.
hasFloat bool // Has at least 1 float64 sample aggregated.
hasHistogram bool // Has at least 1 histogram sample aggregated.
incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets.
groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group.
incrementalMean bool // True after reverting to incremental calculation of the mean value.
}
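
For reference, `floatKahanC` is the compensation term of Kahan summation; the `kahanSumInc` helper called throughout this file is, in essence (sketch mirroring its use here):

```go
// kahanSumInc adds inc to sum while accumulating the rounding error in c,
// so that sum+c is a far more accurate total than sum alone.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	switch {
	case math.Abs(sum) >= math.Abs(inc):
		c += (sum - t) + inc
	default:
		c += (inc - t) + sum
	}
	return t, c
}
```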
// aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix.
@ -2805,15 +2815,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// Initialize this group if it's the first time we've seen it.
if !group.seen {
*group = groupedAggregation{
seen: true,
floatValue: f,
groupCount: 1,
seen: true,
floatValue: f,
floatMean: f,
incompatibleHistograms: false,
groupCount: 1,
}
switch op {
case parser.AVG:
group.floatMean = f
fallthrough
case parser.SUM:
case parser.AVG, parser.SUM:
if h == nil {
group.hasFloat = true
} else {
@ -2821,7 +2830,6 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
group.hasHistogram = true
}
case parser.STDVAR, parser.STDDEV:
group.floatMean = f
group.floatValue = 0
case parser.QUANTILE:
group.heap = make(vectorByValueHeap, 1)
@ -2832,6 +2840,10 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
continue
}
if group.incompatibleHistograms {
continue
}
switch op {
case parser.SUM:
if h != nil {
@ -2840,6 +2852,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
_, err := group.histogramValue.Add(h)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
}
}
// Otherwise the aggregation contained floats
@ -2847,7 +2860,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// point in copying the histogram in that case.
} else {
group.hasFloat = true
group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean)
group.floatValue, group.floatKahanC = kahanSumInc(f, group.floatValue, group.floatKahanC)
}
case parser.AVG:
@ -2855,15 +2868,19 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
if h != nil {
group.hasHistogram = true
if group.histogramValue != nil {
left := h.Copy().Div(float64(group.groupCount))
right := group.histogramValue.Copy().Div(float64(group.groupCount))
left := h.Copy().Div(group.groupCount)
right := group.histogramValue.Copy().Div(group.groupCount)
toAdd, err := left.Sub(right)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
continue
}
_, err = group.histogramValue.Add(toAdd)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
continue
}
}
// Otherwise the aggregation contained floats
@ -2871,6 +2888,22 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// point in copying the histogram in that case.
} else {
group.hasFloat = true
if !group.incrementalMean {
newV, newC := kahanSumInc(f, group.floatValue, group.floatKahanC)
if !math.IsInf(newV, 0) {
// The sum doesn't overflow, so we propagate it to the
// group struct and continue with the regular
// calculation of the mean value.
group.floatValue, group.floatKahanC = newV, newC
break
}
// If we are here, we know that the sum _would_ overflow. So
// instead of continuing to sum up, we revert to incremental
// calculation of the mean value from here on.
group.incrementalMean = true
group.floatMean = group.floatValue / (group.groupCount - 1)
group.floatKahanC /= group.groupCount - 1
}
if math.IsInf(group.floatMean, 0) {
if math.IsInf(f, 0) && (group.floatMean > 0) == (f > 0) {
// The `floatMean` and `s.F` values are `Inf` of the same sign. They
@ -2888,8 +2921,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
break
}
}
// Divide each side of the `-` by `group.groupCount` to avoid float64 overflows.
group.floatMean += f/float64(group.groupCount) - group.floatMean/float64(group.groupCount)
currentMean := group.floatMean + group.floatKahanC
group.floatMean, group.floatKahanC = kahanSumInc(
// Divide each side of the `-` by `group.groupCount` to avoid float64 overflows.
f/group.groupCount-currentMean/group.groupCount,
group.floatMean,
group.floatKahanC,
)
}
case parser.GROUP:
@ -2912,7 +2950,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
if h == nil { // Ignore native histograms.
group.groupCount++
delta := f - group.floatMean
group.floatMean += delta / float64(group.groupCount)
group.floatMean += delta / group.groupCount
group.floatValue += delta * (f - group.floatMean)
}
@ -2938,20 +2976,25 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange()))
continue
}
if aggr.hasHistogram {
switch {
case aggr.incompatibleHistograms:
continue
case aggr.hasHistogram:
aggr.histogramValue = aggr.histogramValue.Compact(0)
} else {
aggr.floatValue = aggr.floatMean
case aggr.incrementalMean:
aggr.floatValue = aggr.floatMean + aggr.floatKahanC
default:
aggr.floatValue = (aggr.floatValue + aggr.floatKahanC) / aggr.groupCount
}
case parser.COUNT:
aggr.floatValue = float64(aggr.groupCount)
aggr.floatValue = aggr.groupCount
case parser.STDVAR:
aggr.floatValue /= float64(aggr.groupCount)
aggr.floatValue /= aggr.groupCount
case parser.STDDEV:
aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount))
aggr.floatValue = math.Sqrt(aggr.floatValue / aggr.groupCount)
case parser.QUANTILE:
aggr.floatValue = quantile(q, aggr.heap)
@ -2962,10 +3005,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange()))
continue
}
if aggr.hasHistogram {
switch {
case aggr.incompatibleHistograms:
continue
case aggr.hasHistogram:
aggr.histogramValue.Compact(0)
} else {
aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term.
default:
aggr.floatValue += aggr.floatKahanC
}
default:
// For other aggregations, we already have the right value.

View file

@ -26,7 +26,6 @@ import (
"time"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
@ -51,7 +50,7 @@ const (
func TestMain(m *testing.M) {
// Enable experimental functions testing
parser.EnableExperimentalFunctions = true
goleak.VerifyTestMain(m)
testutil.TolerantVerifyLeak(m)
}
func TestQueryConcurrency(t *testing.T) {
@ -3798,3 +3797,62 @@ func makeInt64Pointer(val int64) *int64 {
*valp = val
return valp
}
func TestHistogramCopyFromIteratorRegression(t *testing.T) {
// Loading the following histograms creates two chunks because there's a
// counter reset. Not only is the counter lower in the last histogram,
// but buckets are also missing.
// This in turn means that chunk iterators will have different spans.
load := `load 1m
histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:1 count:1 buckets:[1]}}
`
storage := promqltest.LoadedStorage(t, load)
t.Cleanup(func() { storage.Close() })
engine := promqltest.NewTestEngine(false, 0, promqltest.DefaultMaxSamplesPerQuery)
verify := func(t *testing.T, qry promql.Query, expected []histogram.FloatHistogram) {
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
m, ok := res.Value.(promql.Matrix)
require.True(t, ok)
require.Len(t, m, 1)
series := m[0]
require.Empty(t, series.Floats)
require.Len(t, series.Histograms, len(expected))
for i, e := range expected {
series.Histograms[i].H.CounterResetHint = histogram.UnknownCounterReset // Don't care.
require.Equal(t, &e, series.Histograms[i].H)
}
}
qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[60s])", time.Unix(0, 0), time.Unix(0, 0).Add(1*time.Minute), time.Minute)
require.NoError(t, err)
verify(t, qry, []histogram.FloatHistogram{
{
Count: 2,
Sum: 2, // Increase from 4 to 6 is 2.
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, // Two buckets changed between the first and second histogram.
PositiveBuckets: []float64{1, 1}, // Increase from 2 to 3 is 1 in both buckets.
},
})
qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[60s]", time.Unix(0, 0).Add(2*time.Minute))
require.NoError(t, err)
verify(t, qry, []histogram.FloatHistogram{
{
Count: 6,
Sum: 6,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []float64{3, 3},
},
{
Count: 1,
Sum: 1,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []float64{1},
},
})
}

View file

@ -97,9 +97,10 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
lastT = samples.Histograms[numSamplesMinusOne].T
var newAnnos annotations.Annotations
resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange())
annos.Merge(newAnnos)
if resultHistogram == nil {
// The histograms are not compatible with each other.
return enh.Out, annos.Merge(newAnnos)
return enh.Out, annos
}
case len(samples.Floats) > 1:
numSamplesMinusOne = len(samples.Floats) - 1
@ -178,17 +179,29 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
// Otherwise, it returns the calculated histogram and an empty annotation.
func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) {
prev := points[0].H
usingCustomBuckets := prev.UsesCustomBuckets()
last := points[len(points)-1].H
if last == nil {
return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))
}
minSchema := prev.Schema
if last.Schema < minSchema {
minSchema = last.Schema
}
if last.UsesCustomBuckets() != usingCustomBuckets {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
}
var annos annotations.Annotations
// We check for gauge-type histograms in the loop below, but that loop
// does not run on the first and last point, so check those two now.
if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) {
annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos))
}
// First iteration to find out two things:
// - What's the smallest relevant schema?
// - Are all data points histograms?
@ -208,6 +221,9 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
if curr.Schema < minSchema {
minSchema = curr.Schema
}
if curr.UsesCustomBuckets() != usingCustomBuckets {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
}
}
h := last.CopyToSchema(minSchema)
@ -241,7 +257,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
}
h.CounterResetHint = histogram.GaugeType
return h.Compact(0), nil
return h.Compact(0), annos
}
// === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
@ -573,9 +589,28 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
return vec, nil
}
return aggrOverTime(vals, enh, func(s Series) float64 {
var mean, count, c float64
var (
sum, mean, count, kahanC float64
incrementalMean bool
)
for _, f := range s.Floats {
count++
if !incrementalMean {
newSum, newC := kahanSumInc(f.F, sum, kahanC)
// Perform the regular mean calculation as long as the sum
// doesn't overflow. Always do it on the first iteration
// (even if we start with ±Inf) so that we don't run into
// division-by-zero problems below.
if count == 1 || !math.IsInf(newSum, 0) {
sum, kahanC = newSum, newC
continue
}
// Handle overflow by reverting to incremental calculation of the mean value.
incrementalMean = true
mean = sum / (count - 1)
kahanC /= count - 1
}
if math.IsInf(mean, 0) {
if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) {
// The `mean` and `f.F` values are `Inf` of the same sign. They
@ -593,13 +628,13 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
continue
}
}
mean, c = kahanSumInc(f.F/count-mean/count, mean, c)
correctedMean := mean + kahanC
mean, kahanC = kahanSumInc(f.F/count-correctedMean/count, mean, kahanC)
}
if math.IsInf(mean, 0) {
return mean
if incrementalMean {
return mean + kahanC
}
return mean + c
return (sum + kahanC) / count
}), nil
}
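
A small standalone illustration of why the incremental fallback matters when the plain sum overflows (hypothetical program, not part of the change):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	vals := []float64{math.MaxFloat64, math.MaxFloat64, -math.MaxFloat64}

	// Naive: the running sum overflows to +Inf, so the average is +Inf.
	sum := 0.0
	for _, v := range vals {
		sum += v
	}
	fmt.Println(sum / float64(len(vals))) // +Inf (wrong)

	// Incremental: mean += v/n - mean/n never leaves the float64 range.
	mean, n := 0.0, 0.0
	for _, v := range vals {
		n++
		mean += v/n - mean/n
	}
	fmt.Println(mean) // ≈ math.MaxFloat64 / 3 (correct)
}
```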

View file

@ -68,6 +68,10 @@ func fuzzParseMetricWithContentType(in []byte, contentType string) int {
panic(warning)
}
if contentType == "application/openmetrics-text" {
p = textparse.NewOpenMetricsParser(in, symbolTable)
}
var err error
for {
_, err = p.Next()

View file

@ -48,7 +48,6 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi
var t int64
t, f.currentH = f.Iterator.AtHistogram(f.currentH)
if value.IsStaleNaN(f.currentH.Sum) {
f.setLastH(f.currentH)
h = &histogram.Histogram{Sum: f.currentH.Sum}
return t, h
}
@ -63,9 +62,13 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi
return t, h
}
h.CounterResetHint = f.getResetHint(f.currentH)
h.Count = f.currentH.Count
h.Sum = f.currentH.Sum
returnValue := histogram.Histogram{
CounterResetHint: f.getResetHint(f.currentH),
Count: f.currentH.Count,
Sum: f.currentH.Sum,
}
returnValue.CopyTo(h)
f.setLastH(f.currentH)
return t, h
}
@ -77,7 +80,6 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
var t int64
t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH)
if value.IsStaleNaN(f.currentFH.Sum) {
f.setLastFH(f.currentFH)
return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum}
}
@ -91,9 +93,13 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
return t, fh
}
fh.CounterResetHint = f.getFloatResetHint(f.currentFH.CounterResetHint)
fh.Count = f.currentFH.Count
fh.Sum = f.currentFH.Sum
returnValue := histogram.FloatHistogram{
CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint),
Count: f.currentFH.Count,
Sum: f.currentFH.Sum,
}
returnValue.CopyTo(fh)
f.setLastFH(f.currentFH)
return t, fh
}
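
Usage is unchanged by the fix; a sketch (assuming `series` is a storage.Series of native histograms):

```go
// Decode only the stats (count/sum plus a resolved counter-reset hint);
// the CopyTo above guarantees h does not alias the iterator's buffers.
it := NewHistogramStatsIterator(series.Iterator(nil))
for it.Next() != chunkenc.ValNone {
	_, h := it.AtHistogram(nil)
	_ = h // h.Count and h.Sum are set; buckets are omitted.
}
```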

View file

@ -14,62 +14,132 @@
package promql
import (
"fmt"
"math"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
)
func TestHistogramStatsDecoding(t *testing.T) {
histograms := []*histogram.Histogram{
tsdbutil.GenerateTestHistogram(0),
tsdbutil.GenerateTestHistogram(1),
tsdbutil.GenerateTestHistogram(2),
tsdbutil.GenerateTestHistogram(2),
}
histograms[0].CounterResetHint = histogram.NotCounterReset
histograms[1].CounterResetHint = histogram.UnknownCounterReset
histograms[2].CounterResetHint = histogram.CounterReset
histograms[3].CounterResetHint = histogram.UnknownCounterReset
expectedHints := []histogram.CounterResetHint{
histogram.NotCounterReset,
histogram.NotCounterReset,
histogram.CounterReset,
histogram.NotCounterReset,
cases := []struct {
name string
histograms []*histogram.Histogram
expectedHints []histogram.CounterResetHint
}{
{
name: "unknown counter reset triggers detection",
histograms: []*histogram.Histogram{
tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset),
tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
tsdbutil.GenerateTestHistogramWithHint(2, histogram.CounterReset),
tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
},
expectedHints: []histogram.CounterResetHint{
histogram.NotCounterReset,
histogram.NotCounterReset,
histogram.CounterReset,
histogram.NotCounterReset,
},
},
{
name: "stale sample before unknown reset hint",
histograms: []*histogram.Histogram{
tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset),
tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
{Sum: math.Float64frombits(value.StaleNaN)},
tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
},
expectedHints: []histogram.CounterResetHint{
histogram.NotCounterReset,
histogram.NotCounterReset,
histogram.UnknownCounterReset,
histogram.NotCounterReset,
},
},
{
name: "unknown counter reset at the beginning",
histograms: []*histogram.Histogram{
tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
},
expectedHints: []histogram.CounterResetHint{
histogram.NotCounterReset,
},
},
{
name: "detect real counter reset",
histograms: []*histogram.Histogram{
tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
},
expectedHints: []histogram.CounterResetHint{
histogram.NotCounterReset,
histogram.CounterReset,
},
},
{
name: "detect real counter reset after stale NaN",
histograms: []*histogram.Histogram{
tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
{Sum: math.Float64frombits(value.StaleNaN)},
tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
},
expectedHints: []histogram.CounterResetHint{
histogram.NotCounterReset,
histogram.UnknownCounterReset,
histogram.CounterReset,
},
},
}
t.Run("histogram_stats", func(t *testing.T) {
decodedStats := make([]*histogram.Histogram, 0)
statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil))
for statsIterator.Next() != chunkenc.ValNone {
_, h := statsIterator.AtHistogram(nil)
decodedStats = append(decodedStats, h)
}
for i := 0; i < len(histograms); i++ {
require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint)
require.Equal(t, histograms[i].Count, decodedStats[i].Count)
require.Equal(t, histograms[i].Sum, decodedStats[i].Sum)
}
})
t.Run("float_histogram_stats", func(t *testing.T) {
decodedStats := make([]*histogram.FloatHistogram, 0)
statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil))
for statsIterator.Next() != chunkenc.ValNone {
_, h := statsIterator.AtFloatHistogram(nil)
decodedStats = append(decodedStats, h)
}
for i := 0; i < len(histograms); i++ {
fh := histograms[i].ToFloat(nil)
require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint)
require.Equal(t, fh.Count, decodedStats[i].Count)
require.Equal(t, fh.Sum, decodedStats[i].Sum)
}
})
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
t.Run("histogram_stats", func(t *testing.T) {
decodedStats := make([]*histogram.Histogram, 0)
statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil))
for statsIterator.Next() != chunkenc.ValNone {
_, h := statsIterator.AtHistogram(nil)
decodedStats = append(decodedStats, h)
}
for i := 0; i < len(tc.histograms); i++ {
require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint, fmt.Sprintf("mismatch in counter reset hint for histogram %d", i))
h := tc.histograms[i]
if value.IsStaleNaN(h.Sum) {
require.True(t, value.IsStaleNaN(decodedStats[i].Sum))
require.Equal(t, uint64(0), decodedStats[i].Count)
} else {
require.Equal(t, tc.histograms[i].Count, decodedStats[i].Count)
require.Equal(t, tc.histograms[i].Sum, decodedStats[i].Sum)
}
}
})
t.Run("float_histogram_stats", func(t *testing.T) {
decodedStats := make([]*histogram.FloatHistogram, 0)
statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil))
for statsIterator.Next() != chunkenc.ValNone {
_, h := statsIterator.AtFloatHistogram(nil)
decodedStats = append(decodedStats, h)
}
for i := 0; i < len(tc.histograms); i++ {
require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint)
fh := tc.histograms[i].ToFloat(nil)
if value.IsStaleNaN(fh.Sum) {
require.True(t, value.IsStaleNaN(decodedStats[i].Sum))
require.Equal(t, float64(0), decodedStats[i].Count)
} else {
require.Equal(t, fh.Count, decodedStats[i].Count)
require.Equal(t, fh.Sum, decodedStats[i].Sum)
}
}
})
})
}
}
type histogramSeries struct {

View file

@ -23,6 +23,8 @@ import (
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/common/model"
)
%}
@ -84,6 +86,7 @@ NEGATIVE_BUCKETS_DESC
ZERO_BUCKET_DESC
ZERO_BUCKET_WIDTH_DESC
CUSTOM_VALUES_DESC
COUNTER_RESET_HINT_DESC
%token histogramDescEnd
// Operators.
@ -149,6 +152,14 @@ START
END
%token preprocessorEnd
// Counter reset hints.
%token counterResetHintsStart
%token <item>
UNKNOWN_COUNTER_RESET
COUNTER_RESET
NOT_COUNTER_RESET
GAUGE_TYPE
%token counterResetHintsEnd
// Start symbols for the generated parser.
%token startSymbolsStart
@ -163,7 +174,7 @@ START_METRIC_SELECTOR
// Type definitions for grammar rules.
%type <matchers> label_match_list
%type <matcher> label_matcher
%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier
%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint
%type <labels> label_set metric
%type <lblList> label_set_list
%type <label> label_set_item
@ -351,11 +362,19 @@ grouping_label_list:
grouping_label : maybe_label
{
if !isLabel($1.Val) {
if !model.LabelName($1.Val).IsValid() {
yylex.(*parser).unexpected("grouping opts", "label")
}
$$ = $1
}
| STRING {
if !model.LabelName(yylex.(*parser).unquoteString($1.Val)).IsValid() {
yylex.(*parser).unexpected("grouping opts", "label")
}
$$ = $1
$$.Pos++
$$.Val = yylex.(*parser).unquoteString($$.Val)
}
| error
{ yylex.(*parser).unexpected("grouping opts", "label"); $$ = Item{} }
;
@ -839,6 +858,11 @@ histogram_desc_item
$$ = yylex.(*parser).newMap()
$$["n_offset"] = $3
}
| COUNTER_RESET_HINT_DESC COLON counter_reset_hint
{
$$ = yylex.(*parser).newMap()
$$["counter_reset_hint"] = $3
}
;
bucket_set : LEFT_BRACKET bucket_set_list SPACE RIGHT_BRACKET
@ -862,6 +886,7 @@ bucket_set_list : bucket_set_list SPACE number
| bucket_set_list error
;
counter_reset_hint : UNKNOWN_COUNTER_RESET | COUNTER_RESET | NOT_COUNTER_RESET | GAUGE_TYPE;
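
With the lexer and grammar changes in place, histogram literals in test series notation can carry the hint directly; a hedged sketch via the package's `ParseSeriesDesc` (the field access in the final comment is an assumption about `SequenceValue`):

```go
// counter_reset_hint accepts one of: unknown, reset, not_reset, gauge.
_, vals, err := ParseSeriesDesc(`{} {{sum:2 count:3 counter_reset_hint:gauge}}`)
if err != nil {
	panic(err)
}
_ = vals // vals[0].Histogram.CounterResetHint is histogram.GaugeType here.
```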
/*
* Keyword lists.

File diff suppressed because it is too large

View file

@ -137,16 +137,24 @@ var key = map[string]ItemType{
}
var histogramDesc = map[string]ItemType{
"sum": SUM_DESC,
"count": COUNT_DESC,
"schema": SCHEMA_DESC,
"offset": OFFSET_DESC,
"n_offset": NEGATIVE_OFFSET_DESC,
"buckets": BUCKETS_DESC,
"n_buckets": NEGATIVE_BUCKETS_DESC,
"z_bucket": ZERO_BUCKET_DESC,
"z_bucket_w": ZERO_BUCKET_WIDTH_DESC,
"custom_values": CUSTOM_VALUES_DESC,
"sum": SUM_DESC,
"count": COUNT_DESC,
"schema": SCHEMA_DESC,
"offset": OFFSET_DESC,
"n_offset": NEGATIVE_OFFSET_DESC,
"buckets": BUCKETS_DESC,
"n_buckets": NEGATIVE_BUCKETS_DESC,
"z_bucket": ZERO_BUCKET_DESC,
"z_bucket_w": ZERO_BUCKET_WIDTH_DESC,
"custom_values": CUSTOM_VALUES_DESC,
"counter_reset_hint": COUNTER_RESET_HINT_DESC,
}
var counterResetHints = map[string]ItemType{
"unknown": UNKNOWN_COUNTER_RESET,
"reset": COUNTER_RESET,
"not_reset": NOT_COUNTER_RESET,
"gauge": GAUGE_TYPE,
}
// ItemTypeStr is the default string representations for common Items. It does not
@ -585,6 +593,11 @@ Loop:
return lexHistogram
}
}
if desc, ok := counterResetHints[strings.ToLower(word)]; ok {
l.emit(desc)
return lexHistogram
}
l.errorf("bad histogram descriptor found: %q", word)
break Loop
}
@ -1046,16 +1059,3 @@ func isDigit(r rune) bool {
func isAlpha(r rune) bool {
return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
}
// isLabel reports whether the string can be used as label.
func isLabel(s string) bool {
if len(s) == 0 || !isAlpha(rune(s[0])) {
return false
}
for _, c := range s[1:] {
if !isAlphaNumeric(c) {
return false
}
}
return true
}

View file

@ -580,6 +580,28 @@ func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.
}
}
val, ok = (*desc)["counter_reset_hint"]
if ok {
resetHint, ok := val.(Item)
if ok {
switch resetHint.Typ {
case UNKNOWN_COUNTER_RESET:
output.CounterResetHint = histogram.UnknownCounterReset
case COUNTER_RESET:
output.CounterResetHint = histogram.CounterReset
case NOT_COUNTER_RESET:
output.CounterResetHint = histogram.NotCounterReset
case GAUGE_TYPE:
output.CounterResetHint = histogram.GaugeType
default:
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing counter_reset_hint: unknown value %v", resetHint.Typ)
}
} else {
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing counter_reset_hint: %v", val)
}
}
buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset")
output.PositiveBuckets = buckets
output.PositiveSpans = spans

View file

@ -2397,6 +2397,51 @@ var testExpr = []struct {
},
},
},
{
input: `sum by ("foo")({"some.metric"})`,
expected: &AggregateExpr{
Op: SUM,
Expr: &VectorSelector{
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some.metric"),
},
PosRange: posrange.PositionRange{
Start: 15,
End: 30,
},
},
Grouping: []string{"foo"},
PosRange: posrange.PositionRange{
Start: 0,
End: 31,
},
},
},
{
input: `sum by ("foo)(some_metric{})`,
fail: true,
errMsg: "unterminated quoted string",
},
{
input: `sum by ("foo", bar, 'baz')({"some.metric"})`,
expected: &AggregateExpr{
Op: SUM,
Expr: &VectorSelector{
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some.metric"),
},
PosRange: posrange.PositionRange{
Start: 27,
End: 42,
},
},
Grouping: []string{"foo", "bar", "baz"},
PosRange: posrange.PositionRange{
Start: 0,
End: 43,
},
},
},
{
input: "avg by (foo)(some_metric)",
expected: &AggregateExpr{
@ -3844,6 +3889,7 @@ func readable(s string) string {
}
func TestParseExpressions(t *testing.T) {
model.NameValidationScheme = model.UTF8Validation
for _, test := range testExpr {
t.Run(readable(test.input), func(t *testing.T) {
expr, err := ParseExpr(test.input)
@ -4038,32 +4084,34 @@ func TestParseHistogramSeries(t *testing.T) {
},
{
name: "all properties used",
input: `{} {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}`,
input: `{} {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}`,
expected: []histogram.FloatHistogram{{
Schema: 1,
Sum: -0.3,
Count: 3.1,
ZeroCount: 7.1,
ZeroThreshold: 0.05,
PositiveBuckets: []float64{5.1, 10, 7},
PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}},
NegativeBuckets: []float64{4.1, 5},
NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}},
Schema: 1,
Sum: -0.3,
Count: 3.1,
ZeroCount: 7.1,
ZeroThreshold: 0.05,
PositiveBuckets: []float64{5.1, 10, 7},
PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}},
NegativeBuckets: []float64{4.1, 5},
NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}},
CounterResetHint: histogram.GaugeType,
}},
},
{
name: "all properties used - with spaces",
input: `{} {{schema:1 sum:0.3 count:3 z_bucket:7 z_bucket_w:5 buckets:[5 10 7 ] offset:-3 n_buckets:[4 5] n_offset:5 }}`,
input: `{} {{schema:1 sum:0.3 count:3 z_bucket:7 z_bucket_w:5 buckets:[5 10 7 ] offset:-3 n_buckets:[4 5] n_offset:5 counter_reset_hint:gauge }}`,
expected: []histogram.FloatHistogram{{
Schema: 1,
Sum: 0.3,
Count: 3,
ZeroCount: 7,
ZeroThreshold: 5,
PositiveBuckets: []float64{5, 10, 7},
PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}},
NegativeBuckets: []float64{4, 5},
NegativeSpans: []histogram.Span{{Offset: 5, Length: 2}},
Schema: 1,
Sum: 0.3,
Count: 3,
ZeroCount: 7,
ZeroThreshold: 5,
PositiveBuckets: []float64{5, 10, 7},
PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}},
NegativeBuckets: []float64{4, 5},
NegativeSpans: []histogram.Span{{Offset: 5, Length: 2}},
CounterResetHint: histogram.GaugeType,
}},
},
{
@ -4250,6 +4298,39 @@ func TestParseHistogramSeries(t *testing.T) {
input: `{} {{ schema:1}}`,
expectedError: `1:7: parse error: unexpected "<Item 57372>" "schema" in series values`,
},
{
name: "invalid counter reset hint value",
input: `{} {{counter_reset_hint:foo}}`,
expectedError: `1:25: parse error: bad histogram descriptor found: "foo"`,
},
{
name: "'unknown' counter reset hint value",
input: `{} {{counter_reset_hint:unknown}}`,
expected: []histogram.FloatHistogram{{
CounterResetHint: histogram.UnknownCounterReset,
}},
},
{
name: "'reset' counter reset hint value",
input: `{} {{counter_reset_hint:reset}}`,
expected: []histogram.FloatHistogram{{
CounterResetHint: histogram.CounterReset,
}},
},
{
name: "'not_reset' counter reset hint value",
input: `{} {{counter_reset_hint:not_reset}}`,
expected: []histogram.FloatHistogram{{
CounterResetHint: histogram.NotCounterReset,
}},
},
{
name: "'gauge' counter reset hint value",
input: `{} {{counter_reset_hint:gauge}}`,
expected: []histogram.FloatHistogram{{
CounterResetHint: histogram.GaugeType,
}},
},
} {
t.Run(test.name, func(t *testing.T) {
_, vals, err := ParseSeriesDesc(test.input)

View file

@ -77,14 +77,24 @@ func (node *AggregateExpr) getAggOpStr() string {
switch {
case node.Without:
aggrString += fmt.Sprintf(" without (%s) ", strings.Join(node.Grouping, ", "))
aggrString += fmt.Sprintf(" without (%s) ", joinLabels(node.Grouping))
case len(node.Grouping) > 0:
aggrString += fmt.Sprintf(" by (%s) ", strings.Join(node.Grouping, ", "))
aggrString += fmt.Sprintf(" by (%s) ", joinLabels(node.Grouping))
}
return aggrString
}
func joinLabels(ss []string) string {
for i, s := range ss {
// If the label is already quoted, don't quote it again.
if s[0] != '"' && s[0] != '\'' && s[0] != '`' && !model.IsValidLegacyMetricName(model.LabelValue(s)) {
ss[i] = fmt.Sprintf("\"%s\"", s)
}
}
return strings.Join(ss, ", ")
}
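
Round-tripping shows the effect; a sketch (UTF-8 name validation must be enabled, as the tests below do):

```go
model.NameValidationScheme = model.UTF8Validation
expr, err := ParseExpr(`sum by ("foo.bar") (task:errors:rate10s{job="s"})`)
if err != nil {
	panic(err)
}
fmt.Println(expr.String()) // sum by ("foo.bar") (task:errors:rate10s{job="s"})
```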
func (node *BinaryExpr) String() string {
returnBool := ""
if node.ReturnBool {

View file

@ -16,6 +16,7 @@ package parser
import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
@ -44,6 +45,14 @@ func TestExprString(t *testing.T) {
in: `sum without(instance) (task:errors:rate10s{job="s"})`,
out: `sum without (instance) (task:errors:rate10s{job="s"})`,
},
{
in: `sum by("foo.bar") (task:errors:rate10s{job="s"})`,
out: `sum by ("foo.bar") (task:errors:rate10s{job="s"})`,
},
{
in: `sum without("foo.bar") (task:errors:rate10s{job="s"})`,
out: `sum without ("foo.bar") (task:errors:rate10s{job="s"})`,
},
{
in: `topk(5, task:errors:rate10s{job="s"})`,
},
@ -157,6 +166,8 @@ func TestExprString(t *testing.T) {
},
}
model.NameValidationScheme = model.UTF8Validation
for _, test := range inputs {
expr, err := ParseExpr(test.in)
require.NoError(t, err)

View file

@ -1003,13 +1003,6 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
}
res := q.Exec(t.context)
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
if res.Err != nil {
if cmd.fail {
return cmd.checkExpectedFailure(res.Err)
@ -1020,6 +1013,13 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
defer q.Close()
if err := cmd.compareResult(res.Value); err != nil {
@ -1050,13 +1050,6 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
}
defer q.Close()
res := q.Exec(t.context)
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
if res.Err != nil {
if cmd.fail {
if err := cmd.checkExpectedFailure(res.Err); err != nil {
@ -1070,6 +1063,13 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
err = cmd.compareResult(res.Value)
if err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)

View file

@ -503,7 +503,7 @@ eval instant at 1m avg(data{test="-big"})
eval instant at 1m avg(data{test="bigzero"})
{} 0
# Test summing extreme values.
# Test summing and averaging extreme values.
clear
load 10s
@ -529,21 +529,39 @@ load 10s
eval instant at 1m sum(data{test="ten"})
{} 10
eval instant at 1m avg(data{test="ten"})
{} 2.5
eval instant at 1m sum by (group) (data{test="pos_inf"})
{group="1"} Inf
{group="2"} Inf
eval instant at 1m avg by (group) (data{test="pos_inf"})
{group="1"} Inf
{group="2"} Inf
eval instant at 1m sum by (group) (data{test="neg_inf"})
{group="1"} -Inf
{group="2"} -Inf
eval instant at 1m avg by (group) (data{test="neg_inf"})
{group="1"} -Inf
{group="2"} -Inf
eval instant at 1m sum(data{test="inf_inf"})
{} NaN
eval instant at 1m avg(data{test="inf_inf"})
{} NaN
eval instant at 1m sum by (group) (data{test="nan"})
{group="1"} NaN
{group="2"} NaN
eval instant at 1m avg by (group) (data{test="nan"})
{group="1"} NaN
{group="2"} NaN
clear
# Test that aggregations are deterministic.

View file

@ -748,7 +748,6 @@ eval instant at 1m avg_over_time(metric6c[1m])
eval instant at 1m sum_over_time(metric6c[1m])/count_over_time(metric6c[1m])
{} NaN
eval instant at 1m avg_over_time(metric7[1m])
{} NaN
@ -783,6 +782,9 @@ load 10s
eval instant at 1m sum_over_time(metric[1m])
{} 2
eval instant at 1m avg_over_time(metric[1m])
{} 0.5
# Tests for stddev_over_time and stdvar_over_time.
clear
load 10s

View file

@ -482,3 +482,29 @@ load_with_nhcb 5m
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})
# Histogram with constant buckets.
load_with_nhcb 1m
const_histogram_bucket{le="0.0"} 1 1 1 1 1
const_histogram_bucket{le="1.0"} 1 1 1 1 1
const_histogram_bucket{le="2.0"} 1 1 1 1 1
const_histogram_bucket{le="+Inf"} 1 1 1 1 1
# There is no change to the bucket count over time, thus rate is 0 in each bucket.
eval instant at 5m rate(const_histogram_bucket[5m])
{le="0.0"} 0
{le="1.0"} 0
{le="2.0"} 0
{le="+Inf"} 0
# Native histograms do not represent empty buckets, so here the zeros are implicit.
eval instant at 5m rate(const_histogram[5m])
{} {{schema:-53 sum:0 count:0 custom_values:[0.0 1.0 2.0]}}
# Zero buckets mean no observations, so there is no value that observations fall below,
# which means that any quantile is a NaN.
eval instant at 5m histogram_quantile(1.0, sum by (le) (rate(const_histogram_bucket[5m])))
{} NaN
eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m])))
{} NaN

View file

@ -748,3 +748,141 @@ eval instant at 5m histogram_quantile(0.5, custom_buckets_histogram)
eval instant at 5m sum(custom_buckets_histogram)
{} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}
clear
# Test 'this native histogram metric is not a gauge' warning for rate
load 30s
some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}}
# Test the case where we only have two points for rate
eval_warn instant at 30s rate(some_metric[30s])
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
# Test the case where we have more than two points for rate
eval_warn instant at 1m rate(some_metric[1m])
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
clear
# Test rate() over mixed exponential and custom buckets.
load 30s
some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
# Start and end with exponential, with custom in the middle.
eval_warn instant at 1m rate(some_metric[1m])
# Should produce no results.
# Start and end with custom, with exponential in the middle.
eval_warn instant at 1m30s rate(some_metric[1m])
# Should produce no results.
# Start with custom, end with exponential.
eval_warn instant at 1m rate(some_metric[30s])
# Should produce no results.
# Start with exponential, end with custom.
eval_warn instant at 30s rate(some_metric[30s])
# Should produce no results.
clear
# Histogram with constant buckets.
load 1m
const_histogram {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}}
# There is no change to the bucket count over time, thus rate is 0 in each bucket.
# However, native histograms do not represent empty buckets, so here the zeros are implicit.
eval instant at 5m rate(const_histogram[5m])
{} {{schema:0 sum:0 count:0}}
# Zero buckets mean no observations, thus the denominator in the average is 0
# leading to 0/0, which is NaN.
eval instant at 5m histogram_avg(rate(const_histogram[5m]))
{} NaN
# Zero buckets mean no observations, so count is 0.
eval instant at 5m histogram_count(rate(const_histogram[5m]))
{} 0.0
# Zero buckets mean no observations and empty histogram has a sum of 0 by definition.
eval instant at 5m histogram_sum(rate(const_histogram[5m]))
{} 0.0
# Zero buckets mean no observations, thus the denominator in the fraction is 0,
# leading to 0/0, which is NaN.
eval instant at 5m histogram_fraction(0.0, 1.0, rate(const_histogram[5m]))
{} NaN
# Workaround to calculate the observation count corresponding to NaN fraction.
eval instant at 5m histogram_count(rate(const_histogram[5m])) == 0.0 or histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) * histogram_count(rate(const_histogram[5m]))
{} 0.0
# Zero buckets mean no observations, so there is no value that observations fall below,
# which means that any quantile is a NaN.
eval instant at 5m histogram_quantile(1.0, rate(const_histogram[5m]))
{} NaN
# Zero buckets mean no observations, so there is no standard deviation.
eval instant at 5m histogram_stddev(rate(const_histogram[5m]))
{} NaN
# Zero buckets mean no observations, so there is no standard variance.
eval instant at 5m histogram_stdvar(rate(const_histogram[5m]))
{} NaN
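The 0/0 cases above reduce to plain float arithmetic; a sketch:

// Sketch: with zero observations both numerator and denominator are 0,
// and 0.0/0.0 evaluates to NaN under IEEE-754.
var sum, count float64
fmt.Println(sum/count, math.IsNaN(sum/count)) // NaN true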
clear
# Test mixing exponential and custom buckets.
load 6m
metric{series="exponential"} {{sum:4 count:3 buckets:[1 2 1]}} _ {{sum:4 count:3 buckets:[1 2 1]}}
metric{series="other-exponential"} {{sum:3 count:2 buckets:[1 1 1]}} _ {{sum:3 count:2 buckets:[1 1 1]}}
metric{series="custom"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric{series="other-custom"} _ {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}}
# T=0: only exponential
# T=6: only custom
# T=12: mixed, should be ignored and emit a warning
eval_warn range from 0 to 12m step 6m sum(metric)
{} {{sum:7 count:5 buckets:[2 3 2]}} {{schema:-53 sum:16 count:3 custom_values:[5 10] buckets:[1 2]}} _
eval_warn range from 0 to 12m step 6m avg(metric)
{} {{sum:3.5 count:2.5 buckets:[1 1.5 1]}} {{schema:-53 sum:8 count:1.5 custom_values:[5 10] buckets:[0.5 1]}} _
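The T=0 expectations can be checked by hand from the two exponential series in the load block above:

// Worked check of the T=0 sum/avg expectations (values from the load block).
fmt.Println(4.0+3.0, 3.0+2.0)                      // 7 5       -> sum's sum and count
fmt.Println((4.0+3.0)/2, (3.0+2.0)/2)              // 3.5 2.5   -> avg's sum and count
fmt.Println((1.0+1.0)/2, (2.0+1.0)/2, (1.0+1.0)/2) // 1 1.5 1   -> avg's buckets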
clear
# Test incompatible custom bucket schemas.
load 6m
metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
# T=0: incompatible, should be ignored and emit a warning
# T=6: compatible
# T=12: incompatible followed by compatible, should be ignored and emit a warning
eval_warn range from 0 to 12m step 6m sum(metric)
{} _ {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} _
eval_warn range from 0 to 12m step 6m avg(metric)
{} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _
clear
load 1m
metric{group="just-floats", series="1"} 2
metric{group="just-floats", series="2"} 3
metric{group="just-exponential-histograms", series="1"} {{sum:3 count:4 buckets:[1 2 1]}}
metric{group="just-exponential-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}}
metric{group="just-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
metric{group="just-custom-histograms", series="2"} {{schema:-53 sum:3 count:4 custom_values:[2] buckets:[7]}}
metric{group="floats-and-histograms", series="1"} 2
metric{group="floats-and-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}}
metric{group="exponential-and-custom-histograms", series="1"} {{sum:2 count:3 buckets:[1 1 1]}}
metric{group="exponential-and-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric{group="incompatible-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric{group="incompatible-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
eval_warn instant at 0 sum by (group) (metric)
{group="just-floats"} 5
{group="just-exponential-histograms"} {{sum:5 count:7 buckets:[2 3 2]}}
{group="just-custom-histograms"} {{schema:-53 sum:4 count:5 custom_values:[2] buckets:[8]}}

View file

@ -32,7 +32,6 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"go.uber.org/goleak"
"gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/model/labels"
@ -50,7 +49,7 @@ import (
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
prom_testutil.TolerantVerifyLeak(m)
}
func TestAlertingRule(t *testing.T) {

View file

@ -26,14 +26,14 @@ jobs:
- name: Checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Install Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: 1.22.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
with:
args: --verbose
version: v1.59.1

View file

@ -16,9 +16,10 @@ package storage
import "fmt"
type errDuplicateSampleForTimestamp struct {
timestamp int64
existing float64
newValue float64
timestamp int64
existing float64
existingIsHistogram bool
newValue float64
}
func NewDuplicateFloatErr(t int64, existing, newValue float64) error {
@ -29,13 +30,26 @@ func NewDuplicateFloatErr(t int64, existing, newValue float64) error {
}
}
// NewDuplicateHistogramToFloatErr describes an error where a new float sample is sent for the same timestamp as a previous histogram sample.
func NewDuplicateHistogramToFloatErr(t int64, newValue float64) error {
return errDuplicateSampleForTimestamp{
timestamp: t,
existingIsHistogram: true,
newValue: newValue,
}
}
func (e errDuplicateSampleForTimestamp) Error() string {
if e.timestamp == 0 {
return "duplicate sample for timestamp"
}
if e.existingIsHistogram {
return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing is a histogram, new value %g", e.timestamp, e.newValue)
}
return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing %g, new value %g", e.timestamp, e.existing, e.newValue)
}
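A sketch of the messages the two branches above produce (timestamp and values invented for illustration):

// Sketch: messages produced by the branches above (example values).
fmt.Println(NewDuplicateHistogramToFloatErr(1000, 2.5))
// duplicate sample for timestamp 1000; overrides not allowed: existing is a histogram, new value 2.5
fmt.Println(NewDuplicateFloatErr(1000, 1.5, 2.5))
// duplicate sample for timestamp 1000; overrides not allowed: existing 1.5, new value 2.5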
// Is implements the anonymous interface checked by errors.Is.
// Every errDuplicateSampleForTimestamp compares equal to the global ErrDuplicateSampleForTimestamp.
func (e errDuplicateSampleForTimestamp) Is(t error) bool {
if t == ErrDuplicateSampleForTimestamp {

storage/errors_test.go Normal file
View file

@ -0,0 +1,38 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestErrDuplicateSampleForTimestamp(t *testing.T) {
// All errDuplicateSampleForTimestamp are ErrDuplicateSampleForTimestamp
require.ErrorIs(t, ErrDuplicateSampleForTimestamp, errDuplicateSampleForTimestamp{})
// Errors of the same type only compare equal if they have the same properties.
err := NewDuplicateFloatErr(1_000, 10, 20)
sameErr := NewDuplicateFloatErr(1_000, 10, 20)
differentErr := NewDuplicateFloatErr(1_001, 30, 40)
require.ErrorIs(t, err, sameErr)
require.NotErrorIs(t, err, differentErr)
// Also works when err is wrapped.
require.ErrorIs(t, fmt.Errorf("failed: %w", err), sameErr)
require.NotErrorIs(t, fmt.Errorf("failed: %w", err), differentErr)
}

View file

@ -37,6 +37,7 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage/remote/azuread"
"github.com/prometheus/prometheus/storage/remote/googleiam"
)
const maxErrMsgLen = 1024
@ -131,6 +132,7 @@ type ClientConfig struct {
HTTPClientConfig config_util.HTTPClientConfig
SigV4Config *sigv4.SigV4Config
AzureADConfig *azuread.AzureADConfig
GoogleIAMConfig *googleiam.Config
Headers map[string]string
RetryOnRateLimit bool
WriteProtoMsg config.RemoteWriteProtoMsg
@ -192,6 +194,13 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
}
}
if conf.GoogleIAMConfig != nil {
t, err = googleiam.NewRoundTripper(conf.GoogleIAMConfig, t)
if err != nil {
return nil, err
}
}
writeProtoMsg := config.RemoteWriteProtoMsgV1
if conf.WriteProtoMsg != "" {
writeProtoMsg = conf.WriteProtoMsg

View file

@ -0,0 +1,54 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package googleiam provides an http.RoundTripper that attaches a Google Cloud access token
// to remote write requests.
package googleiam
import (
"context"
"fmt"
"net/http"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
apihttp "google.golang.org/api/transport/http"
)
type Config struct {
CredentialsFile string `yaml:"credentials_file,omitempty"`
}
// NewRoundTripper creates a round tripper that adds Google Cloud Monitoring authorization to calls
// using either a credentials file or the default credentials.
func NewRoundTripper(cfg *Config, next http.RoundTripper) (http.RoundTripper, error) {
if next == nil {
next = http.DefaultTransport
}
const scopes = "https://www.googleapis.com/auth/monitoring.write"
ctx := context.Background()
opts := []option.ClientOption{
option.WithScopes(scopes),
}
if cfg.CredentialsFile != "" {
opts = append(opts, option.WithCredentialsFile(cfg.CredentialsFile))
} else {
creds, err := google.FindDefaultCredentials(ctx, scopes)
if err != nil {
return nil, fmt.Errorf("error finding default Google credentials: %w", err)
}
opts = append(opts, option.WithCredentials(creds))
}
return apihttp.NewTransport(ctx, next, opts...)
}
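A hypothetical wiring sketch (credentials path invented) showing how the round tripper would back an HTTP client:

// Hypothetical usage sketch; the credentials path is invented.
rt, err := NewRoundTripper(&Config{CredentialsFile: "/etc/prometheus/gcp-creds.json"}, nil)
if err != nil {
	log.Fatal(err)
}
client := &http.Client{Transport: rt} // requests now carry the IAM access token
_ = client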

View file

@ -21,15 +21,14 @@ import (
"unicode"
)
// Normalizes the specified label to follow Prometheus label names standard
// Normalizes the specified label to follow Prometheus label names standard.
//
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels.
//
// Labels that start with non-letter rune will be prefixed with "key_"
// Labels that start with non-letter rune will be prefixed with "key_".
//
// Exception is made for double-underscores which are allowed
// An exception is made for double-underscores which are allowed.
func NormalizeLabel(label string) string {
// Trivial case
if len(label) == 0 {
return label
@ -48,7 +47,7 @@ func NormalizeLabel(label string) string {
return label
}
// Return '_' for anything non-alphanumeric
// Return '_' for anything non-alphanumeric.
func sanitizeRune(r rune) rune {
if unicode.IsLetter(r) || unicode.IsDigit(r) {
return r
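Expected behavior of the documented rules, as a sketch (outputs inferred from the rules above, not run against this diff):

// Sketch of the documented normalization rules (assumed outputs).
fmt.Println(NormalizeLabel("foo.bar"))  // "foo_bar"   (non-alphanumerics -> _)
fmt.Println(NormalizeLabel("0day"))     // "key_0day"  (leading non-letter)
fmt.Println(NormalizeLabel("__name__")) // "__name__"  (double underscores kept)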

View file

@ -76,14 +76,15 @@ var perUnitMap = map[string]string{
"y": "year",
}
// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric
// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric.
//
// Metric name is prefixed with specified namespace and underscore (if any).
// Namespace is not cleaned up. Make sure specified namespace follows Prometheus
// naming convention.
//
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
// and https://prometheus.io/docs/practices/naming/#metric-and-label-naming
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels,
// https://prometheus.io/docs/practices/naming/#metric-and-label-naming
// and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus.
func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string {
var metricName string
@ -110,7 +111,7 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix
// Build a normalized name for the specified metric
func normalizeName(metric pmetric.Metric, namespace string) string {
// Split metric name in "tokens" (remove all non-alphanumeric)
// Split metric name into "tokens" (remove all non-alphanumerics)
nameTokens := strings.FieldsFunc(
metric.Name(),
func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) },
@ -122,9 +123,9 @@ func normalizeName(metric pmetric.Metric, namespace string) string {
// Main unit
// Append if not blank, doesn't contain '{}', and is not present in metric name already
if len(unitTokens) > 0 {
mainUnitOtel := strings.TrimSpace(unitTokens[0])
if mainUnitOtel != "" && !strings.ContainsAny(mainUnitOtel, "{}") {
mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOtel))
mainUnitOTel := strings.TrimSpace(unitTokens[0])
if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") {
mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOTel))
if mainUnitProm != "" && !contains(nameTokens, mainUnitProm) {
nameTokens = append(nameTokens, mainUnitProm)
}
@ -133,11 +134,11 @@ func normalizeName(metric pmetric.Metric, namespace string) string {
// Per unit
// Append if not blank, doesn't contain '{}', and is not present in metric name already
if len(unitTokens) > 1 && unitTokens[1] != "" {
perUnitOtel := strings.TrimSpace(unitTokens[1])
if perUnitOtel != "" && !strings.ContainsAny(perUnitOtel, "{}") {
perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOtel))
perUnitOTel := strings.TrimSpace(unitTokens[1])
if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") {
perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOTel))
if perUnitProm != "" && !contains(nameTokens, perUnitProm) {
nameTokens = append(append(nameTokens, "per"), perUnitProm)
nameTokens = append(nameTokens, "per", perUnitProm)
}
}
}
@ -150,7 +151,7 @@ func normalizeName(metric pmetric.Metric, namespace string) string {
}
// Append _ratio for metrics with unit "1"
// Some Otel receivers improperly use unit "1" for counters of objects
// Some OTel receivers improperly use unit "1" for counters of objects
// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions
// Until these issues have been fixed, we're appending `_ratio` for gauges ONLY
// Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons)

View file

@ -0,0 +1,205 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name_test.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
package prometheus
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pmetric"
)
func TestByte(t *testing.T) {
require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), ""))
}
func TestByteCounter(t *testing.T) {
require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), ""))
require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), ""))
}
func TestWhiteSpaces(t *testing.T) {
require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), ""))
}
func TestNonStandardUnit(t *testing.T) {
require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), ""))
}
func TestNonStandardUnitCounter(t *testing.T) {
require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), ""))
}
func TestBrokenUnit(t *testing.T) {
require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), ""))
require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), ""))
require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), ""))
}
func TestBrokenUnitCounter(t *testing.T) {
require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), ""))
require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), ""))
require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), ""))
}
func TestRatio(t *testing.T) {
require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), ""))
require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), ""))
require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), ""))
}
func TestHertz(t *testing.T) {
require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), ""))
}
func TestPer(t *testing.T) {
require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), ""))
require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), ""))
}
func TestPercent(t *testing.T) {
require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), ""))
require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), ""))
}
func TestEmpty(t *testing.T) {
require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), ""))
require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), ""))
}
func TestUnsupportedRunes(t *testing.T) {
require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), ""))
require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), ""))
require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), ""))
}
func TestOTelReceivers(t *testing.T) {
require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), ""))
require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), ""))
require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), ""))
require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), ""))
require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), ""))
require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), ""))
require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), ""))
require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), ""))
require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), ""))
require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), ""))
require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), ""))
require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), ""))
require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), ""))
require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), ""))
require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), ""))
require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), ""))
require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), ""))
require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), ""))
require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), ""))
require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), ""))
require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), ""))
require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), ""))
}
func TestTrimPromSuffixes(t *testing.T) {
assert.Equal(t, "active_directory_ds_replication_network_io", TrimPromSuffixes("active_directory_ds_replication_network_io_bytes_total", pmetric.MetricTypeSum, "bytes"))
assert.Equal(t, "active_directory_ds_name_cache_hit_rate", TrimPromSuffixes("active_directory_ds_name_cache_hit_rate_percent", pmetric.MetricTypeGauge, "percent"))
assert.Equal(t, "active_directory_ds_ldap_bind_last_successful_time", TrimPromSuffixes("active_directory_ds_ldap_bind_last_successful_time_milliseconds", pmetric.MetricTypeGauge, "milliseconds"))
assert.Equal(t, "apache_requests", TrimPromSuffixes("apache_requests_total", pmetric.MetricTypeSum, "1"))
assert.Equal(t, "system_cpu_utilization", TrimPromSuffixes("system_cpu_utilization_ratio", pmetric.MetricTypeGauge, "ratio"))
assert.Equal(t, "mongodbatlas_process_journaling_data_files", TrimPromSuffixes("mongodbatlas_process_journaling_data_files_mebibytes", pmetric.MetricTypeGauge, "mebibytes"))
assert.Equal(t, "mongodbatlas_process_network_io", TrimPromSuffixes("mongodbatlas_process_network_io_bytes_per_second", pmetric.MetricTypeGauge, "bytes_per_second"))
assert.Equal(t, "mongodbatlas_process_oplog_rate", TrimPromSuffixes("mongodbatlas_process_oplog_rate_gibibytes_per_hour", pmetric.MetricTypeGauge, "gibibytes_per_hour"))
assert.Equal(t, "nsxt_node_memory_usage", TrimPromSuffixes("nsxt_node_memory_usage_kilobytes", pmetric.MetricTypeGauge, "kilobytes"))
assert.Equal(t, "redis_latest_fork", TrimPromSuffixes("redis_latest_fork_microseconds", pmetric.MetricTypeGauge, "microseconds"))
assert.Equal(t, "up", TrimPromSuffixes("up", pmetric.MetricTypeGauge, ""))
// These are not necessarily valid OM units, only tested for the sake of completeness.
assert.Equal(t, "active_directory_ds_replication_sync_object_pending", TrimPromSuffixes("active_directory_ds_replication_sync_object_pending_total", pmetric.MetricTypeSum, "{objects}"))
assert.Equal(t, "apache_current", TrimPromSuffixes("apache_current_connections", pmetric.MetricTypeGauge, "connections"))
assert.Equal(t, "bigip_virtual_server_request_count", TrimPromSuffixes("bigip_virtual_server_request_count_total", pmetric.MetricTypeSum, "{requests}"))
assert.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", TrimPromSuffixes("mongodbatlas_process_db_query_targeting_scanned_per_returned", pmetric.MetricTypeGauge, "{scanned}/{returned}"))
assert.Equal(t, "nginx_connections_accepted", TrimPromSuffixes("nginx_connections_accepted", pmetric.MetricTypeGauge, "connections"))
assert.Equal(t, "apache_workers", TrimPromSuffixes("apache_workers_connections", pmetric.MetricTypeGauge, "connections"))
assert.Equal(t, "nginx", TrimPromSuffixes("nginx_requests", pmetric.MetricTypeGauge, "requests"))
// Units shouldn't be trimmed if the unit is not a direct match with the suffix, e.g. a suffix "_seconds" shouldn't be removed if the unit is "sec" or "s".
assert.Equal(t, "system_cpu_load_average_15m_ratio", TrimPromSuffixes("system_cpu_load_average_15m_ratio", pmetric.MetricTypeGauge, "1"))
assert.Equal(t, "mongodbatlas_process_asserts_per_second", TrimPromSuffixes("mongodbatlas_process_asserts_per_second", pmetric.MetricTypeGauge, "{assertions}/s"))
assert.Equal(t, "memcached_operation_hit_ratio_percent", TrimPromSuffixes("memcached_operation_hit_ratio_percent", pmetric.MetricTypeGauge, "%"))
assert.Equal(t, "active_directory_ds_replication_object_rate_per_second", TrimPromSuffixes("active_directory_ds_replication_object_rate_per_second", pmetric.MetricTypeGauge, "{objects}/s"))
assert.Equal(t, "system_disk_operation_time_seconds", TrimPromSuffixes("system_disk_operation_time_seconds_total", pmetric.MetricTypeSum, "s"))
}
func TestNamespace(t *testing.T) {
require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space"))
require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space"))
}
func TestCleanUpString(t *testing.T) {
require.Equal(t, "", CleanUpString(""))
require.Equal(t, "a_b", CleanUpString("a b"))
require.Equal(t, "hello_world", CleanUpString("hello, world!"))
require.Equal(t, "hello_you_2", CleanUpString("hello you 2"))
require.Equal(t, "1000", CleanUpString("$1000"))
require.Equal(t, "", CleanUpString("*+$^=)"))
}
func TestUnitMapGetOrDefault(t *testing.T) {
require.Equal(t, "", unitMapGetOrDefault(""))
require.Equal(t, "seconds", unitMapGetOrDefault("s"))
require.Equal(t, "invalid", unitMapGetOrDefault("invalid"))
}
func TestPerUnitMapGetOrDefault(t *testing.T) {
require.Equal(t, "", perUnitMapGetOrDefault(""))
require.Equal(t, "second", perUnitMapGetOrDefault("s"))
require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid"))
}
func TestRemoveItem(t *testing.T) {
require.Equal(t, []string{}, removeItem([]string{}, "test"))
require.Equal(t, []string{}, removeItem([]string{}, ""))
require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d"))
require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, ""))
require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c"))
require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b"))
require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a"))
}
func TestBuildCompliantNameWithNormalize(t *testing.T) {
require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true))
require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true))
require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true))
require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true))
require.Equal(t, "foo_bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true))
require.Equal(t, "foo_bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true))
// Gauges with unit 1 are considered ratios.
require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true))
// Slashes in units are converted.
require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true))
}
func TestBuildCompliantNameWithoutSuffixes(t *testing.T) {
require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false))
require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false))
require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false))
require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false))
require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false))
require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false))
require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false))
require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false))
require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false))
}

View file

@ -0,0 +1,49 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/testutils_test.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
package prometheus
import (
"go.opentelemetry.io/collector/pdata/pmetric"
)
var ilm pmetric.ScopeMetrics
func init() {
metrics := pmetric.NewMetrics()
resourceMetrics := metrics.ResourceMetrics().AppendEmpty()
ilm = resourceMetrics.ScopeMetrics().AppendEmpty()
}
// Returns a new Metric of type "Gauge" with specified name and unit
func createGauge(name string, unit string) pmetric.Metric {
gauge := ilm.Metrics().AppendEmpty()
gauge.SetName(name)
gauge.SetUnit(unit)
gauge.SetEmptyGauge()
return gauge
}
// Returns a new Metric of type Monotonic Sum with specified name and unit
func createCounter(name string, unit string) pmetric.Metric {
counter := ilm.Metrics().AppendEmpty()
counter.SetEmptySum().SetIsMonotonic(true)
counter.SetName(name)
counter.SetUnit(unit)
return counter
}

View file

@ -1109,9 +1109,9 @@ func (t *QueueManager) shouldReshard(desiredShards int) bool {
if desiredShards == t.numShards {
return false
}
// We shouldn't reshard if Prometheus hasn't been able to send to the
// remote endpoint successfully within some period of time.
minSendTimestamp := time.Now().Add(-2 * time.Duration(t.cfg.BatchSendDeadline)).Unix()
// We shouldn't reshard if Prometheus hasn't been able to send
// since the last time it checked if it should reshard.
minSendTimestamp := time.Now().Add(-1 * shardUpdateDuration).Unix()
lsts := t.lastSendTimestamp.Load()
if lsts < minSendTimestamp {
level.Warn(t.logger).Log("msg", "Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp)
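A numeric sketch of the new guard, assuming shardUpdateDuration is the 10s reshard-loop interval used upstream:

// Sketch, assuming shardUpdateDuration = 10 * time.Second as upstream.
lastSend := time.Now().Add(-15 * time.Second).Unix() // last success 15s ago
minSend := time.Now().Add(-shardUpdateDuration).Unix()
fmt.Println(lastSend < minSend) // true -> resharding is skipped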

View file

@ -703,32 +703,35 @@ func TestShouldReshard(t *testing.T) {
startingShards int
samplesIn, samplesOut, lastSendTimestamp int64
expectedToReshard bool
sendDeadline model.Duration
}
cases := []testcase{
{
// Resharding shouldn't take place if the last successful send was > batch send deadline*2 seconds ago.
// Resharding shouldn't take place if we haven't successfully sent
// since the last shardUpdateDuration, even if the send deadline is very low.
startingShards: 10,
samplesIn: 1000,
samplesOut: 10,
lastSendTimestamp: time.Now().Unix() - int64(3*time.Duration(config.DefaultQueueConfig.BatchSendDeadline)/time.Second),
lastSendTimestamp: time.Now().Unix() - int64(shardUpdateDuration),
expectedToReshard: false,
sendDeadline: model.Duration(100 * time.Millisecond),
},
{
startingShards: 5,
startingShards: 10,
samplesIn: 1000,
samplesOut: 10,
lastSendTimestamp: time.Now().Unix(),
expectedToReshard: true,
sendDeadline: config.DefaultQueueConfig.BatchSendDeadline,
},
}
for _, c := range cases {
_, m := newTestClientAndQueueManager(t, defaultFlushDeadline, config.RemoteWriteProtoMsgV1)
_, m := newTestClientAndQueueManager(t, time.Duration(c.sendDeadline), config.RemoteWriteProtoMsgV1)
m.numShards = c.startingShards
m.dataIn.incr(c.samplesIn)
m.dataOut.incr(c.samplesOut)
m.lastSendTimestamp.Store(c.lastSendTimestamp)
m.Start()
desiredShards := m.calculateDesiredShards()

View file

@ -202,16 +202,34 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
return err
}
chunks := h.getChunkSeriesSet(ctx, query, filteredMatchers)
if err := chunks.Err(); err != nil {
querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs)
if err != nil {
return err
}
defer func() {
if err := querier.Close(); err != nil {
level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error())
}
}()
var hints *storage.SelectHints
if query.Hints != nil {
hints = &storage.SelectHints{
Start: query.Hints.StartMs,
End: query.Hints.EndMs,
Step: query.Hints.StepMs,
Func: query.Hints.Func,
Grouping: query.Hints.Grouping,
Range: query.Hints.RangeMs,
By: query.Hints.By,
}
}
ws, err := StreamChunkedReadResponses(
NewChunkedWriter(w, f),
int64(i),
// The streaming API has to provide the series sorted.
chunks,
querier.Select(ctx, true, hints, filteredMatchers...),
sortedExternalLabels,
h.remoteReadMaxBytesInFrame,
h.marshalPool,
@ -236,35 +254,6 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
}
}
// getChunkSeriesSet executes a query to retrieve a ChunkSeriesSet,
// encapsulating the operation in its own function to ensure timely release of
// the querier resources.
func (h *readHandler) getChunkSeriesSet(ctx context.Context, query *prompb.Query, filteredMatchers []*labels.Matcher) storage.ChunkSeriesSet {
querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs)
if err != nil {
return storage.ErrChunkSeriesSet(err)
}
defer func() {
if err := querier.Close(); err != nil {
level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error())
}
}()
var hints *storage.SelectHints
if query.Hints != nil {
hints = &storage.SelectHints{
Start: query.Hints.StartMs,
End: query.Hints.EndMs,
Step: query.Hints.StepMs,
Func: query.Hints.Func,
Grouping: query.Hints.Grouping,
Range: query.Hints.RangeMs,
By: query.Hints.By,
}
}
return querier.Select(ctx, true, hints, filteredMatchers...)
}
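The deleted helper illustrates a classic Go pitfall: its deferred querier.Close() ran when getChunkSeriesSet returned, before the caller had streamed the series set. A self-contained sketch of the pattern with hypothetical types:

type res struct{ closed bool }

func (r *res) Close()     { r.closed = true }
func (r *res) Read() bool { return !r.closed } // false once closed

// leaky mirrors the removed helper: defer fires when leaky returns,
// not when the caller is done with the result.
func leaky() *res {
	r := &res{}
	defer r.Close()
	return r
}
// leaky().Read() == false: the caller receives an already-closed resource.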
// filterExtLabelsFromMatchers change equality matchers which match external labels
// to a matcher that looks for an empty label,
// as that label should not be present in the storage.

View file

@ -176,6 +176,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
HTTPClientConfig: rwConf.HTTPClientConfig,
SigV4Config: rwConf.SigV4Config,
AzureADConfig: rwConf.AzureADConfig,
GoogleIAMConfig: rwConf.GoogleIAMConfig,
Headers: rwConf.Headers,
RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
})

View file

@ -126,6 +126,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
type histogramTest struct {
samples []chunks.Sample
expectedSamples []chunks.Sample
expectedCounterResetHeaders []chunkenc.CounterResetHeader
}
@ -141,6 +142,32 @@ func TestHistogramSeriesToChunks(t *testing.T) {
},
PositiveBuckets: []int64{2, 1}, // Abs: 2, 3
}
// h1 but with an extra empty bucket at offset -10.
// This can happen if h1 is from a recoded chunk, where a later histogram had a bucket at offset -10.
h1ExtraBuckets := &histogram.Histogram{
Count: 7,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 100,
Schema: 0,
PositiveSpans: []histogram.Span{
{Offset: -10, Length: 1},
{Offset: 9, Length: 2},
},
PositiveBuckets: []int64{0, 2, 1}, // Abs: 0, 2, 3
}
h1Recoded := &histogram.Histogram{
Count: 7,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 100,
Schema: 0,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{2, 1, -3, 0}, // Abs: 2, 3, 0, 0
}
// Appendable to h1.
h2 := &histogram.Histogram{
Count: 12,
@ -179,6 +206,32 @@ func TestHistogramSeriesToChunks(t *testing.T) {
},
PositiveBuckets: []float64{3, 1},
}
// fh1 but with an extra empty bucket at offset -10.
// This can happen if fh1 is from a recoded chunk, where a later histogram had a bucket at offset -10.
fh1ExtraBuckets := &histogram.FloatHistogram{
Count: 6,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 100,
Schema: 0,
PositiveSpans: []histogram.Span{
{Offset: -10, Length: 1},
{Offset: 9, Length: 2},
},
PositiveBuckets: []float64{0, 3, 1},
}
fh1Recoded := &histogram.FloatHistogram{
Count: 6,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 100,
Schema: 0,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{3, 1, 0, 0},
}
// Appendable to fh1.
fh2 := &histogram.FloatHistogram{
Count: 17,
@ -219,6 +272,20 @@ func TestHistogramSeriesToChunks(t *testing.T) {
},
PositiveBuckets: []int64{2, 1}, // Abs: 2, 3
}
// gh1 recoded to add extra empty buckets at end.
gh1Recoded := &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 7,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 100,
Schema: 0,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{2, 1, -3, 0}, // Abs: 2, 3, 0, 0
}
gh2 := &histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Count: 12,
@ -246,6 +313,20 @@ func TestHistogramSeriesToChunks(t *testing.T) {
},
PositiveBuckets: []float64{3, 1},
}
// gfh1 recoded to add extra empty buckets at end.
gfh1Recoded := &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 6,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 100,
Schema: 0,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{3, 1, 0, 0},
}
gfh2 := &histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Count: 17,
@ -272,6 +353,9 @@ func TestHistogramSeriesToChunks(t *testing.T) {
samples: []chunks.Sample{
hSample{t: 1, h: h1},
},
expectedSamples: []chunks.Sample{
hSample{t: 1, h: h1},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
},
"two histograms encoded to a single chunk": {
@ -279,6 +363,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
hSample{t: 1, h: h1},
hSample{t: 2, h: h2},
},
expectedSamples: []chunks.Sample{
hSample{t: 1, h: h1Recoded},
hSample{t: 2, h: h2},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
},
"two histograms encoded to two chunks": {
@ -286,6 +374,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
hSample{t: 1, h: h2},
hSample{t: 2, h: h1},
},
expectedSamples: []chunks.Sample{
hSample{t: 1, h: h2},
hSample{t: 2, h: h1},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
},
"histogram and stale sample encoded to two chunks": {
@ -293,6 +385,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
hSample{t: 1, h: staleHistogram},
hSample{t: 2, h: h1},
},
expectedSamples: []chunks.Sample{
hSample{t: 1, h: staleHistogram},
hSample{t: 2, h: h1},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
},
"histogram and reduction in bucket encoded to two chunks": {
@ -300,6 +396,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
hSample{t: 1, h: h1},
hSample{t: 2, h: h2down},
},
expectedSamples: []chunks.Sample{
hSample{t: 1, h: h1},
hSample{t: 2, h: h2down},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
},
// Float histograms.
@ -307,6 +407,9 @@ func TestHistogramSeriesToChunks(t *testing.T) {
samples: []chunks.Sample{
fhSample{t: 1, fh: fh1},
},
expectedSamples: []chunks.Sample{
fhSample{t: 1, fh: fh1},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
},
"two float histograms encoded to a single chunk": {
@ -314,6 +417,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
fhSample{t: 1, fh: fh1},
fhSample{t: 2, fh: fh2},
},
expectedSamples: []chunks.Sample{
fhSample{t: 1, fh: fh1Recoded},
fhSample{t: 2, fh: fh2},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
},
"two float histograms encoded to two chunks": {
@ -321,6 +428,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
fhSample{t: 1, fh: fh2},
fhSample{t: 2, fh: fh1},
},
expectedSamples: []chunks.Sample{
fhSample{t: 1, fh: fh2},
fhSample{t: 2, fh: fh1},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
},
"float histogram and stale sample encoded to two chunks": {
@ -328,6 +439,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
fhSample{t: 1, fh: staleFloatHistogram},
fhSample{t: 2, fh: fh1},
},
expectedSamples: []chunks.Sample{
fhSample{t: 1, fh: staleFloatHistogram},
fhSample{t: 2, fh: fh1},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
},
"float histogram and reduction in bucket encoded to two chunks": {
@ -335,6 +450,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
fhSample{t: 1, fh: fh1},
fhSample{t: 2, fh: fh2down},
},
expectedSamples: []chunks.Sample{
fhSample{t: 1, fh: fh1},
fhSample{t: 2, fh: fh2down},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
},
// Mixed.
@ -343,6 +462,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
hSample{t: 1, h: h1},
fhSample{t: 2, fh: fh2},
},
expectedSamples: []chunks.Sample{
hSample{t: 1, h: h1},
fhSample{t: 2, fh: fh2},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
},
"float histogram and histogram encoded to two chunks": {
@ -350,6 +473,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
fhSample{t: 1, fh: fh1},
hSample{t: 2, h: h2},
},
expectedSamples: []chunks.Sample{
fhSample{t: 1, fh: fh1},
hSample{t: 2, h: h2},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
},
"histogram and stale float histogram encoded to two chunks": {
@ -357,12 +484,19 @@ func TestHistogramSeriesToChunks(t *testing.T) {
hSample{t: 1, h: h1},
fhSample{t: 2, fh: staleFloatHistogram},
},
expectedSamples: []chunks.Sample{
hSample{t: 1, h: h1},
fhSample{t: 2, fh: staleFloatHistogram},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
},
"single gauge histogram encoded to one chunk": {
samples: []chunks.Sample{
hSample{t: 1, h: gh1},
},
expectedSamples: []chunks.Sample{
hSample{t: 1, h: gh1},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
},
"two gauge histograms encoded to one chunk when counter increases": {
@ -370,6 +504,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
hSample{t: 1, h: gh1},
hSample{t: 2, h: gh2},
},
expectedSamples: []chunks.Sample{
hSample{t: 1, h: gh1Recoded},
hSample{t: 2, h: gh2},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
},
"two gauge histograms encoded to one chunk when counter decreases": {
@ -377,12 +515,19 @@ func TestHistogramSeriesToChunks(t *testing.T) {
hSample{t: 1, h: gh2},
hSample{t: 2, h: gh1},
},
expectedSamples: []chunks.Sample{
hSample{t: 1, h: gh2},
hSample{t: 2, h: gh1Recoded},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
},
"single gauge float histogram encoded to one chunk": {
samples: []chunks.Sample{
fhSample{t: 1, fh: gfh1},
},
expectedSamples: []chunks.Sample{
fhSample{t: 1, fh: gfh1},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
},
"two float gauge histograms encoded to one chunk when counter increases": {
@ -390,6 +535,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
fhSample{t: 1, fh: gfh1},
fhSample{t: 2, fh: gfh2},
},
expectedSamples: []chunks.Sample{
fhSample{t: 1, fh: gfh1Recoded},
fhSample{t: 2, fh: gfh2},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
},
"two float gauge histograms encoded to one chunk when counter decreases": {
@ -397,8 +546,34 @@ func TestHistogramSeriesToChunks(t *testing.T) {
fhSample{t: 1, fh: gfh2},
fhSample{t: 2, fh: gfh1},
},
expectedSamples: []chunks.Sample{
fhSample{t: 1, fh: gfh2},
fhSample{t: 2, fh: gfh1Recoded},
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
},
"histogram with extra empty bucket followed by histogram encodes to one chunk": {
samples: []chunks.Sample{
hSample{t: 1, h: h1ExtraBuckets},
hSample{t: 2, h: h1},
},
expectedSamples: []chunks.Sample{
hSample{t: 1, h: h1ExtraBuckets},
hSample{t: 2, h: h1ExtraBuckets}, // Recoded to add the missing buckets.
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
},
"float histogram with extra empty bucket followed by float histogram encodes to one chunk": {
samples: []chunks.Sample{
fhSample{t: 1, fh: fh1ExtraBuckets},
fhSample{t: 2, fh: fh1},
},
expectedSamples: []chunks.Sample{
fhSample{t: 1, fh: fh1ExtraBuckets},
fhSample{t: 2, fh: fh1ExtraBuckets}, // Recoded to add the missing buckets.
},
expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
},
}
for testName, test := range tests {
@ -431,9 +606,9 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
// Decode all encoded samples and assert they are equal to the original ones.
encodedSamples := chunks.ChunkMetasToSamples(chks)
require.Equal(t, len(test.samples), len(encodedSamples))
require.Equal(t, len(test.expectedSamples), len(encodedSamples))
for i, s := range test.samples {
for i, s := range test.expectedSamples {
encodedSample := encodedSamples[i]
switch expectedSample := s.(type) {
case hSample:
@ -447,7 +622,7 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
require.True(t, value.IsStaleNaN(h.Sum), fmt.Sprintf("at idx %d", i))
continue
}
require.Equal(t, *expectedSample.h, *h.Compact(0), fmt.Sprintf("at idx %d", i))
require.Equal(t, *expectedSample.h, *h, fmt.Sprintf("at idx %d", i))
case fhSample:
require.Equal(t, chunkenc.ValFloatHistogram, encodedSample.Type(), "expect float histogram", fmt.Sprintf("at idx %d", i))
fh := encodedSample.FH()
@ -459,7 +634,7 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
require.True(t, value.IsStaleNaN(fh.Sum), fmt.Sprintf("at idx %d", i))
continue
}
require.Equal(t, *expectedSample.fh, *fh.Compact(0), fmt.Sprintf("at idx %d", i))
require.Equal(t, *expectedSample.fh, *fh, fmt.Sprintf("at idx %d", i))
default:
t.Error("internal error, unexpected type")
}

View file

@ -23,7 +23,6 @@ import (
"net"
"net/url"
"sort"
"strconv"
"strings"
text_template "text/template"
"time"
@ -106,25 +105,6 @@ func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (quer
return result, nil
}
func convertToFloat(i interface{}) (float64, error) {
switch v := i.(type) {
case float64:
return v, nil
case string:
return strconv.ParseFloat(v, 64)
case int:
return float64(v), nil
case uint:
return float64(v), nil
case int64:
return float64(v), nil
case uint64:
return float64(v), nil
default:
return 0, fmt.Errorf("can't convert %T to float", v)
}
}
// Expander executes templates in text or HTML mode with a common set of Prometheus template functions.
type Expander struct {
text string
@ -219,7 +199,7 @@ func NewTemplateExpander(
return host
},
"humanize": func(i interface{}) (string, error) {
v, err := convertToFloat(i)
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return "", err
}
@ -248,7 +228,7 @@ func NewTemplateExpander(
return fmt.Sprintf("%.4g%s", v, prefix), nil
},
"humanize1024": func(i interface{}) (string, error) {
v, err := convertToFloat(i)
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return "", err
}
@ -267,30 +247,15 @@ func NewTemplateExpander(
},
"humanizeDuration": common_templates.HumanizeDuration,
"humanizePercentage": func(i interface{}) (string, error) {
v, err := convertToFloat(i)
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return "", err
}
return fmt.Sprintf("%.4g%%", v*100), nil
},
"humanizeTimestamp": func(i interface{}) (string, error) {
v, err := convertToFloat(i)
if err != nil {
return "", err
}
tm, err := floatToTime(v)
switch {
case errors.Is(err, errNaNOrInf):
return fmt.Sprintf("%.4g", v), nil
case err != nil:
return "", err
}
return fmt.Sprint(tm), nil
},
"humanizeTimestamp": common_templates.HumanizeTimestamp,
"toTime": func(i interface{}) (*time.Time, error) {
v, err := convertToFloat(i)
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return nil, err
}
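The local convertToFloat helper above is removed in favor of the shared ConvertToFloat from prometheus/common, which the remaining template functions now call. A minimal standalone sketch of the shared helper, assuming it accepts the same input types as the removed local version:

package main

import (
    "fmt"

    common_templates "github.com/prometheus/common/helpers/templates"
)

func main() {
    for _, in := range []interface{}{"1234.5", 42, uint64(7)} {
        v, err := common_templates.ConvertToFloat(in)
        if err != nil {
            fmt.Println("error:", err)
            continue
        }
        fmt.Printf("%T -> %g\n", in, v)
    }
}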

View file

@ -219,16 +219,25 @@ func (a *FloatHistogramAppender) Append(int64, float64) {
}
// appendable returns whether the chunk can be appended to, and if so whether
// any recoding needs to happen using the provided inserts (in case of any new
// buckets, positive or negative range, respectively). If the sample is a gauge
// histogram, AppendableGauge must be used instead.
// 1. Any recoding needs to happen to the chunk using the provided forward
// inserts (in case of any new buckets, positive or negative range,
// respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the
// backward inserts (in case of any missing buckets, positive or negative
// range, respectively).
//
// If the sample is a gauge histogram, AppendableGauge must be used instead.
//
// The chunk is not appendable in the following cases:
//
// - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed.
// - Any buckets have disappeared.
// - There was a counter reset in the count of observations or in any bucket, including the zero bucket.
// - Any buckets have disappeared, unless the bucket count was 0 (unused).
// Empty buckets can happen if the chunk was recoded and we're merging a
// non-recoded histogram. In this case backward inserts will be provided.
// - There was a counter reset in the count of observations or in any bucket,
// including the zero bucket.
// - The last sample in the chunk was stale while the current sample is not stale.
//
// The method returns an additional boolean set to true if it is not appendable
@ -236,6 +245,7 @@ func (a *FloatHistogramAppender) Append(int64, float64) {
// append. If counterReset is true, okToAppend is always false.
func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
positiveInserts, negativeInserts []Insert,
backwardPositiveInserts, backwardNegativeInserts []Insert,
okToAppend, counterReset bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
@ -279,27 +289,218 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
}
var ok bool
positiveInserts, ok = expandSpansForward(a.pSpans, h.PositiveSpans)
positiveInserts, backwardPositiveInserts, ok = expandFloatSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets)
if !ok {
counterReset = true
return
}
negativeInserts, ok = expandSpansForward(a.nSpans, h.NegativeSpans)
negativeInserts, backwardNegativeInserts, ok = expandFloatSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets)
if !ok {
counterReset = true
return
}
if counterResetInAnyFloatBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) ||
counterResetInAnyFloatBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) {
counterReset, positiveInserts, negativeInserts = true, nil, nil
return
}
okToAppend = true
return
}
// expandFloatSpansAndBuckets returns the inserts to expand the bucket spans 'a' so that
// they match the spans in 'b'. 'b' must cover the same or more buckets than
// 'a', except for buckets that are unused (zero count) in 'a'; otherwise the
// function returns false.
// The function also returns the inserts to expand 'b' to cover all the
// buckets that are missing in 'b' but present with a 0 counter value in 'a'.
// The function also checks for counter resets between 'a' and 'b'.
//
// Example:
//
// Let's say the old buckets look like this:
//
// span syntax: [offset, length]
// spans : [ 0 , 2 ] [2,1] [ 3 , 2 ] [3,1] [1,1]
// bucket idx : [0] [1] 2 3 [4] 5 6 7 [8] [9] 10 11 12 [13] 14 [15]
// raw values 6 3 3 2 4 5 1
// deltas 6 -3 0 -1 2 1 -4
//
// But now we introduce a new bucket layout. (Carefully chosen example where we
// have a span appended, one unchanged[*], one prepended, and two merged - in
// that order.)
//
// [*] Unchanged in terms of which bucket indices they represent, but to
// achieve that, their offsets need to change if "disrupted" by spans changing
// ahead of them.
//
// \/ this one is "unchanged"
// spans : [ 0 , 3 ] [1,1] [ 1 , 4 ] [ 3 , 3 ]
// bucket idx : [0] [1] [2] 3 [4] 5 [6] [7] [8] [9] 10 11 12 [13] [14] [15]
// raw values 6 3 0 3 0 0 2 4 5 0 1
// deltas 6 -3 -3 3 -3 0 2 2 1 -5 1
// delta mods: / \ / \ / \
//
// Note for histograms with delta-encoded buckets: Whenever any new buckets are
// introduced, the subsequent "old" bucket needs to readjust its delta to the
// new base of 0. Thus, for the caller who wants to transform the set of
// original deltas to a new set of deltas to match a new span layout that adds
// buckets, we simply need to generate a list of inserts.
//
// Note: Within expandFloatSpansAndBuckets we don't have to worry about the
// changes to the spans themselves, thanks to the iterators we get to work
// with the more useful bucket indices (which of course directly correspond
// to the buckets we have to adjust).
func expandFloatSpansAndBuckets(a, b []histogram.Span, aBuckets []xorValue, bBuckets []float64) (forward, backward []Insert, ok bool) {
ai := newBucketIterator(a)
bi := newBucketIterator(b)
var aInserts []Insert // To insert into the buckets of a: the buckets of b that a misses.
var bInserts []Insert // To insert into the buckets of b: the empty(!) buckets of a that b misses.
// When aInter.num or bInter.num becomes > 0, it becomes a valid insert that
// should be yielded when we finish a streak of new buckets.
var aInter Insert
var bInter Insert
aIdx, aOK := ai.Next()
bIdx, bOK := bi.Next()
// Bucket count. Initialize the absolute count and index into the
// positive/negative counts or deltas array. The bucket count is
// used to detect counter reset as well as unused buckets in a.
var (
aCount float64
bCount float64
aCountIdx int
bCountIdx int
)
if aOK {
aCount = aBuckets[aCountIdx].value
}
if bOK {
bCount = bBuckets[bCountIdx]
}
loop:
for {
switch {
case aOK && bOK:
switch {
case aIdx == bIdx: // Both have an identical bucket index.
// Bucket count. Check bucket for reset from a to b.
if aCount > bCount {
return nil, nil, false
}
// Finish WIP insert for a and reset.
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
aInter.num = 0
}
// Finish WIP insert for b and reset.
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
bInter.num = 0
}
aIdx, aOK = ai.Next()
bIdx, bOK = bi.Next()
aInter.pos++ // Advance potential insert position.
aCountIdx++ // Advance absolute bucket count index for a.
if aOK {
aCount = aBuckets[aCountIdx].value
}
bInter.pos++ // Advance potential insert position.
bCountIdx++ // Advance absolute bucket count index for b.
if bOK {
bCount = bBuckets[bCountIdx]
}
continue
case aIdx < bIdx: // b misses a bucket index that is in a.
// This is ok if the count in a is 0, in which case we make a note to
// fill in the bucket in b and advance a.
if aCount == 0 {
bInter.num++ // Mark that we need to insert a bucket in b.
bInter.bucketIdx = aIdx
// Advance a
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
aInter.num = 0
}
aIdx, aOK = ai.Next()
aInter.pos++
aCountIdx++
if aOK {
aCount = aBuckets[aCountIdx].value
}
continue
}
// Otherwise we are missing a bucket that was in use in a, which is a reset.
return nil, nil, false
case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare.
aInter.num++
aInter.bucketIdx = bIdx
// Advance b
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
bInter.num = 0
}
bIdx, bOK = bi.Next()
bInter.pos++
bCountIdx++
if bOK {
bCount = bBuckets[bCountIdx]
}
}
case aOK && !bOK: // b misses a value that is in a.
// This is ok if the count in a is 0, in which case we make a note to
// fill in the bucket in b and advance a.
if aCount == 0 {
bInter.num++
bInter.bucketIdx = aIdx
// Advance a
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
aInter.num = 0
}
aIdx, aOK = ai.Next()
aInter.pos++ // Advance potential insert position.
// Update absolute bucket counts for a.
aCountIdx++
if aOK {
aCount = aBuckets[aCountIdx].value
}
continue
}
// Otherwise we are missing a bucket that was in use in a, which is a reset.
return nil, nil, false
case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
aInter.num++
aInter.bucketIdx = bIdx
// Advance b
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
bInter.num = 0
}
bIdx, bOK = bi.Next()
bInter.pos++ // Advance potential insert position.
// Update absolute bucket counts for b.
bCountIdx++
if bOK {
bCount = bBuckets[bCountIdx]
}
default: // Both iterators ran out. We're done.
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
}
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
}
break loop
}
}
return aInserts, bInserts, true
}
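A minimal sketch of the two insert directions, written as a hypothetical helper inside package chunkenc (the identifiers involved are unexported, and fmt is assumed to be imported):

func sketchExpandFloatSpansAndBuckets() {
    // a covers bucket indices 0 (count 0, i.e. unused) and 5; b covers 3 and 5.
    a := []histogram.Span{{Offset: 0, Length: 1}, {Offset: 4, Length: 1}}
    b := []histogram.Span{{Offset: 3, Length: 1}, {Offset: 1, Length: 1}}
    aBuckets := []xorValue{{value: 0}, {value: 4}}
    bBuckets := []float64{2, 4}
    forward, backward, ok := expandFloatSpansAndBuckets(a, b, aBuckets, bBuckets)
    // forward holds one insert into a for the new bucket index 3, backward
    // holds one insert into b for the unused bucket index 0, and ok is true
    // because the only bucket missing from b is empty in a.
    fmt.Println(forward, backward, ok)
}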
// appendableGauge returns whether the chunk can be appended to, and if so
// whether:
// 1. Any recoding needs to happen to the chunk using the provided inserts
@ -349,76 +550,6 @@ func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) (
return
}
// counterResetInAnyFloatBucket returns true if there was a counter reset for any
// bucket. This should be called only when the bucket layout is the same or new
// buckets were added. It does not handle the case of buckets missing.
func counterResetInAnyFloatBucket(oldBuckets []xorValue, newBuckets []float64, oldSpans, newSpans []histogram.Span) bool {
if len(oldSpans) == 0 || len(oldBuckets) == 0 {
return false
}
var (
oldSpanSliceIdx, newSpanSliceIdx int = -1, -1 // Index for the span slices. Starts at -1 to indicate that the first non empty span is not yet found.
oldInsideSpanIdx, newInsideSpanIdx uint32 // Index inside a span.
oldIdx, newIdx int32 // Index inside a bucket slice.
oldBucketSliceIdx, newBucketSliceIdx int // Index inside bucket slice.
)
// Find first non empty spans.
oldSpanSliceIdx, oldIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldIdx, oldSpans)
newSpanSliceIdx, newIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newIdx, newSpans)
oldVal, newVal := oldBuckets[0].value, newBuckets[0]
// Since we assume that new spans won't have missing buckets, there will never be a case
// where the old index will not find a matching new index.
for {
if oldIdx == newIdx {
if newVal < oldVal {
return true
}
}
if oldIdx <= newIdx {
// Moving ahead old bucket and span by 1 index.
if oldInsideSpanIdx+1 >= oldSpans[oldSpanSliceIdx].Length {
// Current span is over.
oldSpanSliceIdx, oldIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldIdx, oldSpans)
oldInsideSpanIdx = 0
if oldSpanSliceIdx >= len(oldSpans) {
// All old spans are over.
break
}
} else {
oldInsideSpanIdx++
oldIdx++
}
oldBucketSliceIdx++
oldVal = oldBuckets[oldBucketSliceIdx].value
}
if oldIdx > newIdx {
// Moving ahead new bucket and span by 1 index.
if newInsideSpanIdx+1 >= newSpans[newSpanSliceIdx].Length {
// Current span is over.
newSpanSliceIdx, newIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newIdx, newSpans)
newInsideSpanIdx = 0
if newSpanSliceIdx >= len(newSpans) {
// All new spans are over.
// This should not happen, old spans above should catch this first.
panic("new spans over before old spans in counterReset")
}
} else {
newInsideSpanIdx++
newIdx++
}
newBucketSliceIdx++
newVal = newBuckets[newBucketSliceIdx]
}
}
return false
}
// appendFloatHistogram appends a float histogram to the chunk. The caller must ensure that
// the histogram is properly structured, e.g. the number of buckets used
// corresponds to the number conveyed by the span structures. First call
@ -614,7 +745,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
a.setCounterResetHeader(CounterReset)
case prev != nil:
// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set.
_, _, _, counterReset := prev.appendable(h)
_, _, _, _, _, counterReset := prev.appendable(h)
if counterReset {
a.setCounterResetHeader(CounterReset)
} else {
@ -626,7 +757,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
// Adding counter-like histogram.
if h.CounterResetHint != histogram.GaugeType {
pForwardInserts, nForwardInserts, okToAppend, counterReset := a.appendable(h)
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, okToAppend, counterReset := a.appendable(h)
if !okToAppend || counterReset {
if appendOnly {
if counterReset {
@ -646,6 +777,23 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
happ.appendFloatHistogram(t, h)
return newChunk, false, app, nil
}
if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
// The histogram needs to be expanded to have the extra empty buckets
// of the chunk.
if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 {
// No new buckets from the histogram, so the spans of the appender can accommodate the new buckets.
// However, we need to make a copy in case the input is sharing spans from an iterator.
h.PositiveSpans = make([]histogram.Span, len(a.pSpans))
copy(h.PositiveSpans, a.pSpans)
h.NegativeSpans = make([]histogram.Span, len(a.nSpans))
copy(h.NegativeSpans, a.nSpans)
} else {
// Spans need pre-adjusting to accommodate the new buckets.
h.PositiveSpans = adjustForInserts(h.PositiveSpans, pBackwardInserts)
h.NegativeSpans = adjustForInserts(h.NegativeSpans, nBackwardInserts)
}
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("float histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
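For context, the recoding paths above sit behind the exported appender API. A minimal standalone sketch of the entry point (hypothetical values; this first append neither cuts a new chunk nor recodes):

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
    fh := &histogram.FloatHistogram{
        Schema:          0,
        Count:           9,
        Sum:             18,
        PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
        PositiveBuckets: []float64{6, 3},
    }
    c := chunkenc.NewFloatHistogramChunk()
    app, err := c.Appender()
    if err != nil {
        panic(err)
    }
    fhApp := app.(*chunkenc.FloatHistogramAppender)
    newChunk, recoded, _, err := fhApp.AppendFloatHistogram(nil, 1, fh, false)
    // Expected: true false <nil> - the first sample is appended in place.
    fmt.Println(newChunk == nil, recoded, err)
}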

View file

@ -245,9 +245,11 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) {
h2.NegativeBuckets = []int64{2, -1} // 2 1 (total 3)
// This is how span changes will be handled.
hApp, _ := app.(*FloatHistogramAppender)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2.ToFloat(nil))
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2.ToFloat(nil))
require.NotEmpty(t, posInterjections)
require.NotEmpty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
@ -333,7 +335,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.Schema++
_, _, ok, _ := hApp.appendable(h2)
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
@ -343,7 +345,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.ZeroThreshold += 0.1
_, _, ok, _ := hApp.appendable(h2)
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
@ -363,9 +365,11 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2.Sum = 30
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
@ -385,24 +389,94 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2.Sum = 21
h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1}
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
{ // New histogram that has buckets missing but the buckets missing were empty.
emptyBucketH := eh.Copy()
emptyBucketH.PositiveBuckets = []float64{6, 0, 3, 2, 4, 0, 1}
c, hApp, ts, h1 := setup(emptyBucketH)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 1},
{Offset: 3, Length: 1},
{Offset: 3, Length: 2},
{Offset: 5, Length: 1},
}
savedH2Spans := h2.PositiveSpans
h2.PositiveBuckets = []float64{7, 4, 3, 5, 2}
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.False(t, cr)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
// Check that h2 was recoded.
require.Equal(t, []float64{7, 0, 4, 3, 5, 0, 2}, h2.PositiveBuckets)
require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans)
require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy")
}
{ // New histogram that has new buckets AND buckets missing but the buckets missing were empty.
emptyBucketH := eh.Copy()
emptyBucketH.PositiveBuckets = []float64{6, 0, 3, 2, 4, 0, 1}
c, hApp, ts, h1 := setup(emptyBucketH)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 1},
{Offset: 3, Length: 1},
{Offset: 3, Length: 2},
{Offset: 5, Length: 2},
}
savedH2Spans := h2.PositiveSpans
h2.PositiveBuckets = []float64{7, 4, 3, 5, 2, 3}
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.False(t, cr)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
// Check that h2 was recoded.
require.Equal(t, []float64{7, 0, 4, 3, 5, 0, 2, 3}, h2.PositiveBuckets)
require.Equal(t, []histogram.Span{
{Offset: 0, Length: 2}, // Added empty bucket.
{Offset: 2, Length: 1}, // Existing - offset adjusted.
{Offset: 3, Length: 2}, // Existing.
{Offset: 3, Length: 1}, // Added empty bucket.
{Offset: 1, Length: 2}, // Existing + the extra bucket.
}, h2.PositiveSpans)
require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy")
}
{ // New histogram that has a counter reset while buckets are same.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.Sum = 23
h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1}
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -421,9 +495,11 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2.Sum = 29
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0}
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -448,9 +524,11 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2.Sum = 26
h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1}
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -524,10 +602,44 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
}
{
// Start a new chunk with a histogram that has an empty bucket.
// Add a histogram that has the same bucket missing.
// This should be appendable and can happen if we are merging from chunks
// where the first sample came from a recoded chunk that added the
// empty bucket.
h1 := eh.Copy()
// Add an empty bucket 10 offsets before the first bucket.
h1.PositiveSpans = make([]histogram.Span, len(eh.PositiveSpans)+1)
h1.PositiveSpans[0] = histogram.Span{Offset: eh.PositiveSpans[0].Offset - 10, Length: 1}
h1.PositiveSpans[1] = histogram.Span{Offset: eh.PositiveSpans[0].Offset + 9, Length: eh.PositiveSpans[0].Length}
for i, v := range eh.PositiveSpans[1:] {
h1.PositiveSpans[i+2] = v
}
h1.PositiveBuckets = make([]float64, len(eh.PositiveBuckets)+1)
h1.PositiveBuckets[0] = 0
for i, v := range eh.PositiveBuckets {
h1.PositiveBuckets[i+1] = v
}
c, hApp, ts, _ := setup(h1)
h2 := eh.Copy()
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.False(t, cr)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Custom buckets, no change.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
_, _, ok, _ := hApp.appendable(h2)
_, _, _, _, ok, _ := hApp.appendable(h2)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
@ -538,7 +650,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2 := h1.Copy()
h2.Count++
h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 2}
_, _, ok, _ := hApp.appendable(h2)
_, _, _, _, ok, _ := hApp.appendable(h2)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
@ -549,7 +661,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2 := h1.Copy()
h2.Count--
h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 0}
_, _, ok, _ := hApp.appendable(h2)
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
@ -559,7 +671,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
_, _, ok, _ := hApp.appendable(h2)
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
@ -581,9 +693,11 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} // (total 30)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
@ -839,9 +953,11 @@ func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
require.Equal(t, 1, c.NumSamples())
hApp, _ := app.(*FloatHistogramAppender)
pI, nI, okToAppend, counterReset := hApp.appendable(tc.h2)
pI, nI, bpI, bnI, okToAppend, counterReset := hApp.appendable(tc.h2)
require.Empty(t, pI)
require.Empty(t, nI)
require.Empty(t, bpI)
require.Empty(t, bnI)
require.True(t, okToAppend)
require.False(t, counterReset)
})

View file

@ -237,16 +237,23 @@ func (a *HistogramAppender) Append(int64, float64) {
}
// appendable returns whether the chunk can be appended to, and if so whether
// any recoding needs to happen using the provided inserts (in case of any new
// buckets, positive or negative range, respectively). If the sample is a gauge
// histogram, AppendableGauge must be used instead.
// 1. Any recoding needs to happen to the chunk using the provided forward
// inserts (in case of any new buckets, positive or negative range,
// respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the
// backward inserts (in case of any missing buckets, positive or negative
// range, respectively).
//
// If the sample is a gauge histogram, AppendableGauge must be used instead.
//
// The chunk is not appendable in the following cases:
//
// - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed.
// - Any buckets have disappeared.
// - Any buckets have disappeared, unless the bucket count was 0 (unused).
// Empty buckets can happen if the chunk was recoded and we're merging a
// non-recoded histogram. In this case backward inserts will be provided.
// - There was a counter reset in the count of observations or in any bucket,
// including the zero bucket.
// - The last sample in the chunk was stale while the current sample is not stale.
@ -256,6 +263,7 @@ func (a *HistogramAppender) Append(int64, float64) {
// append. If counterReset is true, okToAppend is always false.
func (a *HistogramAppender) appendable(h *histogram.Histogram) (
positiveInserts, negativeInserts []Insert,
backwardPositiveInserts, backwardNegativeInserts []Insert,
okToAppend, counterReset bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
@ -299,31 +307,223 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) (
}
var ok bool
positiveInserts, ok = expandSpansForward(a.pSpans, h.PositiveSpans)
positiveInserts, backwardPositiveInserts, ok = expandIntSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets)
if !ok {
counterReset = true
return
}
negativeInserts, ok = expandSpansForward(a.nSpans, h.NegativeSpans)
negativeInserts, backwardNegativeInserts, ok = expandIntSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets)
if !ok {
counterReset = true
return
}
if counterResetInAnyBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) ||
counterResetInAnyBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) {
counterReset, positiveInserts, negativeInserts = true, nil, nil
return
}
okToAppend = true
return
}
// expandIntSpansAndBuckets returns the inserts to expand the bucket spans 'a' so that
// they match the spans in 'b'. 'b' must cover the same or more buckets than
// 'a', except for buckets that are unused (zero count) in 'a'; otherwise the
// function returns false.
// The function also returns the inserts to expand 'b' to cover all the
// buckets that are missing in 'b' but present with a 0 counter value in 'a'.
// The function also checks for counter resets between 'a' and 'b'.
//
// Example:
//
// Let's say the old buckets look like this:
//
// span syntax: [offset, length]
// spans : [ 0 , 2 ] [2,1] [ 3 , 2 ] [3,1] [1,1]
// bucket idx : [0] [1] 2 3 [4] 5 6 7 [8] [9] 10 11 12 [13] 14 [15]
// raw values 6 3 3 2 4 5 1
// deltas 6 -3 0 -1 2 1 -4
//
// But now we introduce a new bucket layout. (Carefully chosen example where we
// have a span appended, one unchanged[*], one prepended, and two merged - in
// that order.)
//
// [*] Unchanged in terms of which bucket indices they represent, but to
// achieve that, their offsets need to change if "disrupted" by spans changing
// ahead of them.
//
// \/ this one is "unchanged"
// spans : [ 0 , 3 ] [1,1] [ 1 , 4 ] [ 3 , 3 ]
// bucket idx : [0] [1] [2] 3 [4] 5 [6] [7] [8] [9] 10 11 12 [13] [14] [15]
// raw values 6 3 0 3 0 0 2 4 5 0 1
// deltas 6 -3 -3 3 -3 0 2 2 1 -5 1
// delta mods: / \ / \ / \
//
// Note for histograms with delta-encoded buckets: Whenever any new buckets are
// introduced, the subsequent "old" bucket needs to readjust its delta to the
// new base of 0. Thus, for the caller who wants to transform the set of
// original deltas to a new set of deltas to match a new span layout that adds
// buckets, we simply need to generate a list of inserts.
//
// Note: Within expandIntSpansAndBuckets we don't have to worry about the
// changes to the spans themselves, thanks to the iterators we get to work
// with the more useful bucket indices (which of course directly correspond
// to the buckets we have to adjust).
func expandIntSpansAndBuckets(a, b []histogram.Span, aBuckets, bBuckets []int64) (forward, backward []Insert, ok bool) {
ai := newBucketIterator(a)
bi := newBucketIterator(b)
var aInserts []Insert // To insert into the buckets of a: the buckets of b that a misses.
var bInserts []Insert // To insert into the buckets of b: the empty(!) buckets of a that b misses.
// When aInter.num or bInter.num becomes > 0, it becomes a valid insert that
// should be yielded when we finish a streak of new buckets.
var aInter Insert
var bInter Insert
aIdx, aOK := ai.Next()
bIdx, bOK := bi.Next()
// Bucket count. Initialize the absolute count and index into the
// positive/negative counts or deltas array. The bucket count is
// used to detect counter reset as well as unused buckets in a.
var (
aCount int64
bCount int64
aCountIdx int
bCountIdx int
)
if aOK {
aCount = aBuckets[aCountIdx]
}
if bOK {
bCount = bBuckets[bCountIdx]
}
loop:
for {
switch {
case aOK && bOK:
switch {
case aIdx == bIdx: // Both have an identical bucket index.
// Bucket count. Check bucket for reset from a to b.
if aCount > bCount {
return nil, nil, false
}
// Finish WIP insert for a and reset.
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
aInter.num = 0
}
// Finish WIP insert for b and reset.
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
bInter.num = 0
}
aIdx, aOK = ai.Next()
bIdx, bOK = bi.Next()
aInter.pos++ // Advance potential insert position.
aCountIdx++ // Advance absolute bucket count index for a.
if aOK {
aCount += aBuckets[aCountIdx]
}
bInter.pos++ // Advance potential insert position.
bCountIdx++ // Advance absolute bucket count index for b.
if bOK {
bCount += bBuckets[bCountIdx]
}
continue
case aIdx < bIdx: // b misses a bucket index that is in a.
// This is ok if the count in a is 0, in which case we make a note to
// fill in the bucket in b and advance a.
if aCount == 0 {
bInter.num++ // Mark that we need to insert a bucket in b.
bInter.bucketIdx = aIdx
// Advance a
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
aInter.num = 0
}
aIdx, aOK = ai.Next()
aInter.pos++
aCountIdx++
if aOK {
aCount += aBuckets[aCountIdx]
}
continue
}
// Otherwise we are missing a bucket that was in use in a, which is a reset.
return nil, nil, false
case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare.
aInter.num++
aInter.bucketIdx = bIdx
// Advance b
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
bInter.num = 0
}
bIdx, bOK = bi.Next()
bInter.pos++
bCountIdx++
if bOK {
bCount += bBuckets[bCountIdx]
}
}
case aOK && !bOK: // b misses a value that is in a.
// This is ok if the count in a is 0, in which case we make a note to
// fill in the bucket in b and advance a.
if aCount == 0 {
bInter.num++
bInter.bucketIdx = aIdx
// Advance a
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
aInter.num = 0
}
aIdx, aOK = ai.Next()
aInter.pos++ // Advance potential insert position.
// Update absolute bucket counts for a.
aCountIdx++
if aOK {
aCount += aBuckets[aCountIdx]
}
continue
}
// Otherwise we are missing a bucket that was in use in a, which is a reset.
return nil, nil, false
case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
aInter.num++
aInter.bucketIdx = bIdx
// Advance b
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
bInter.num = 0
}
bIdx, bOK = bi.Next()
bInter.pos++ // Advance potential insert position.
// Update absolute bucket counts for b.
bCountIdx++
if bOK {
bCount += bBuckets[bCountIdx]
}
default: // Both iterators ran out. We're done.
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
}
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
}
break loop
}
}
return aInserts, bInserts, true
}
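The integer version differs from the float one in that bucket counts are delta-encoded, hence the aCount += aBuckets[aCountIdx] accumulation above. A hypothetical helper (not part of the package) showing that prefix-sum relation:

// countsFromDeltas is a hypothetical helper: bucket i's absolute count is
// the prefix sum of the stored deltas, exactly the accumulation done in the
// loop above.
func countsFromDeltas(deltas []int64) []int64 {
    counts := make([]int64, len(deltas))
    var cur int64
    for i, d := range deltas {
        cur += d
        counts[i] = cur
    }
    return counts
}

// countsFromDeltas([]int64{6, -3, 0, -1, 2, 1, -4}) yields [6 3 3 2 4 5 1],
// the "raw values" row of the doc comment's example.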
// appendableGauge returns whether the chunk can be appended to, and if so
// whether:
// 1. Any recoding needs to happen to the chunk using the provided inserts
// (in case of any new buckets, positive or negative range, respectively).
// 1. Any recoding needs to happen to the chunk using the provided forward
// inserts (in case of any new buckets, positive or negative range,
// respectively).
// 2. Any recoding needs to happen for the histogram being appended, using the
// backward inserts (in case of any missing buckets, positive or negative
// range, respectively).
@ -369,76 +569,6 @@ func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) (
return
}
// counterResetInAnyBucket returns true if there was a counter reset for any
// bucket. This should be called only when the bucket layout is the same or new
// buckets were added. It does not handle the case of buckets missing.
func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans []histogram.Span) bool {
if len(oldSpans) == 0 || len(oldBuckets) == 0 {
return false
}
var (
oldSpanSliceIdx, newSpanSliceIdx int = -1, -1 // Index for the span slices. Starts at -1 to indicate that the first non empty span is not yet found.
oldInsideSpanIdx, newInsideSpanIdx uint32 // Index inside a span.
oldIdx, newIdx int32 // Index inside a bucket slice.
oldBucketSliceIdx, newBucketSliceIdx int // Index inside bucket slice.
)
// Find first non empty spans.
oldSpanSliceIdx, oldIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldIdx, oldSpans)
newSpanSliceIdx, newIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newIdx, newSpans)
oldVal, newVal := oldBuckets[0], newBuckets[0]
// Since we assume that new spans won't have missing buckets, there will never be a case
// where the old index will not find a matching new index.
for {
if oldIdx == newIdx {
if newVal < oldVal {
return true
}
}
if oldIdx <= newIdx {
// Moving ahead old bucket and span by 1 index.
if oldInsideSpanIdx+1 >= oldSpans[oldSpanSliceIdx].Length {
// Current span is over.
oldSpanSliceIdx, oldIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldIdx, oldSpans)
oldInsideSpanIdx = 0
if oldSpanSliceIdx >= len(oldSpans) {
// All old spans are over.
break
}
} else {
oldInsideSpanIdx++
oldIdx++
}
oldBucketSliceIdx++
oldVal += oldBuckets[oldBucketSliceIdx]
}
if oldIdx > newIdx {
// Moving ahead new bucket and span by 1 index.
if newInsideSpanIdx+1 >= newSpans[newSpanSliceIdx].Length {
// Current span is over.
newSpanSliceIdx, newIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newIdx, newSpans)
newInsideSpanIdx = 0
if newSpanSliceIdx >= len(newSpans) {
// All new spans are over.
// This should not happen, old spans above should catch this first.
panic("new spans over before old spans in counterReset")
}
} else {
newInsideSpanIdx++
newIdx++
}
newBucketSliceIdx++
newVal += newBuckets[newBucketSliceIdx]
}
}
return false
}
// appendHistogram appends a histogram to the chunk. The caller must ensure that
// the histogram is properly structured, e.g. the number of buckets used
// corresponds to the number conveyed by the span structures. First call
@ -649,7 +779,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
a.setCounterResetHeader(CounterReset)
case prev != nil:
// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set.
_, _, _, counterReset := prev.appendable(h)
_, _, _, _, _, counterReset := prev.appendable(h)
if counterReset {
a.setCounterResetHeader(CounterReset)
} else {
@ -661,7 +791,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
// Adding counter-like histogram.
if h.CounterResetHint != histogram.GaugeType {
pForwardInserts, nForwardInserts, okToAppend, counterReset := a.appendable(h)
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, okToAppend, counterReset := a.appendable(h)
if !okToAppend || counterReset {
if appendOnly {
if counterReset {
@ -681,6 +811,23 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
happ.appendHistogram(t, h)
return newChunk, false, app, nil
}
if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
// The histogram needs to be expanded to have the extra empty buckets
// of the chunk.
if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 {
// No new buckets from the histogram, so the spans of the appender can accommodate the new buckets.
// However, we need to make a copy in case the input is sharing spans from an iterator.
h.PositiveSpans = make([]histogram.Span, len(a.pSpans))
copy(h.PositiveSpans, a.pSpans)
h.NegativeSpans = make([]histogram.Span, len(a.nSpans))
copy(h.NegativeSpans, a.nSpans)
} else {
// Spans need pre-adjusting to accommodate the new buckets.
h.PositiveSpans = adjustForInserts(h.PositiveSpans, pBackwardInserts)
h.NegativeSpans = adjustForInserts(h.NegativeSpans, nBackwardInserts)
}
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))

View file

@ -278,8 +278,15 @@ func (b *bucketIterator) Next() (int, bool) {
type Insert struct {
pos int
num int
// Optional: bucketIdx is the index of the bucket that is inserted.
// Can be used to adjust spans.
bucketIdx int
}
// Deprecated: Use expandIntSpansAndBuckets or expandFloatSpansAndBuckets
// instead. expandSpansForward is left here for reference.
// expandSpansForward returns the inserts to expand the bucket spans 'a' so that
// they match the spans in 'b'. 'b' must cover the same or more buckets than
// 'a', otherwise the function will return false.
@ -575,14 +582,64 @@ func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterR
}
}
// Handle pathological case of empty span when advancing span idx.
// Call it with idx==-1 to find the first non empty span.
func nextNonEmptySpanSliceIdx(idx int, bucketIdx int32, spans []histogram.Span) (newIdx int, newBucketIdx int32) {
for idx++; idx < len(spans); idx++ {
if spans[idx].Length > 0 {
return idx, bucketIdx + spans[idx].Offset + 1
}
bucketIdx += spans[idx].Offset
// adjustForInserts adjusts the spans for the given inserts.
func adjustForInserts(spans []histogram.Span, inserts []Insert) (mergedSpans []histogram.Span) {
if len(inserts) == 0 {
return spans
}
return idx, 0
it := newBucketIterator(spans)
var (
lastBucket int
i int
insertIdx = inserts[i].bucketIdx
insertNum = inserts[i].num
)
addBucket := func(b int) {
offset := b - lastBucket - 1
if offset == 0 && len(mergedSpans) > 0 {
mergedSpans[len(mergedSpans)-1].Length++
} else {
if len(mergedSpans) == 0 {
offset++
}
mergedSpans = append(mergedSpans, histogram.Span{
Offset: int32(offset),
Length: 1,
})
}
lastBucket = b
}
consumeInsert := func() {
// Consume the insert.
insertNum--
if insertNum == 0 {
i++
if i < len(inserts) {
insertIdx = inserts[i].bucketIdx
insertNum = inserts[i].num
}
} else {
insertIdx++
}
}
bucket, ok := it.Next()
for ok {
if i < len(inserts) && insertIdx < bucket {
addBucket(insertIdx)
consumeInsert()
} else {
addBucket(bucket)
bucket, ok = it.Next()
}
}
for i < len(inserts) {
addBucket(inserts[i].bucketIdx)
consumeInsert()
}
return
}
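A minimal in-package sketch of adjustForInserts (hypothetical layout, matching the shape of the -10 offset tests elsewhere in this change):

// Widen spans covering bucket indices 0 and 1 by one empty bucket at -10.
spans := []histogram.Span{{Offset: 0, Length: 2}}
widened := adjustForInserts(spans, []Insert{{num: 1, bucketIdx: -10}})
// widened is []histogram.Span{{Offset: -10, Length: 1}, {Offset: 9, Length: 2}}:
// the first span starts at index -10, and the 9-offset gap puts the next
// two buckets back at indices 0 and 1.
_ = widened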

View file

@ -256,9 +256,11 @@ func TestHistogramChunkBucketChanges(t *testing.T) {
h2.NegativeBuckets = []int64{2, -1} // 2 1 (total 3)
// This is how span changes will be handled.
hApp, _ := app.(*HistogramAppender)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.NotEmpty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
@ -347,7 +349,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.Schema++
_, _, ok, _ := hApp.appendable(h2)
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
@ -357,7 +359,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.ZeroThreshold += 0.1
_, _, ok, _ := hApp.appendable(h2)
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
@ -380,9 +382,11 @@ func TestHistogramChunkAppendable(t *testing.T) {
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
@ -401,24 +405,96 @@ func TestHistogramChunkAppendable(t *testing.T) {
h2.Sum = 21
h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // counts: 6, 3, 2, 4, 5, 1 (total 21)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
}
{ // New histogram that has buckets missing but the buckets missing were empty.
emptyBucketH := eh.Copy()
emptyBucketH.PositiveBuckets = []int64{6, -6, 1, 1, -2, 1, 1} // counts: 6, 0, 1, 2, 0, 1, 2 (total 12)
c, hApp, ts, h1 := setup(emptyBucketH)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ // Missing buckets at offset 1 and 9.
{Offset: 0, Length: 1},
{Offset: 3, Length: 1},
{Offset: 3, Length: 1},
{Offset: 4, Length: 1},
{Offset: 1, Length: 1},
}
savedH2Spans := h2.PositiveSpans
h2.PositiveBuckets = []int64{7, -5, 1, 0, 1} // counts: 7, 2, 3, 3, 4 (total 19)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.False(t, cr)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
// Check that h2 was recoded.
require.Equal(t, []int64{7, -7, 2, 1, -3, 3, 1}, h2.PositiveBuckets) // counts: 7, 0, 2, 3, 0, 3, 4 (total 19)
require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans)
require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy")
}
{ // New histogram that has new buckets AND buckets missing but the buckets missing were empty.
emptyBucketH := eh.Copy()
emptyBucketH.PositiveBuckets = []int64{6, -6, 1, 1, -2, 1, 1} // counts: 6, 0, 1, 2, 0, 1, 2 (total 12)
c, hApp, ts, h1 := setup(emptyBucketH)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ // Missing buckets at offset 1 and 9.
{Offset: 0, Length: 1},
{Offset: 3, Length: 1},
{Offset: 3, Length: 1},
{Offset: 4, Length: 1},
{Offset: 1, Length: 2},
}
savedH2Spans := h2.PositiveSpans
h2.PositiveBuckets = []int64{7, -5, 1, 0, 1, 1} // counts: 7, 2, 3, 3, 4, 5 (total 24)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.False(t, cr)
assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
// Check that h2 was recoded.
require.Equal(t, []int64{7, -7, 2, 1, -3, 3, 1, 1}, h2.PositiveBuckets) // counts: 7, 0, 2, 3, 0, 3, 4, 5 (total 24)
require.Equal(t, []histogram.Span{
{Offset: 0, Length: 2}, // Added empty bucket.
{Offset: 2, Length: 1}, // Existing - offset adjusted.
{Offset: 3, Length: 2}, // Added empty bucket.
{Offset: 3, Length: 1}, // Existing - offset adjusted.
{Offset: 1, Length: 2}, // Existing.
}, h2.PositiveSpans)
require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy")
}
{ // New histogram that has a counter reset while buckets are same.
c, hApp, ts, h1 := setup(eh)
h2 := h1.Copy()
h2.Sum = 23
h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -440,9 +516,11 @@ func TestHistogramChunkAppendable(t *testing.T) {
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // 7 5 1 3 1 0 2 5 5 0 0 (total 29)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -470,9 +548,11 @@ func TestHistogramChunkAppendable(t *testing.T) {
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // counts: 1, 2, 5, 3, 3, 2, 4, 5, 1 (total 26)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -549,10 +629,44 @@ func TestHistogramChunkAppendable(t *testing.T) {
require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
}
{
// Start a new chunk with a histogram that has an empty bucket.
// Add a histogram that has the same bucket missing.
// This should be appendable and can happen if we are merging from chunks
// where the first sample came from a recoded chunk that added the
// empty bucket.
h1 := eh.Copy()
// Add an empty bucket 10 offsets before the first bucket.
h1.PositiveSpans = make([]histogram.Span, len(eh.PositiveSpans)+1)
h1.PositiveSpans[0] = histogram.Span{Offset: eh.PositiveSpans[0].Offset - 10, Length: 1}
h1.PositiveSpans[1] = histogram.Span{Offset: eh.PositiveSpans[0].Offset + 9, Length: eh.PositiveSpans[0].Length}
for i, v := range eh.PositiveSpans[1:] {
h1.PositiveSpans[i+2] = v
}
h1.PositiveBuckets = make([]int64, len(eh.PositiveBuckets)+1)
h1.PositiveBuckets[0] = 0
for i, v := range eh.PositiveBuckets {
h1.PositiveBuckets[i+1] = v
}
c, hApp, ts, _ := setup(h1)
h2 := eh.Copy()
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.False(t, cr)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
}
{ // Custom buckets, no change.
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
_, _, ok, _ := hApp.appendable(h2)
_, _, _, _, ok, _ := hApp.appendable(h2)
require.True(t, ok)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
@ -563,7 +677,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
h2 := h1.Copy()
h2.Count++
h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -3}
_, _, ok, _ := hApp.appendable(h2)
_, _, _, _, ok, _ := hApp.appendable(h2)
require.True(t, ok)
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
@ -574,7 +688,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
h2 := h1.Copy()
h2.Count--
h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -5}
_, _, ok, _ := hApp.appendable(h2)
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
@ -584,7 +698,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
c, hApp, ts, h1 := setup(cbh)
h2 := h1.Copy()
h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
_, _, ok, _ := hApp.appendable(h2)
_, _, _, _, ok, _ := hApp.appendable(h2)
require.False(t, ok)
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
@ -606,9 +720,11 @@ func TestHistogramChunkAppendable(t *testing.T) {
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.Empty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
@ -875,9 +991,11 @@ func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) {
require.Equal(t, 1, c.NumSamples())
hApp, _ := app.(*HistogramAppender)
pI, nI, okToAppend, counterReset := hApp.appendable(tc.h2)
pI, nI, bpI, bnI, okToAppend, counterReset := hApp.appendable(tc.h2)
require.Empty(t, pI)
require.Empty(t, nI)
require.Empty(t, bpI)
require.Empty(t, bnI)
require.True(t, okToAppend)
require.False(t, counterReset)
})
@ -1368,3 +1486,50 @@ func TestHistogramAppendOnlyErrors(t *testing.T) {
require.EqualError(t, err, "histogram counter reset")
})
}
func BenchmarkAppendable(b *testing.B) {
// Create a histogram with a bunch of spans and buckets.
const (
numSpans = 1000
spanLength = 10
)
h := &histogram.Histogram{
Schema: 0,
Count: 100,
Sum: 1000,
ZeroThreshold: 0.001,
ZeroCount: 5,
}
for i := 0; i < numSpans; i++ {
h.PositiveSpans = append(h.PositiveSpans, histogram.Span{Offset: 5, Length: spanLength})
h.NegativeSpans = append(h.NegativeSpans, histogram.Span{Offset: 5, Length: spanLength})
for j := 0; j < spanLength; j++ {
h.PositiveBuckets = append(h.PositiveBuckets, int64(j))
h.NegativeBuckets = append(h.NegativeBuckets, int64(j))
}
}
c := Chunk(NewHistogramChunk())
// Create fresh appender and add the first histogram.
app, err := c.Appender()
if err != nil {
b.Fatal(err)
}
_, _, _, err = app.AppendHistogram(nil, 1, h, true)
if err != nil {
b.Fatal(err)
}
hApp := app.(*HistogramAppender)
isAppendable := true
for i := 0; i < b.N; i++ {
_, _, _, _, ok, _ := hApp.appendable(h)
isAppendable = isAppendable && ok
}
if !isAppendable {
b.Fail()
}
}
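Assuming the usual repository layout, the benchmark can be run on its own with:

go test -run '^$' -bench BenchmarkAppendable -benchmem ./tsdb/chunkenc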

View file

@ -22,6 +22,7 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"sync"
"testing"
@ -1925,3 +1926,229 @@ func TestCompactEmptyResultBlockWithTombstone(t *testing.T) {
require.Nil(t, ulids)
require.NoError(t, block.Close())
}
func TestDelayedCompaction(t *testing.T) {
// The delay is chosen in such a way as to not slow down the tests, but also to make
// the effective compaction duration negligible compared to it, so that the duration comparisons make sense.
delay := 1000 * time.Millisecond
waitUntilCompactedAndCheck := func(db *DB) {
t.Helper()
start := time.Now()
for db.head.compactable() {
// This simulates what happens at the end of commits; for a less busy DB, a compaction
// is triggered every minute. Triggering it in a tight loop here speeds up the test.
select {
case db.compactc <- struct{}{}:
default:
}
time.Sleep(time.Millisecond)
}
duration := time.Since(start)
// Only waited for one delay period: delay < duration < 2*delay.
require.Greater(t, duration, db.opts.CompactionDelay)
require.Less(t, duration, 2*db.opts.CompactionDelay)
}
compactAndCheck := func(db *DB) {
t.Helper()
start := time.Now()
db.Compact(context.Background())
for db.head.compactable() {
time.Sleep(time.Millisecond)
}
if runtime.GOOS == "windows" {
// TODO: enable on windows once ms resolution timers are better supported.
return
}
duration := time.Since(start)
require.Less(t, duration, delay)
}
cases := []struct {
name string
// The delays are chosen in such a way as to not slow down the tests, but also in a way to make the
// effective compaction duration negligible compared to them, so that the duration comparisons make sense.
compactionDelay time.Duration
}{
{
"delayed compaction not enabled",
0,
},
{
"delayed compaction enabled",
delay,
},
}
for _, c := range cases {
c := c
t.Run(c.name, func(t *testing.T) {
t.Parallel()
var options *Options
if c.compactionDelay > 0 {
options = &Options{CompactionDelay: c.compactionDelay}
}
db := openTestDB(t, options, []int64{10})
defer func() {
require.NoError(t, db.Close())
}()
label := labels.FromStrings("foo", "bar")
// The first compaction is expected to result in 1 block.
db.DisableCompactions()
app := db.Appender(context.Background())
_, err := app.Append(0, label, 0, 0)
require.NoError(t, err)
_, err = app.Append(0, label, 11, 0)
require.NoError(t, err)
_, err = app.Append(0, label, 21, 0)
require.NoError(t, err)
require.NoError(t, app.Commit())
if c.compactionDelay == 0 {
// When delay is not enabled, compaction should run on the first trigger.
compactAndCheck(db)
} else {
db.EnableCompactions()
waitUntilCompactedAndCheck(db)
// The db.compactc signals have been processed multiple times since a compaction is triggered every 1ms by waitUntilCompactedAndCheck.
// This implies that the compaction delay doesn't block or wait on the initial trigger.
// 3 is an arbitrary value because it's difficult to determine the precise value.
require.GreaterOrEqual(t, prom_testutil.ToFloat64(db.metrics.compactionsTriggered)-prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 3.0)
// The delay doesn't change the head blocks' alignment.
require.Eventually(t, func() bool {
return db.head.MinTime() == db.compactor.(*LeveledCompactor).ranges[0]+1
}, 500*time.Millisecond, 10*time.Millisecond)
// One compaction was run and one block was produced.
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran))
}
// The second compaction is expected to result in 2 blocks.
// This ensures that the logic for compaction delay doesn't only work for the first compaction, but also takes into account the future compactions.
// This also ensures that no delay happens between consecutive compactions.
db.DisableCompactions()
app = db.Appender(context.Background())
_, err = app.Append(0, label, 31, 0)
require.NoError(t, err)
_, err = app.Append(0, label, 41, 0)
require.NoError(t, err)
require.NoError(t, app.Commit())
if c.compactionDelay == 0 {
// Compaction should still run on the first trigger.
compactAndCheck(db)
} else {
db.EnableCompactions()
waitUntilCompactedAndCheck(db)
}
// Two other compactions were run.
require.Eventually(t, func() bool {
return prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran) == 3.0
}, 500*time.Millisecond, 10*time.Millisecond)
if c.compactionDelay == 0 {
return
}
// This test covers a special case. If auto compaction is in a delay period and a manual compaction is triggered,
// auto compaction should stop waiting for the delay if the head is no longer compactable.
// Of course, if the head is still compactable after the manual compaction, auto compaction will continue waiting for the same delay.
getTimeWhenCompactionDelayStarted := func() time.Time {
t.Helper()
db.cmtx.Lock()
defer db.cmtx.Unlock()
return db.timeWhenCompactionDelayStarted
}
db.DisableCompactions()
app = db.Appender(context.Background())
_, err = app.Append(0, label, 51, 0)
require.NoError(t, err)
require.NoError(t, app.Commit())
require.True(t, db.head.compactable())
db.EnableCompactions()
// Trigger an auto compaction.
db.compactc <- struct{}{}
// That made auto compaction start waiting for the delay.
require.Eventually(t, func() bool {
return !getTimeWhenCompactionDelayStarted().IsZero()
}, 100*time.Millisecond, 10*time.Millisecond)
// Trigger a manual compaction.
require.NoError(t, db.CompactHead(NewRangeHead(db.Head(), 0, 50.0)))
require.Equal(t, 4.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran))
// Re-trigger an auto compaction.
db.compactc <- struct{}{}
// That made auto compaction stop waiting for the delay.
require.Eventually(t, func() bool {
return getTimeWhenCompactionDelayStarted().IsZero()
}, 100*time.Millisecond, 10*time.Millisecond)
})
}
}
// TestDelayedCompactionDoesNotBlockUnrelatedOps makes sure that when delayed compaction is enabled,
// operations that don't directly derive from the Head compaction are not delayed; here we consider disk block compaction.
func TestDelayedCompactionDoesNotBlockUnrelatedOps(t *testing.T) {
cases := []struct {
name string
whenCompactable bool
}{
{
"Head is compactable",
true,
},
{
"Head is not compactable",
false,
},
}
for _, c := range cases {
c := c
t.Run(c.name, func(t *testing.T) {
t.Parallel()
tmpdir := t.TempDir()
// Some blocks that need compaction are present.
createBlock(t, tmpdir, genSeries(1, 1, 0, 100))
createBlock(t, tmpdir, genSeries(1, 1, 100, 200))
createBlock(t, tmpdir, genSeries(1, 1, 200, 300))
options := DefaultOptions()
// This will make the test time out if compaction really waits for it.
options.CompactionDelay = time.Hour
db, err := open(tmpdir, log.NewNopLogger(), nil, options, []int64{10, 200}, nil)
require.NoError(t, err)
defer func() {
require.NoError(t, db.Close())
}()
db.DisableCompactions()
require.Len(t, db.Blocks(), 3)
if c.whenCompactable {
label := labels.FromStrings("foo", "bar")
app := db.Appender(context.Background())
_, err := app.Append(0, label, 301, 0)
require.NoError(t, err)
_, err = app.Append(0, label, 317, 0)
require.NoError(t, err)
require.NoError(t, app.Commit())
// The Head is compactable and will still be so at the end.
require.True(t, db.head.compactable())
defer func() {
require.True(t, db.head.compactable())
}()
}
// The disk blocks get compacted despite the Head compaction delay.
db.Compact(context.Background())
require.Len(t, db.Blocks(), 2)
})
}
}
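// Usage sketch (illustrative, not part of this change), assuming the exported
// tsdb.Open entry point of this tree; either set an explicit delay, or let
// the DB pick a random one at open time:
//
//	opts := tsdb.DefaultOptions()
//	opts.CompactionDelay = 30 * time.Second // fixed, explicit delay
//	// ...or: opts.EnableDelayedCompaction = true // random delay, up to 10% of the head chunk range
//	db, err := tsdb.Open(dir, logger, nil, opts, nil)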

View file

@ -21,6 +21,7 @@ import (
"io"
"io/fs"
"math"
"math/rand"
"os"
"path/filepath"
"slices"
@ -84,6 +85,8 @@ func DefaultOptions() *Options {
OutOfOrderCapMax: DefaultOutOfOrderCapMax,
EnableOverlappingCompaction: true,
EnableSharding: false,
EnableDelayedCompaction: false,
CompactionDelay: time.Duration(0),
}
}
@ -184,12 +187,18 @@ type Options struct {
// The reason why this flag exists is because there are various users of the TSDB
// that do not want vertical compaction happening on ingest time. Instead,
// they'd rather keep overlapping blocks and let another component do the overlapping compaction later.
// For Prometheus, this will always be true.
EnableOverlappingCompaction bool
// EnableSharding enables query sharding support in TSDB.
EnableSharding bool
// EnableDelayedCompaction, when set to true, assigns a random value to CompactionDelay during DB opening.
// When set to false, delayed compaction is disabled, unless CompactionDelay is set directly.
EnableDelayedCompaction bool
// CompactionDelay delays the start time of auto compactions.
// It can be increased by up to one minute if the DB does not commit too often.
CompactionDelay time.Duration
// NewCompactorFunc is a function that returns a TSDB compactor.
NewCompactorFunc NewCompactorFunc
@ -246,6 +255,9 @@ type DB struct {
// Cancel a running compaction when a shutdown is initiated.
compactCancel context.CancelFunc
// timeWhenCompactionDelayStarted helps delay the compactions start time.
timeWhenCompactionDelayStarted time.Time
// oooWasEnabled is true if out of order support was enabled at least one time
// during the time TSDB was up. In which case we need to keep supporting
// out-of-order compaction and vertical queries.
@ -998,6 +1010,10 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
db.oooWasEnabled.Store(true)
}
if opts.EnableDelayedCompaction {
opts.CompactionDelay = db.generateCompactionDelay()
}
go db.run(ctx)
return db, nil
@ -1186,6 +1202,12 @@ func (a dbAppender) Commit() error {
return err
}
// waitingForCompactionDelay returns true if the DB is waiting for the Head compaction delay.
// This doesn't guarantee that the Head is really compactable.
func (db *DB) waitingForCompactionDelay() bool {
return time.Since(db.timeWhenCompactionDelayStarted) < db.opts.CompactionDelay
}
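// Illustrative timeline for the delay (a sketch of the logic in Compact below):
// with delay d, the first trigger after the head becomes compactable records
// timeWhenCompactionDelayStarted = now; every trigger within (now, now+d)
// returns early via waitingForCompactionDelay; the first trigger at or after
// now+d compacts the head and resets the timestamp to zero.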
// Compact data if possible. After successful compaction blocks are reloaded
// which will also delete the blocks that fall out of the retention window.
// Old blocks are only deleted on reloadBlocks based on the new block's parent information.
@ -1219,7 +1241,21 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) {
return nil
default:
}
if !db.head.compactable() {
// Reset the delay start time once the head compactions are done.
// This would also reset it if a manual compaction was triggered while the auto compaction was in its delay period.
if !db.timeWhenCompactionDelayStarted.IsZero() {
db.timeWhenCompactionDelayStarted = time.Time{}
}
break
}
if db.timeWhenCompactionDelayStarted.IsZero() {
// Start counting for the delay.
db.timeWhenCompactionDelayStarted = time.Now()
}
if db.waitingForCompactionDelay() {
break
}
mint := db.head.MinTime()
@ -1295,6 +1331,9 @@ func (db *DB) CompactOOOHead(ctx context.Context) error {
return db.compactOOOHead(ctx)
}
// Callback for testing.
var compactOOOHeadTestingCallback func()
func (db *DB) compactOOOHead(ctx context.Context) error {
if !db.oooWasEnabled.Load() {
return nil
@ -1304,6 +1343,11 @@ func (db *DB) compactOOOHead(ctx context.Context) error {
return fmt.Errorf("get ooo compaction head: %w", err)
}
if compactOOOHeadTestingCallback != nil {
compactOOOHeadTestingCallback()
compactOOOHeadTestingCallback = nil
}
ulids, err := db.compactOOO(db.dir, oooHead)
if err != nil {
return fmt.Errorf("compact ooo head: %w", err)
@ -1421,7 +1465,7 @@ func (db *DB) compactBlocks() (err error) {
// If we have a lot of blocks to compact the whole process might take
// long enough that we end up with a HEAD block that needs to be written.
// Check if that's the case and stop compactions early.
if db.head.compactable() {
if db.head.compactable() && !db.waitingForCompactionDelay() {
level.Warn(db.logger).Log("msg", "aborting block compactions to persist the head block")
return nil
}
@ -1924,6 +1968,11 @@ func (db *DB) EnableCompactions() {
level.Info(db.logger).Log("msg", "Compactions enabled")
}
func (db *DB) generateCompactionDelay() time.Duration {
// Up to 10% of the head's chunkRange.
return time.Duration(rand.Int63n(db.head.chunkRange.Load()/10)) * time.Millisecond
}
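// Worked example (derived from the code above): with the default 2h block
// range the head chunkRange is 7,200,000 ms, so the delay is drawn uniformly
// from [0, 720,000) ms, i.e. under 12 minutes; with the 60s chunk range used
// in TestGenerateCompactionDelay it stays under 6 seconds.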
// ForceHeadMMap is intended for use only in tests and benchmarks.
func (db *DB) ForceHeadMMap() {
db.head.mmapHeadChunks()
@ -1980,7 +2029,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
}
}
blockQueriers := make([]storage.Querier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers
blockQueriers := make([]storage.Querier, 0, len(blocks)+1) // +1 to allow for possible head querier.
defer func() {
if err != nil {
@ -1992,10 +2041,12 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
}
}()
if maxt >= db.head.MinTime() {
overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime())
var headQuerier storage.Querier
if maxt >= db.head.MinTime() || overlapsOOO {
rh := NewRangeHead(db.head, mint, maxt)
var err error
inOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
headQuerier, err = db.blockQuerierFunc(rh, mint, maxt)
if err != nil {
return nil, fmt.Errorf("open block querier for head %s: %w", rh, err)
}
@ -2005,36 +2056,28 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
// won't run into a race later since any truncation that comes after will wait on this querier if it overlaps.
shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt)
if shouldClose {
if err := inOrderHeadQuerier.Close(); err != nil {
if err := headQuerier.Close(); err != nil {
return nil, fmt.Errorf("closing head block querier %s: %w", rh, err)
}
inOrderHeadQuerier = nil
headQuerier = nil
}
if getNew {
rh := NewRangeHead(db.head, newMint, maxt)
inOrderHeadQuerier, err = db.blockQuerierFunc(rh, newMint, maxt)
headQuerier, err = db.blockQuerierFunc(rh, newMint, maxt)
if err != nil {
return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err)
}
}
if inOrderHeadQuerier != nil {
blockQueriers = append(blockQueriers, inOrderHeadQuerier)
}
}
if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
var err error
outOfOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
if err != nil {
// If BlockQuerierFunc() failed, make sure to clean up the pending read created by NewOOORangeHead.
rh.isoState.Close()
if overlapsOOO {
// We need to fetch from in-order and out-of-order chunks: wrap the headQuerier.
isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef)
headQuerier = NewHeadAndOOOQuerier(mint, maxt, db.head, isoState, headQuerier)
}
return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err)
}
blockQueriers = append(blockQueriers, outOfOrderHeadQuerier)
if headQuerier != nil {
blockQueriers = append(blockQueriers, headQuerier)
}
for _, b := range blocks {
@ -2062,7 +2105,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
}
}
blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers
blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+1) // +1 to allow for possible head querier.
defer func() {
if err != nil {
@ -2074,9 +2117,11 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
}
}()
if maxt >= db.head.MinTime() {
overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime())
var headQuerier storage.ChunkQuerier
if maxt >= db.head.MinTime() || overlapsOOO {
rh := NewRangeHead(db.head, mint, maxt)
inOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt)
headQuerier, err = db.blockChunkQuerierFunc(rh, mint, maxt)
if err != nil {
return nil, fmt.Errorf("open querier for head %s: %w", rh, err)
}
@ -2086,35 +2131,28 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
// won't run into a race later since any truncation that comes after will wait on this querier if it overlaps.
shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt)
if shouldClose {
if err := inOrderHeadQuerier.Close(); err != nil {
if err := headQuerier.Close(); err != nil {
return nil, fmt.Errorf("closing head querier %s: %w", rh, err)
}
inOrderHeadQuerier = nil
headQuerier = nil
}
if getNew {
rh := NewRangeHead(db.head, newMint, maxt)
inOrderHeadQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt)
headQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt)
if err != nil {
return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err)
}
}
if inOrderHeadQuerier != nil {
blockQueriers = append(blockQueriers, inOrderHeadQuerier)
}
}
if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
outOfOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt)
if err != nil {
// If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead.
rh.isoState.Close()
if overlapsOOO {
// We need to fetch from in-order and out-of-order chunks: wrap the headQuerier.
isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef)
headQuerier = NewHeadAndOOOChunkQuerier(mint, maxt, db.head, isoState, headQuerier)
}
return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err)
}
blockQueriers = append(blockQueriers, outOfOrderHeadQuerier)
if headQuerier != nil {
blockQueriers = append(blockQueriers, headQuerier)
}
for _, b := range blocks {

View file

@ -63,7 +63,10 @@ func TestMain(m *testing.M) {
flag.Parse()
defaultIsolationDisabled = !isolationEnabled
goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"), goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"))
goleak.VerifyTestMain(m,
goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"),
goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"),
goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"))
}
func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
@ -4497,12 +4500,15 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
func TestOOOCompaction(t *testing.T) {
for name, scenario := range sampleTypeScenarios {
t.Run(name, func(t *testing.T) {
testOOOCompaction(t, scenario)
testOOOCompaction(t, scenario, false)
})
t.Run(name+"+extra", func(t *testing.T) {
testOOOCompaction(t, scenario, true)
})
}
}
func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSamples bool) {
dir := t.TempDir()
ctx := context.Background()
@ -4533,7 +4539,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
}
// Add in-order samples.
addSample(250, 350)
addSample(250, 300)
// Verify that the in-memory ooo chunk is empty.
checkEmptyOOOChunk := func(lbls labels.Labels) {
@ -4547,15 +4553,17 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
// Add ooo samples that create multiple chunks.
// 90 to 300 spans across 3 block ranges: [0, 120), [120, 240), [240, 360)
addSample(90, 310)
addSample(90, 300)
// Add the same samples again to create overlapping chunks.
// Since the active chunk won't start at 90 again, all the new
// chunks will have different time ranges than the previous chunks.
addSample(90, 310)
addSample(90, 300)
var highest int64 = 300
verifyDBSamples := func() {
var series1Samples, series2Samples []chunks.Sample
for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, 350}} {
for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, highest}} {
fromMins, toMins := r[0], r[1]
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
@ -4583,7 +4591,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
require.NoError(t, err)
require.False(t, created)
require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
require.Len(t, ms.ooo.oooMmappedChunks, 14) // 7 original, 7 duplicate.
require.Len(t, ms.ooo.oooMmappedChunks, 13) // 7 original, 6 duplicate.
}
checkNonEmptyOOOChunk(series1)
checkNonEmptyOOOChunk(series2)
@ -4601,6 +4609,15 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
require.NoError(t, err)
require.Greater(t, f.Size(), int64(100))
if addExtraSamples {
compactOOOHeadTestingCallback = func() {
addSample(90, 120) // Back in time, to generate a new OOO chunk.
addSample(300, 330) // Now some samples after the previous highest timestamp.
addSample(300, 330) // Repeat to generate an OOO chunk at these timestamps.
}
highest = 330
}
// OOO compaction happens here.
require.NoError(t, db.CompactOOOHead(ctx))
@ -4616,11 +4633,13 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
require.Equal(t, "00000001", files[0].Name())
f, err = files[0].Info()
require.NoError(t, err)
require.Equal(t, int64(0), f.Size())
// OOO stuff should not be present in the Head now.
checkEmptyOOOChunk(series1)
checkEmptyOOOChunk(series2)
if !addExtraSamples {
require.Equal(t, int64(0), f.Size())
// OOO stuff should not be present in the Head now.
checkEmptyOOOChunk(series1)
checkEmptyOOOChunk(series2)
}
verifySamples := func(block *Block, fromMins, toMins int64) {
series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
@ -4645,7 +4664,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
// Checking for expected data in the blocks.
verifySamples(db.Blocks()[0], 90, 119)
verifySamples(db.Blocks()[1], 120, 239)
verifySamples(db.Blocks()[2], 240, 310)
verifySamples(db.Blocks()[2], 240, 299)
// There should be a single m-map file.
mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
@ -4658,7 +4677,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds()))
require.NoError(t, err)
require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351)
verifySamples(db.Blocks()[3], 250, 350)
verifySamples(db.Blocks()[3], 250, highest)
verifyDBSamples() // Blocks created out of normal and OOO head now. But not merged.
@ -4675,7 +4694,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
require.Len(t, db.Blocks(), 3) // [0, 120), [120, 240), [240, 360)
verifySamples(db.Blocks()[0], 90, 119)
verifySamples(db.Blocks()[1], 120, 239)
verifySamples(db.Blocks()[2], 240, 350) // Merged block.
verifySamples(db.Blocks()[2], 240, highest) // Merged block.
verifyDBSamples() // Final state. Blocks from normal and OOO head are merged.
}
@ -7338,3 +7357,25 @@ func TestBlockQuerierAndBlockChunkQuerier(t *testing.T) {
// Make sure only block-1 is queried.
require.Equal(t, "block-1", lbls.Get("block"))
}
func TestGenerateCompactionDelay(t *testing.T) {
assertDelay := func(delay time.Duration) {
t.Helper()
require.GreaterOrEqual(t, delay, time.Duration(0))
// Less than 10% of the chunkRange.
require.LessOrEqual(t, delay, 6000*time.Millisecond)
}
opts := DefaultOptions()
opts.EnableDelayedCompaction = true
db := openTestDB(t, opts, []int64{60000})
defer func() {
require.NoError(t, db.Close())
}()
// The delay is generated and set while opening the DB.
assertDelay(db.opts.CompactionDelay)
for i := 0; i < 1000; i++ {
assertDelay(db.generateCompactionDelay())
}
}

View file

@ -178,6 +178,7 @@ type HeadOptions struct {
WALReplayConcurrency int
// EnableSharding enables ShardedPostings() support in the Head.
// EnableSharding is temporarily disabled during Init().
EnableSharding bool
}
@ -609,7 +610,7 @@ const cardinalityCacheExpirationTime = time.Duration(30) * time.Second
// Init loads data from the write ahead log and prepares the head for writes.
// It should be called before using an appender so that it
// limits the ingested samples to the head min valid time.
func (h *Head) Init(minValidTime int64) error {
func (h *Head) Init(minValidTime int64) (err error) {
h.minValidTime.Store(minValidTime)
defer func() {
h.postings.EnsureOrder(h.opts.WALReplayConcurrency)
@ -623,6 +624,24 @@ func (h *Head) Init(minValidTime int64) error {
}
}()
// If sharding is enabled, disable it while initializing, and calculate the shards later.
// We're going to use that field for other purposes during WAL replay,
// so we don't want to waste time on calculating a shard hash that we would lose anyway.
if h.opts.EnableSharding {
h.opts.EnableSharding = false
defer func() {
h.opts.EnableSharding = true
if err == nil {
// No locking is needed here as nobody should be writing while we're in Init.
for _, stripe := range h.series.series {
for _, s := range stripe {
s.shardHashOrMemoryMappedMaxTime = labels.StableHash(s.lset)
}
}
}
}()
}
level.Info(h.logger).Log("msg", "Replaying on-disk memory mappable chunks if any")
start := time.Now()
@ -683,7 +702,6 @@ func (h *Head) Init(minValidTime int64) error {
mmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
lastMmapRef chunks.ChunkDiskMapperRef
err error
mmapChunkReplayDuration time.Duration
)
@ -2068,9 +2086,11 @@ type memSeries struct {
ref chunks.HeadSeriesRef
meta *metadata.Metadata
// Series labels hash to use for sharding purposes. The value is always 0 when sharding has not
// been explicitly enabled in TSDB.
shardHash uint64
// Series labels hash to use for sharding purposes.
// The value is always 0 when sharding has not been explicitly enabled in TSDB.
// During WAL replay the value stored here is the max time of any m-mapped chunk,
// and the shard hash is re-calculated after WAL replay is complete.
shardHashOrMemoryMappedMaxTime uint64
// Everything after here should only be accessed with the lock held.
sync.Mutex
@ -2095,8 +2115,6 @@ type memSeries struct {
ooo *memSeriesOOOFields
mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay.
nextAt int64 // Timestamp at which to cut the next chunk.
histogramChunkHasComputedEndTime bool // True if nextAt has been predicted for the current histograms chunk; false otherwise.
pendingCommit bool // Whether there are samples waiting to be committed to this series.
@ -2127,10 +2145,10 @@ type memSeriesOOOFields struct {
func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, isolationDisabled bool) *memSeries {
s := &memSeries{
lset: lset,
ref: id,
nextAt: math.MinInt64,
shardHash: shardHash,
lset: lset,
ref: id,
nextAt: math.MinInt64,
shardHashOrMemoryMappedMaxTime: shardHash,
}
if !isolationDisabled {
s.txs = newTxRing(0)
@ -2218,6 +2236,12 @@ func (s *memSeries) truncateChunksBefore(mint int64, minOOOMmapRef chunks.ChunkD
return removedInOrder + removedOOO
}
// shardHash returns the shard hash of the series, only available after WAL replay.
func (s *memSeries) shardHash() uint64 { return s.shardHashOrMemoryMappedMaxTime }
// mmMaxTime returns the max time of any mmapped chunk in the series, only available during WAL replay.
func (s *memSeries) mmMaxTime() int64 { return int64(s.shardHashOrMemoryMappedMaxTime) }
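// Why the overloaded field is safe (illustrative note, not part of this
// change): Go's int64<->uint64 conversions are lossless two's-complement
// round-trips, so the max-time parked here during WAL replay is recovered
// exactly:
//
//	stored := uint64(int64(math.MinInt64)) // written by resetSeriesWithMMappedChunks
//	got := int64(stored)                   // == math.MinInt64 again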
// cleanupAppendIDsBelow cleans up older appendIDs. Has to be called after
// acquiring lock.
func (s *memSeries) cleanupAppendIDsBelow(bound uint64) {

View file

@ -19,6 +19,7 @@ import (
"fmt"
"math"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/prometheus/model/exemplar"
@ -466,6 +467,9 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi
// like federation and erroring out at that time would be extremely noisy.
// This only checks against the latest in-order sample.
// The OOO headchunk has its own method to detect these duplicates.
if s.lastHistogramValue != nil || s.lastFloatHistogramValue != nil {
return false, 0, storage.NewDuplicateHistogramToFloatErr(t, v)
}
if math.Float64bits(s.lastValue) != math.Float64bits(v) {
return false, 0, storage.NewDuplicateFloatErr(t, s.lastValue, v)
}
@ -837,7 +841,7 @@ func (a *headAppender) Commit() (err error) {
floatsAppended = len(a.samples)
histogramsAppended = len(a.histograms) + len(a.floatHistograms)
// number of samples out of order but accepted: with ooo enabled and within time window
floatOOOAccepted int
oooFloatsAccepted int
// number of samples rejected due to: out of order but OOO support disabled.
floatOOORejected int
histoOOORejected int
@ -933,7 +937,7 @@ func (a *headAppender) Commit() (err error) {
// Sample is OOO and OOO handling is enabled
// and the delta is within the OOO tolerance.
var mmapRefs []chunks.ChunkDiskMapperRef
ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax)
ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger)
if chunkCreated {
r, ok := oooMmapMarkers[series.ref]
if !ok || r != nil {
@ -966,7 +970,7 @@ func (a *headAppender) Commit() (err error) {
if s.T > oooMaxT {
oooMaxT = s.T
}
floatOOOAccepted++
oooFloatsAccepted++
} else {
// Sample is an exact duplicate of the last sample.
// NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk,
@ -1062,7 +1066,7 @@ func (a *headAppender) Commit() (err error) {
a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatTooOldRejected))
a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatsAppended))
a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended))
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOOAccepted))
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(oooFloatsAccepted))
a.head.updateMinMaxTime(inOrderMint, inOrderMaxt)
a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT)
@ -1080,18 +1084,18 @@ func (a *headAppender) Commit() (err error) {
}
// insert is like append, except it inserts. Used for OOO samples.
func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) {
func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger log.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) {
if s.ooo == nil {
s.ooo = &memSeriesOOOFields{}
}
c := s.ooo.oooHeadChunk
if c == nil || c.chunk.NumSamples() == int(oooCapMax) {
// Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks.
c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper, logger)
chunkCreated = true
}
ok := c.chunk.Insert(t, v)
ok := c.chunk.Insert(t, v, h, fh)
if ok {
if chunkCreated || t < c.minTime {
c.minTime = t
@ -1441,9 +1445,9 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange
}
// cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) {
ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper)
// The caller must ensure that s is locked and s.ooo is not nil.
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) {
ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper, logger)
s.ooo.oooHeadChunk = &oooHeadChunk{
chunk: NewOOOChunk(),
@ -1454,7 +1458,8 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk
return s.ooo.oooHeadChunk, ref
}
func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) []chunks.ChunkDiskMapperRef {
// s must be locked when calling.
func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) []chunks.ChunkDiskMapperRef {
if s.ooo == nil || s.ooo.oooHeadChunk == nil {
// OOO is not enabled or there is no head chunk, so nothing to m-map here.
return nil
@ -1466,6 +1471,10 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap
}
chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, 1)
for _, memchunk := range chks {
if len(s.ooo.oooMmappedChunks) >= (oooChunkIDMask - 1) {
level.Error(logger).Log("msg", "Too many OOO chunks, dropping data", "series", s.lset.String())
break
}
chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, memchunk.chunk, true, handleChunkWriteError)
chunkRefs = append(chunkRefs, chunkRef)
s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{

View file

@ -170,7 +170,7 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou
}
// Check if the series belong to the shard.
if s.shardHash%shardCount != shardIndex {
if s.shardHash()%shardCount != shardIndex {
continue
}
@ -199,13 +199,18 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
defer s.Unlock()
*chks = (*chks)[:0]
*chks = appendSeriesChunks(s, h.mint, h.maxt, *chks)
return nil
}
func appendSeriesChunks(s *memSeries, mint, maxt int64, chks []chunks.Meta) []chunks.Meta {
for i, c := range s.mmappedChunks {
// Do not expose chunks that are outside of the specified range.
if !c.OverlapsClosedInterval(h.mint, h.maxt) {
if !c.OverlapsClosedInterval(mint, maxt) {
continue
}
*chks = append(*chks, chunks.Meta{
chks = append(chks, chunks.Meta{
MinTime: c.minTime,
MaxTime: c.maxTime,
Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(i))),
@ -223,8 +228,8 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
} else {
maxTime = chk.maxTime
}
if chk.OverlapsClosedInterval(h.mint, h.maxt) {
*chks = append(*chks, chunks.Meta{
if chk.OverlapsClosedInterval(mint, maxt) {
chks = append(chks, chunks.Meta{
MinTime: chk.minTime,
MaxTime: maxTime,
Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(len(s.mmappedChunks)+j))),
@ -233,8 +238,7 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
j++
}
}
return nil
return chks
}
// headChunkID returns the HeadChunkID referred to by the given position.
@ -244,12 +248,20 @@ func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID {
return chunks.HeadChunkID(pos) + s.firstChunkID
}
const oooChunkIDMask = 1 << 23
// oooHeadChunkID returns the HeadChunkID referred to by the given position.
// Only the bottom 24 bits are used. Bit 23 is always 1 for an OOO chunk; for the rest:
// * 0 <= pos < len(s.oooMmappedChunks) refer to s.oooMmappedChunks[pos]
// * pos == len(s.oooMmappedChunks) refers to s.oooHeadChunk
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID {
return chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID
return (chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID) | oooChunkIDMask
}
func unpackHeadChunkRef(ref chunks.ChunkRef) (seriesID chunks.HeadSeriesRef, chunkID chunks.HeadChunkID, isOOO bool) {
sid, cid := chunks.HeadChunkRef(ref).Unpack()
return sid, (cid & (oooChunkIDMask - 1)), (cid & oooChunkIDMask) != 0
}
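// Worked example (illustrative): oooChunkIDMask == 1<<23 == 8388608. An OOO
// chunk at slice position 2 in a series with firstOOOChunkID == 5 gets the ID
// (5+2)|8388608 == 8388615; unpackHeadChunkRef masks the flag bit off again
// and returns chunkID == 7 with isOOO == true, while in-order IDs (all below
// 1<<23) pass through unchanged with isOOO == false.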
// LabelValueFor returns label value for the given label name in the series referred to by ID.
@ -339,10 +351,15 @@ func (h *headChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chu
return chk, nil, err
}
// ChunkWithCopy returns the chunk for the reference number.
// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk.
func (h *headChunkReader) ChunkWithCopy(meta chunks.Meta) (chunkenc.Chunk, int64, error) {
return h.chunk(meta, true)
type ChunkReaderWithCopy interface {
ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error)
}
// ChunkOrIterableWithCopy returns the chunk for the reference number.
// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk, plus the max time of the chunk.
func (h *headChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
chk, maxTime, err := h.chunk(meta, true)
return chk, nil, maxTime, err
}
// chunk returns the chunk for the reference number.
@ -358,9 +375,14 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
}
s.Lock()
defer s.Unlock()
return h.chunkFromSeries(s, cid, copyLastChunk)
}
// Call with s locked.
func (h *headChunkReader) chunkFromSeries(s *memSeries, cid chunks.HeadChunkID, copyLastChunk bool) (chunkenc.Chunk, int64, error) {
c, headChunk, isOpen, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool)
if err != nil {
s.Unlock()
return nil, 0, err
}
defer func() {
@ -374,7 +396,6 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
// This means that the chunk is outside the specified range.
if !c.OverlapsClosedInterval(h.mint, h.maxt) {
s.Unlock()
return nil, 0, storage.ErrNotFound
}
@ -391,7 +412,6 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
return nil, 0, err
}
}
s.Unlock()
return &safeHeadChunk{
Chunk: chk,
@ -461,14 +481,15 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi
return elem, true, offset == 0, nil
}
// oooMergedChunks return an iterable over one or more OOO chunks for the given
// mergedChunks returns an iterable over one or more OOO chunks for the given
// chunks.Meta reference from memory or by m-mapping it from the disk. The
// returned iterable will be a merge of all the overlapping chunks, if any,
// amongst all the chunks in the OOOHead.
// If hr is non-nil then in-order chunks are included.
// This function is not thread safe unless the caller holds a lock.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (*mergedOOOChunks, error) {
_, cid := chunks.HeadChunkRef(meta.Ref).Unpack()
func (s *memSeries) mergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, hr *headChunkReader, mint, maxt int64, maxMmapRef chunks.ChunkDiskMapperRef) (chunkenc.Iterable, error) {
_, cid, _ := unpackHeadChunkRef(meta.Ref)
// ix represents the index of the chunk in the s.mmappedChunks slice. Chunk IDs are
// incremented by 1 whenever a new chunk is created, hence (meta - firstChunkID) gives the slice index.
@ -490,6 +511,9 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks)+1)
for i, c := range s.ooo.oooMmappedChunks {
if maxMmapRef != 0 && c.ref > maxMmapRef {
break
}
if c.OverlapsClosedInterval(mint, maxt) {
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: chunks.Meta{
@ -506,6 +530,16 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{meta: meta})
}
if hr != nil { // Include in-order chunks.
metas := appendSeriesChunks(s, max(meta.MinTime, mint), min(meta.MaxTime, maxt), nil)
for _, m := range metas {
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: m,
ref: 0, // This tells the loop below it's an in-order head chunk.
})
}
}
// Next we want to sort all the collected chunks by min time so we can find
// those that overlap and stop when we know the rest don't.
slices.SortFunc(tmpChks, refLessByMinTimeAndMinRef)
@ -517,9 +551,17 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
continue
}
var iterable chunkenc.Iterable
if c.meta.Chunk != nil {
switch {
case c.meta.Chunk != nil:
iterable = c.meta.Chunk
} else {
case c.ref == 0: // This is an in-order head chunk.
_, cid := chunks.HeadChunkRef(c.meta.Ref).Unpack()
var err error
iterable, _, err = hr.chunkFromSeries(s, cid, false)
if err != nil {
return nil, fmt.Errorf("invalid head chunk: %w", err)
}
default:
chk, err := cdm.Chunk(c.ref)
if err != nil {
var cerr *chunks.CorruptionErr

View file

@ -23,6 +23,7 @@ import (
"path"
"path/filepath"
"reflect"
"runtime/pprof"
"sort"
"strconv"
"strings"
@ -89,6 +90,43 @@ func newTestHeadWithOptions(t testing.TB, compressWAL wlog.CompressionType, opts
return h, wal
}
// BenchmarkLoadRealWLs will be skipped unless the BENCHMARK_LOAD_REAL_WLS_DIR environment variable is set.
// BENCHMARK_LOAD_REAL_WLS_DIR should be the folder where `wal` and `chunks_head` are located.
// Optionally, BENCHMARK_LOAD_REAL_WLS_PROFILE can be set to a file path to write a CPU profile.
func BenchmarkLoadRealWLs(b *testing.B) {
dir := os.Getenv("BENCHMARK_LOAD_REAL_WLS_DIR")
if dir == "" {
b.Skip("BENCHMARK_LOAD_REAL_WLS_DIR is not set")
}
profileFile := os.Getenv("BENCHMARK_LOAD_REAL_WLS_PROFILE")
if profileFile != "" {
b.Logf("Will profile in %s", profileFile)
f, err := os.Create(profileFile)
require.NoError(b, err)
b.Cleanup(func() { f.Close() })
require.NoError(b, pprof.StartCPUProfile(f))
b.Cleanup(pprof.StopCPUProfile)
}
wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone)
require.NoError(b, err)
b.Cleanup(func() { wal.Close() })
wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone)
require.NoError(b, err)
b.Cleanup(func() { wbl.Close() })
// Load the WAL.
for i := 0; i < b.N; i++ {
opts := DefaultHeadOptions()
opts.ChunkDirRoot = dir
h, err := NewHead(nil, nil, wal, wbl, opts, nil)
require.NoError(b, err)
require.NoError(b, h.Init(0))
}
}
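// Example invocation (paths are placeholders):
//
//	BENCHMARK_LOAD_REAL_WLS_DIR=/data/prometheus \
//	BENCHMARK_LOAD_REAL_WLS_PROFILE=cpu.pprof \
//	go test -run '^$' -bench BenchmarkLoadRealWLs ./tsdb/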
func BenchmarkCreateSeries(b *testing.B) {
series := genSeries(b.N, 10, 0, 0)
h, _ := newTestHead(b, 10000, wlog.CompressionNone, false)
@ -5881,6 +5919,35 @@ func TestPostingsCardinalityStats(t *testing.T) {
require.Equal(t, statsForSomeLabel1, head.PostingsCardinalityStats("n", 1))
}
func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing.T) {
head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false)
t.Cleanup(func() { head.Close() })
ls := labels.FromStrings(labels.MetricName, "test")
{
// Append a float 10.0 @ 1_000
app := head.Appender(context.Background())
_, err := app.Append(0, ls, 1_000, 10.0)
require.NoError(t, err)
require.NoError(t, app.Commit())
}
{
// Append a float histogram @ 2_000
app := head.Appender(context.Background())
h := tsdbutil.GenerateTestHistogram(1)
_, err := app.AppendHistogram(0, ls, 2_000, h, nil)
require.NoError(t, err)
require.NoError(t, app.Commit())
}
app := head.Appender(context.Background())
_, err := app.Append(0, ls, 2_000, 10.0)
require.Error(t, err)
require.ErrorIs(t, err, storage.NewDuplicateHistogramToFloatErr(2_000, 10.0))
}
func TestHeadAppender_AppendCTZeroSample(t *testing.T) {
type appendableSamples struct {
ts int64

View file

@ -435,6 +435,8 @@ Outer:
return nil
}
func minInt64() int64 { return math.MinInt64 }
// resetSeriesWithMMappedChunks is only used during the WAL replay.
func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*mmappedChunk, walSeriesRef chunks.HeadSeriesRef) (overlapped bool) {
if mSeries.ref != walSeriesRef {
@ -481,10 +483,11 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
}
// Cache the last mmapped chunk time, so we can skip calling append() for samples it will reject.
if len(mmc) == 0 {
mSeries.mmMaxTime = math.MinInt64
mSeries.shardHashOrMemoryMappedMaxTime = uint64(minInt64())
} else {
mSeries.mmMaxTime = mmc[len(mmc)-1].maxTime
h.updateMinMaxTime(mmc[0].minTime, mSeries.mmMaxTime)
mmMaxTime := mmc[len(mmc)-1].maxTime
mSeries.shardHashOrMemoryMappedMaxTime = uint64(mmMaxTime)
h.updateMinMaxTime(mmc[0].minTime, mmMaxTime)
}
if len(oooMmc) != 0 {
// Mint and maxt can be in any chunk, they are not sorted.
@ -585,7 +588,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
unknownRefs++
continue
}
if s.T <= ms.mmMaxTime {
if s.T <= ms.mmMaxTime() {
continue
}
if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated {
@ -614,7 +617,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
unknownHistogramRefs++
continue
}
if s.t <= ms.mmMaxTime {
if s.t <= ms.mmMaxTime() {
continue
}
var chunkCreated bool
@ -887,7 +890,7 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) {
unknownRefs++
continue
}
ok, chunkCreated, _ := ms.insert(s.T, s.V, h.chunkDiskMapper, oooCapMax)
ok, chunkCreated, _ := ms.insert(s.T, s.V, nil, nil, h.chunkDiskMapper, oooCapMax, h.logger)
if chunkCreated {
h.metrics.chunksCreated.Inc()
h.metrics.chunks.Inc()

View file

@ -14,15 +14,10 @@
package tsdb
import (
"fmt"
"sort"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/oklog/ulid"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/tombstones"
)
// OOOChunk maintains samples in time-ascending order.
@ -39,13 +34,13 @@ func NewOOOChunk() *OOOChunk {
// Insert inserts the sample such that order is maintained.
// Returns false if insert was not possible due to the same timestamp already existing.
func (o *OOOChunk) Insert(t int64, v float64) bool {
func (o *OOOChunk) Insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) bool {
// Although out-of-order samples can be out-of-order amongst themselves, we
// are opinionated and expect them to be mostly in-order, meaning we can
// try to append at the end first if the new timestamp is higher than the
// last known timestamp.
if len(o.samples) == 0 || t > o.samples[len(o.samples)-1].t {
o.samples = append(o.samples, sample{t, v, nil, nil})
o.samples = append(o.samples, sample{t, v, h, fh})
return true
}
@ -54,7 +49,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool {
if i >= len(o.samples) {
// None found. Append it at the end.
o.samples = append(o.samples, sample{t, v, nil, nil})
o.samples = append(o.samples, sample{t, v, h, fh})
return true
}
@ -66,7 +61,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool {
// Expand length by 1 to make room. Use a zero sample, we will overwrite it anyway.
o.samples = append(o.samples, sample{})
copy(o.samples[i+1:], o.samples[i:])
o.samples[i] = sample{t, v, nil, nil}
o.samples[i] = sample{t, v, h, fh}
return true
}
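// Worked example (illustrative): inserting t=5 into a chunk holding
// timestamps [1, 3, 7] finds insertion index i=2, grows the slice with a zero
// sample, shifts [7] one slot right via copy, and writes the new sample at
// index 2, keeping the chunk sorted. Inserting t=3 again returns false, as
// the timestamp already exists.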
@ -142,9 +137,9 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error
if newChunk != nil { // A new chunk was allocated.
if !recoded {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
cmint = s.t
}
chunk = newChunk
cmint = s.t
}
case chunkenc.EncFloatHistogram:
// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
@ -157,9 +152,9 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error
if newChunk != nil { // A new chunk was allocated.
if !recoded {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
cmint = s.t
}
chunk = newChunk
cmint = s.t
}
}
cmaxt = s.t
@ -170,75 +165,3 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error
}
return chks, nil
}
var _ BlockReader = &OOORangeHead{}
// OOORangeHead allows querying Head out of order samples via BlockReader
// interface implementation.
type OOORangeHead struct {
head *Head
// mint and maxt are tracked because when a query is handled we only want
// the timerange of the query and having preexisting pointers to the first
// and last timestamp help with that.
mint, maxt int64
isoState *oooIsolationState
}
func NewOOORangeHead(head *Head, mint, maxt int64, minRef chunks.ChunkDiskMapperRef) *OOORangeHead {
isoState := head.oooIso.TrackReadAfter(minRef)
return &OOORangeHead{
head: head,
mint: mint,
maxt: maxt,
isoState: isoState,
}
}
func (oh *OOORangeHead) Index() (IndexReader, error) {
return NewOOOHeadIndexReader(oh.head, oh.mint, oh.maxt, oh.isoState.minRef), nil
}
func (oh *OOORangeHead) Chunks() (ChunkReader, error) {
return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState), nil
}
func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) {
// As stated in the design doc https://docs.google.com/document/d/1Kppm7qL9C-BJB1j6yb6-9ObG3AbdZnFUBYPNNWwDBYM/edit?usp=sharing
// Tombstones are not supported for out of order metrics.
return tombstones.NewMemTombstones(), nil
}
var oooRangeHeadULID = ulid.MustParse("0000000000XXXX000RANGEHEAD")
func (oh *OOORangeHead) Meta() BlockMeta {
return BlockMeta{
MinTime: oh.mint,
MaxTime: oh.maxt,
ULID: oooRangeHeadULID,
Stats: BlockStats{
NumSeries: oh.head.NumSeries(),
},
}
}
// Size returns the size taken by the Head block.
func (oh *OOORangeHead) Size() int64 {
return oh.head.Size()
}
// String returns an human readable representation of the out of order range
// head. It's important to keep this function in order to avoid the struct dump
// when the head is stringified in errors or logs.
func (oh *OOORangeHead) String() string {
return fmt.Sprintf("ooo range head (mint: %d, maxt: %d)", oh.MinTime(), oh.MaxTime())
}
func (oh *OOORangeHead) MinTime() int64 {
return oh.mint
}
func (oh *OOORangeHead) MaxTime() int64 {
return oh.maxt
}

View file

@ -27,17 +27,12 @@ import (
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/util/annotations"
)
var _ IndexReader = &OOOHeadIndexReader{}
var _ IndexReader = &HeadAndOOOIndexReader{}
// OOOHeadIndexReader implements IndexReader so ooo samples in the head can be
// accessed.
// It also has a reference to headIndexReader so we can leverage on its
// IndexReader implementation for all the methods that remain the same. We
// decided to do this to avoid code duplication.
// The only methods that change are the ones about getting Series and Postings.
type OOOHeadIndexReader struct {
type HeadAndOOOIndexReader struct {
*headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible.
lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef
}
@ -53,25 +48,16 @@ func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator
return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables)
}
func NewOOOHeadIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *OOOHeadIndexReader {
func NewHeadAndOOOIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader {
hr := &headIndexReader{
head: head,
mint: mint,
maxt: maxt,
}
return &OOOHeadIndexReader{hr, lastGarbageCollectedMmapRef}
return &HeadAndOOOIndexReader{hr, lastGarbageCollectedMmapRef}
}
func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
return oh.series(ref, builder, chks, oh.lastGarbageCollectedMmapRef, 0)
}
// lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so
// any chunk at or before this ref will not be considered. 0 disables this check.
//
// maxMmapRef tells upto what max m-map chunk that we can consider. If it is non-0, then
// the oooHeadChunk will not be considered.
func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef) error {
func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
s := oh.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
@ -88,10 +74,19 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
defer s.Unlock()
*chks = (*chks)[:0]
if s.ooo == nil {
return nil
if s.ooo != nil {
return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks)
}
*chks = appendSeriesChunks(s, oh.mint, oh.maxt, *chks)
return nil
}
// lastGarbageCollectedMmapRef gives the last mmap chunk that may currently be garbage
// collected; any chunk at or before this ref will not be considered. 0 disables this check.
//
// maxMmapRef tells up to which m-map chunk we can consider. If it is non-0, then
// the oooHeadChunk will not be considered.
func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, chks *[]chunks.Meta) error {
tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks))
addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) {
@ -106,7 +101,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
// Collect all chunks that overlap the query range.
if s.ooo.oooHeadChunk != nil {
c := s.ooo.oooHeadChunk
if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 {
if c.OverlapsClosedInterval(mint, maxt) && maxMmapRef == 0 {
ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
if len(c.chunk.samples) > 0 { // Empty samples happen in tests, at least.
chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime)
@ -125,12 +120,16 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
}
for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
c := s.ooo.oooMmappedChunks[i]
if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) {
if c.OverlapsClosedInterval(mint, maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) {
ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
addChunk(c.minTime, c.maxTime, ref, nil)
}
}
if includeInOrder {
tmpChks = appendSeriesChunks(s, mint, maxt, tmpChks)
}
// There is nothing to do if we did not collect any chunk.
if len(tmpChks) == 0 {
return nil
@ -167,11 +166,10 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
return nil
}
// LabelValues needs to be overridden from the headIndexReader implementation due
// to the check that happens at the beginning where we make sure that the query
// interval overlaps with the head minooot and maxooot.
func (oh *OOOHeadIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
if oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxOOOTime() {
// LabelValues needs to be overridden from the headIndexReader implementation
// so we can return label values from either the in-order range or the OOO range.
func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
if oh.maxt < oh.head.MinTime() && oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxTime() && oh.mint > oh.head.MaxOOOTime() {
return []string{}, nil
}
@ -223,39 +221,30 @@ func lessByMinTimeAndMinRef(a, b chunks.Meta) int {
}
}
func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
switch len(values) {
case 0:
return index.EmptyPostings(), nil
case 1:
return oh.head.postings.Get(name, values[0]), nil // TODO(ganesh) Also call GetOOOPostings
default:
// TODO(ganesh) We want to only return postings for out of order series.
res := make([]index.Postings, 0, len(values))
for _, value := range values {
res = append(res, oh.head.postings.Get(name, value)) // TODO(ganesh) Also call GetOOOPostings
}
return index.Merge(ctx, res...), nil
type HeadAndOOOChunkReader struct {
head *Head
mint, maxt int64
cr *headChunkReader // If nil, only read OOO chunks.
maxMmapRef chunks.ChunkDiskMapperRef
oooIsoState *oooIsolationState
}
func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, cr *headChunkReader, oooIsoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOChunkReader {
return &HeadAndOOOChunkReader{
head: head,
mint: mint,
maxt: maxt,
cr: cr,
maxMmapRef: maxMmapRef,
oooIsoState: oooIsoState,
}
}
type OOOHeadChunkReader struct {
head *Head
mint, maxt int64
isoState *oooIsolationState
}
func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState) *OOOHeadChunkReader {
return &OOOHeadChunkReader{
head: head,
mint: mint,
maxt: maxt,
isoState: isoState,
func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
sid, _, isOOO := unpackHeadChunkRef(meta.Ref)
if !isOOO {
return cr.cr.ChunkOrIterable(meta)
}
}
func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
sid, _ := chunks.HeadChunkRef(meta.Ref).Unpack()
s := cr.head.series.getByID(sid)
// This means that the series has been garbage collected.
@ -264,34 +253,35 @@ func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk,
}
s.Lock()
if s.ooo == nil {
// There is no OOO data for this series.
s.Unlock()
return nil, nil, storage.ErrNotFound
}
mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt)
mc, err := s.mergedChunks(meta, cr.head.chunkDiskMapper, cr.cr, cr.mint, cr.maxt, cr.maxMmapRef)
s.Unlock()
if err != nil {
return nil, nil, err
}
// This means that the query range did not overlap with the requested chunk.
if len(mc.chunkIterables) == 0 {
return nil, nil, storage.ErrNotFound
}
return nil, mc, nil
return nil, mc, err
}
func (cr OOOHeadChunkReader) Close() error {
if cr.isoState != nil {
cr.isoState.Close()
// ChunkOrIterableWithCopy implements ChunkReaderWithCopy. The special copy behaviour
// is only implemented for the in-order head chunk.
func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
_, _, isOOO := unpackHeadChunkRef(meta.Ref)
if !isOOO {
return cr.cr.ChunkOrIterableWithCopy(meta)
}
chk, iter, err := cr.ChunkOrIterable(meta)
return chk, iter, 0, err
}
func (cr *HeadAndOOOChunkReader) Close() error {
if cr.cr != nil && cr.cr.isoState != nil {
cr.cr.isoState.Close()
}
if cr.oooIsoState != nil {
cr.oooIsoState.Close()
}
return nil
}
type OOOCompactionHead struct {
oooIR *OOOHeadIndexReader
head *Head
lastMmapRef chunks.ChunkDiskMapperRef
lastWBLFile int
postings []storage.SeriesRef
@ -308,6 +298,7 @@ type OOOCompactionHead struct {
// on the sample append latency. So call NewOOOCompactionHead only right before compaction.
func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, error) {
ch := &OOOCompactionHead{
head: head,
chunkRange: head.chunkRange.Load(),
mint: math.MaxInt64,
maxt: math.MinInt64,
@ -321,15 +312,14 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
ch.lastWBLFile = lastWBLFile
}
ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64, 0)
hr := headIndexReader{head: head, mint: ch.mint, maxt: ch.maxt}
n, v := index.AllPostingsKey()
// TODO: verify this gets only ooo samples.
p, err := ch.oooIR.Postings(ctx, n, v)
// TODO: filter to series with OOO samples, before sorting.
p, err := hr.Postings(ctx, n, v)
if err != nil {
return nil, err
}
p = ch.oooIR.SortedPostings(p)
p = hr.SortedPostings(p)
var lastSeq, lastOff int
for p.Next() {
@ -350,7 +340,7 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
}
var lastMmapRef chunks.ChunkDiskMapperRef
mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper, head.logger)
if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref}
@ -386,7 +376,7 @@ func (ch *OOOCompactionHead) Index() (IndexReader, error) {
}
func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) {
return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil), nil
return NewHeadAndOOOChunkReader(ch.head, ch.mint, ch.maxt, nil, nil, ch.lastMmapRef), nil
}
func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) {
@ -412,12 +402,12 @@ func (ch *OOOCompactionHead) Meta() BlockMeta {
// Only the method of BlockReader interface are valid for the cloned OOOCompactionHead.
func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionHead {
return &OOOCompactionHead{
oooIR: NewOOOHeadIndexReader(ch.oooIR.head, mint, maxt, 0),
head: ch.head,
lastMmapRef: ch.lastMmapRef,
postings: ch.postings,
chunkRange: ch.chunkRange,
mint: ch.mint,
maxt: ch.maxt,
mint: mint,
maxt: maxt,
}
}
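A hedged usage sketch for the clone, consistent with the comment above that only the BlockReader methods are valid on it; blockRanges and the error handling are illustrative, not taken from this diff:

// Hypothetical compaction driver: one clone per output block time range.
for _, r := range blockRanges {
	rh := ch.CloneForTimeRange(r.mint, r.maxt)
	indexr, err := rh.Index() // BlockReader methods (Index, Chunks, Tombstones, Meta) are safe.
	if err != nil {
		return err
	}
	_ = indexr // ... feed into the block writer.
}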
@@ -437,7 +427,8 @@ func NewOOOCompactionHeadIndexReader(ch *OOOCompactionHead) IndexReader {
}
func (ir *OOOCompactionHeadIndexReader) Symbols() index.StringIter {
return ir.ch.oooIR.Symbols()
hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt}
return hr.Symbols()
}
func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string, values ...string) (index.Postings, error) {
@@ -458,11 +449,28 @@ func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.P
}
func (ir *OOOCompactionHeadIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings {
return ir.ch.oooIR.ShardedPostings(p, shardIndex, shardCount)
hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt}
return hr.ShardedPostings(p, shardIndex, shardCount)
}
func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
return ir.ch.oooIR.series(ref, builder, chks, 0, ir.ch.lastMmapRef)
s := ir.ch.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
ir.ch.head.metrics.seriesNotFound.Inc()
return storage.ErrNotFound
}
builder.Assign(s.labels())
s.Lock()
defer s.Unlock()
*chks = (*chks)[:0]
if s.ooo == nil {
return nil
}
return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, chks)
}
func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
@@ -490,5 +498,91 @@ func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, posti
}
func (ir *OOOCompactionHeadIndexReader) Close() error {
return ir.ch.oooIR.Close()
return nil
}
// HeadAndOOOQuerier queries both the head and the out-of-order head.
type HeadAndOOOQuerier struct {
mint, maxt int64
head *Head
index IndexReader
chunkr ChunkReader
querier storage.Querier
}
func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier {
cr := &headChunkReader{
head: head,
mint: mint,
maxt: maxt,
isoState: head.iso.State(mint, maxt),
}
return &HeadAndOOOQuerier{
mint: mint,
maxt: maxt,
head: head,
index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef),
chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
querier: querier,
}
}
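A minimal construction sketch, mirroring the wiring used by the benchmark further down in this diff; the zero watermark passed to TrackReadAfter and the wrapped in-order querier are the caller's choice:

isoState := head.oooIso.TrackReadAfter(0) // pin OOO isolation before reading
q := NewHeadAndOOOQuerier(mint, maxt, head, isoState, inOrderQuerier)
defer q.Close() // closes the combined chunk reader, then the wrapped querier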
func (q *HeadAndOOOQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return q.querier.LabelValues(ctx, name, hints, matchers...)
}
func (q *HeadAndOOOQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return q.querier.LabelNames(ctx, hints, matchers...)
}
func (q *HeadAndOOOQuerier) Close() error {
q.chunkr.Close()
return q.querier.Close()
}
func (q *HeadAndOOOQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
return selectSeriesSet(ctx, sortSeries, hints, matchers, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt)
}
// HeadAndOOOChunkQuerier queries both the head and the out-of-order head.
type HeadAndOOOChunkQuerier struct {
mint, maxt int64
head *Head
index IndexReader
chunkr ChunkReader
querier storage.ChunkQuerier
}
func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier {
cr := &headChunkReader{
head: head,
mint: mint,
maxt: maxt,
isoState: head.iso.State(mint, maxt),
}
return &HeadAndOOOChunkQuerier{
mint: mint,
maxt: maxt,
head: head,
index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef),
chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
querier: querier,
}
}
func (q *HeadAndOOOChunkQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return q.querier.LabelValues(ctx, name, hints, matchers...)
}
func (q *HeadAndOOOChunkQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return q.querier.LabelNames(ctx, hints, matchers...)
}
func (q *HeadAndOOOChunkQuerier) Close() error {
q.chunkr.Close()
return q.querier.Close()
}
func (q *HeadAndOOOChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
return selectChunkSeriesSet(ctx, sortSeries, hints, matchers, rangeHeadULID, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt)
}

View file

@@ -316,7 +316,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
// Set Ref to whatever Ref the chunk has, which we refer to by ID.
for ref, c := range intervals {
if c.ID == e.ID {
meta.Ref = chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), chunks.HeadChunkID(ref)))
meta.Ref = chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), s1.oooHeadChunkID(ref)))
break
}
}
@@ -341,7 +341,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
})
}
ir := NewOOOHeadIndexReader(h, tc.queryMinT, tc.queryMaxT, 0)
ir := NewHeadAndOOOIndexReader(h, tc.queryMinT, tc.queryMaxT, 0)
var chks []chunks.Meta
var b labels.ScratchBuilder
@@ -421,17 +421,17 @@ func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenari
name: "LabelValues calls with ooo head query range not overlapping out-of-order data",
queryMinT: 100,
queryMaxT: 100,
expValues1: []string{},
expValues2: []string{},
expValues3: []string{},
expValues4: []string{},
expValues1: []string{"bar1"},
expValues2: nil,
expValues3: []string{"bar1", "bar2"},
expValues4: []string{"bar1", "bar2"},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
// We first want to test using a head index reader that covers the biggest query interval
oh := NewOOOHeadIndexReader(head, tc.queryMinT, tc.queryMaxT, 0)
oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMaxT, 0)
matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")}
values, err := oh.LabelValues(ctx, "foo", matchers...)
sort.Strings(values)
@@ -481,10 +481,10 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
t.Run("Getting a non existing chunk fails with not found error", func(t *testing.T) {
db := newTestDBWithOpts(t, opts)
cr := NewOOOHeadChunkReader(db.head, 0, 1000, nil)
cr := NewHeadAndOOOChunkReader(db.head, 0, 1000, nil, nil, 0)
defer cr.Close()
c, iterable, err := cr.ChunkOrIterable(chunks.Meta{
Ref: 0x1000000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300,
Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300,
})
require.Nil(t, iterable)
require.Equal(t, err, fmt.Errorf("not found"))
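On the changed constant: head chunk refs pack a series ref and a chunk ID into one 64-bit value, and the unified reader flags out-of-order chunks inside the chunk ID, so the not-found probe must use the OOO variant (0x1800000) rather than the plain in-order ref (0x1000000). A sketch of that packing, assuming the top bit of the 3-byte chunk ID is the OOO marker:

const oooChunkIDBit = 1 << 23                         // assumed OOO flag within the chunk ID
ref := chunks.ChunkRef(uint64(1)<<24 | oooChunkIDBit) // series 1, chunk 0, OOO: 0x1800000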
@@ -832,14 +832,14 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
// The Series method populates the chunk metas, taking a copy of the
// head OOO chunk if necessary. These are then used by the ChunkReader.
ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
var chks []chunks.Meta
var b labels.ScratchBuilder
err = ir.Series(s1Ref, &b, &chks)
require.NoError(t, err)
require.Equal(t, len(tc.expChunksSamples), len(chks))
cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil)
cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0)
defer cr.Close()
for i := 0; i < len(chks); i++ {
c, iterable, err := cr.ChunkOrIterable(chks[i])
@@ -997,7 +997,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
// The Series method populates the chunk metas, taking a copy of the
// head OOO chunk if necessary. These are then used by the ChunkReader.
ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
var chks []chunks.Meta
var b labels.ScratchBuilder
err = ir.Series(s1Ref, &b, &chks)
@@ -1013,7 +1013,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
}
require.NoError(t, app.Commit())
cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil)
cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0)
defer cr.Close()
for i := 0; i < len(chks); i++ {
c, iterable, err := cr.ChunkOrIterable(chks[i])

View file

@@ -14,8 +14,14 @@
package tsdb
import (
"math"
"testing"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/stretchr/testify/require"
)
@@ -52,7 +58,7 @@ func TestOOOInsert(t *testing.T) {
chunk := NewOOOChunk()
chunk.samples = makeEvenSampleSlice(numPreExisting)
newSample := samplify(valOdd(insertPos))
chunk.Insert(newSample.t, newSample.f)
chunk.Insert(newSample.t, newSample.f, nil, nil)
var expSamples []sample
// Our expected new samples slice will start with the original samples.
@@ -83,7 +89,7 @@ func TestOOOInsertDuplicate(t *testing.T) {
dupSample := chunk.samples[dupPos]
dupSample.f = 0.123
ok := chunk.Insert(dupSample.t, dupSample.f)
ok := chunk.Insert(dupSample.t, dupSample.f, nil, nil)
expSamples := makeEvenSampleSlice(num) // We expect no change.
require.False(t, ok)
@@ -91,3 +97,136 @@
}
}
}
type chunkVerify struct {
encoding chunkenc.Encoding
minTime int64
maxTime int64
}
func TestOOOChunks_ToEncodedChunks(t *testing.T) {
h1 := tsdbutil.GenerateTestHistogram(1)
// Make h2 appendable but with more buckets, to trigger recoding.
h2 := h1.Copy()
h2.PositiveSpans = append(h2.PositiveSpans, histogram.Span{Offset: 1, Length: 1})
h2.PositiveBuckets = append(h2.PositiveBuckets, 12)
testCases := map[string]struct {
samples []sample
expectedCounterResets []histogram.CounterResetHint
expectedChunks []chunkVerify
}{
"empty": {
samples: []sample{},
},
"has floats": {
samples: []sample{
{t: 1000, f: 43.0},
{t: 1100, f: 42.0},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncXOR, minTime: 1000, maxTime: 1100},
},
},
"mix of floats and histograms": {
samples: []sample{
{t: 1000, f: 43.0},
{t: 1100, h: h1},
{t: 1200, f: 42.0},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset, histogram.UnknownCounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncXOR, minTime: 1000, maxTime: 1000},
{encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100},
{encoding: chunkenc.EncXOR, minTime: 1200, maxTime: 1200},
},
},
"has a counter reset": {
samples: []sample{
{t: 1000, h: h2},
{t: 1100, h: h1},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.CounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncHistogram, minTime: 1000, maxTime: 1000},
{encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100},
},
},
"has a recoded histogram": { // Regression test for wrong minT, maxT in histogram recoding.
samples: []sample{
{t: 0, h: h1},
{t: 1, h: h2},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncHistogram, minTime: 0, maxTime: 1},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// Sanity check.
require.Equal(t, len(tc.samples), len(tc.expectedCounterResets), "number of samples and counter resets")
oooChunk := OOOChunk{}
for _, s := range tc.samples {
switch s.Type() {
case chunkenc.ValFloat:
oooChunk.Insert(s.t, s.f, nil, nil)
case chunkenc.ValHistogram:
oooChunk.Insert(s.t, 0, s.h.Copy(), nil)
case chunkenc.ValFloatHistogram:
oooChunk.Insert(s.t, 0, nil, s.fh.Copy())
default:
t.Fatalf("unexpected sample type %d", s.Type())
}
}
chunks, err := oooChunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
require.NoError(t, err)
require.Equal(t, len(tc.expectedChunks), len(chunks), "number of chunks")
sampleIndex := 0
for i, c := range chunks {
require.Equal(t, tc.expectedChunks[i].encoding, c.chunk.Encoding(), "chunk %d encoding", i)
require.Equal(t, tc.expectedChunks[i].minTime, c.minTime, "chunk %d minTime", i)
require.Equal(t, tc.expectedChunks[i].maxTime, c.maxTime, "chunk %d maxTime", i)
samples, err := storage.ExpandSamples(c.chunk.Iterator(nil), newSample)
require.GreaterOrEqual(t, len(tc.samples)-sampleIndex, len(samples), "too many samples in chunk %d, expected at most %d", i, len(tc.samples)-sampleIndex)
require.NoError(t, err)
if len(samples) == 0 {
// Ignore empty chunks.
continue
}
switch c.chunk.Encoding() {
case chunkenc.EncXOR:
for j, s := range samples {
require.Equal(t, chunkenc.ValFloat, s.Type())
// XOR chunks don't have counter reset hints, so we shouldn't expect anything other than UnknownCounterReset.
require.Equal(t, histogram.UnknownCounterReset, tc.expectedCounterResets[sampleIndex+j], "sample reset hint %d", sampleIndex+j)
require.Equal(t, tc.samples[sampleIndex+j].f, s.F(), "sample %d", sampleIndex+j)
}
case chunkenc.EncHistogram:
for j, s := range samples {
require.Equal(t, chunkenc.ValHistogram, s.Type())
require.Equal(t, tc.expectedCounterResets[sampleIndex+j], s.H().CounterResetHint, "sample reset hint %d", sampleIndex+j)
compareTo := tc.samples[sampleIndex+j].h.Copy()
compareTo.CounterResetHint = tc.expectedCounterResets[sampleIndex+j]
require.Equal(t, compareTo, s.H().Compact(0), "sample %d", sampleIndex+j)
}
case chunkenc.EncFloatHistogram:
for j, s := range samples {
require.Equal(t, chunkenc.ValFloatHistogram, s.Type())
require.Equal(t, tc.expectedCounterResets[sampleIndex+j], s.FH().CounterResetHint, "sample reset hint %d", sampleIndex+j)
compareTo := tc.samples[sampleIndex+j].fh.Copy()
compareTo.CounterResetHint = tc.expectedCounterResets[sampleIndex+j]
require.Equal(t, compareTo, s.FH().Compact(0), "sample %d", sampleIndex+j)
}
}
sampleIndex += len(samples)
}
require.Equal(t, len(tc.samples), sampleIndex, "number of samples")
})
}
}
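The chunk shape this test consumes, inferred from the c.chunk, c.minTime and c.maxTime accesses above; the concrete type is internal to the package, so the name here is a best guess:

type memChunk struct { // assumed name; fields match the accesses in the loop above
	chunk            chunkenc.Chunk
	minTime, maxTime int64
}

As the test cases demonstrate, ToEncodedChunks starts a new chunk whenever the encoding changes between consecutive samples and on detected counter resets.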

View file

@@ -115,20 +115,24 @@ func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) {
}
func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet {
mint := q.mint
maxt := q.maxt
return selectSeriesSet(ctx, sortSeries, hints, ms, q.index, q.chunks, q.tombstones, q.mint, q.maxt)
}
func selectSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher,
index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64,
) storage.SeriesSet {
disableTrimming := false
sharded := hints != nil && hints.ShardCount > 0
p, err := PostingsForMatchers(ctx, q.index, ms...)
p, err := PostingsForMatchers(ctx, index, ms...)
if err != nil {
return storage.ErrSeriesSet(err)
}
if sharded {
p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
}
if sortSeries {
p = q.index.SortedPostings(p)
p = index.SortedPostings(p)
}
if hints != nil {
@@ -137,11 +141,11 @@ func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *stora
disableTrimming = hints.DisableTrimming
if hints.Func == "series" {
// When you're only looking up metadata (for example series API), you don't need to load any chunks.
return newBlockSeriesSet(q.index, newNopChunkReader(), q.tombstones, p, mint, maxt, disableTrimming)
return newBlockSeriesSet(index, newNopChunkReader(), tombstones, p, mint, maxt, disableTrimming)
}
}
return newBlockSeriesSet(q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming)
return newBlockSeriesSet(index, chunks, tombstones, p, mint, maxt, disableTrimming)
}
// blockChunkQuerier provides chunk querying access to a single block database.
@@ -159,8 +163,12 @@ func NewBlockChunkQuerier(b BlockReader, mint, maxt int64) (storage.ChunkQuerier
}
func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet {
mint := q.mint
maxt := q.maxt
return selectChunkSeriesSet(ctx, sortSeries, hints, ms, q.blockID, q.index, q.chunks, q.tombstones, q.mint, q.maxt)
}
func selectChunkSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher,
blockID ulid.ULID, index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64,
) storage.ChunkSeriesSet {
disableTrimming := false
sharded := hints != nil && hints.ShardCount > 0
@@ -169,17 +177,17 @@ func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *
maxt = hints.End
disableTrimming = hints.DisableTrimming
}
p, err := PostingsForMatchers(ctx, q.index, ms...)
p, err := PostingsForMatchers(ctx, index, ms...)
if err != nil {
return storage.ErrChunkSeriesSet(err)
}
if sharded {
p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
}
if sortSeries {
p = q.index.SortedPostings(p)
p = index.SortedPostings(p)
}
return NewBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming)
return NewBlockChunkSeriesSet(blockID, index, chunks, tombstones, p, mint, maxt, disableTrimming)
}
// PostingsForMatchers assembles a single postings iterator against the index reader
@@ -633,14 +641,16 @@ func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool {
}
}
hcr, ok := p.cr.(*headChunkReader)
hcr, ok := p.cr.(ChunkReaderWithCopy)
var iterable chunkenc.Iterable
if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 {
// ChunkWithCopy will copy the head chunk.
// ChunkOrIterableWithCopy will copy the head chunk, if it can.
var maxt int64
p.currMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currMeta)
// For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here.
p.currMeta.MaxTime = maxt
p.currMeta.Chunk, iterable, maxt, p.err = hcr.ChunkOrIterableWithCopy(p.currMeta)
if p.currMeta.Chunk != nil {
// For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here.
p.currMeta.MaxTime = maxt
}
} else {
p.currMeta.Chunk, iterable, p.err = p.cr.ChunkOrIterable(p.currMeta)
}

View file

@@ -20,6 +20,7 @@ import (
"testing"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/stretchr/testify/require"
@@ -254,56 +255,98 @@ func BenchmarkMergedStringIter(b *testing.B) {
b.ReportAllocs()
}
func BenchmarkQuerierSelect(b *testing.B) {
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = b.TempDir()
h, err := NewHead(nil, nil, nil, nil, opts, nil)
func createHeadForBenchmarkSelect(b *testing.B, numSeries int, addSeries func(app storage.Appender, i int)) (*Head, *DB) {
dir := b.TempDir()
opts := DefaultOptions()
opts.OutOfOrderCapMax = 255
opts.OutOfOrderTimeWindow = 1000
db, err := Open(dir, nil, nil, opts, nil)
require.NoError(b, err)
defer h.Close()
b.Cleanup(func() {
require.NoError(b, db.Close())
})
h := db.Head()
app := h.Appender(context.Background())
numSeries := 1000000
for i := 0; i < numSeries; i++ {
app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0)
addSeries(app, i)
}
require.NoError(b, app.Commit())
return h, db
}
bench := func(b *testing.B, br BlockReader, sorted bool) {
matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")
for s := 1; s <= numSeries; s *= 10 {
b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) {
q, err := NewBlockQuerier(br, 0, int64(s-1))
require.NoError(b, err)
func benchmarkSelect(b *testing.B, queryable storage.Queryable, numSeries int, sorted bool) {
matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")
b.ResetTimer()
for s := 1; s <= numSeries; s *= 10 {
b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) {
q, err := queryable.Querier(0, int64(s-1))
require.NoError(b, err)
b.ResetTimer()
for i := 0; i < b.N; i++ {
ss := q.Select(context.Background(), sorted, nil, matcher)
for ss.Next() {
}
require.NoError(b, ss.Err())
b.ResetTimer()
for i := 0; i < b.N; i++ {
ss := q.Select(context.Background(), sorted, nil, matcher)
for ss.Next() {
}
q.Close()
})
}
require.NoError(b, ss.Err())
}
q.Close()
})
}
}
func BenchmarkQuerierSelect(b *testing.B) {
numSeries := 1000000
h, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) {
_, err := app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0)
if err != nil {
b.Fatal(err)
}
})
b.Run("Head", func(b *testing.B) {
bench(b, h, false)
benchmarkSelect(b, db, numSeries, false)
})
b.Run("SortedHead", func(b *testing.B) {
bench(b, h, true)
benchmarkSelect(b, db, numSeries, true)
})
tmpdir := b.TempDir()
blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil)
require.NoError(b, err)
defer func() {
require.NoError(b, block.Close())
}()
b.Run("Block", func(b *testing.B) {
bench(b, block, false)
tmpdir := b.TempDir()
blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil)
require.NoError(b, err)
defer func() {
require.NoError(b, block.Close())
}()
benchmarkSelect(b, (*queryableBlock)(block), numSeries, false)
})
}
// Type wrapper to let a Block be a Queryable in benchmarkSelect().
type queryableBlock Block
func (pb *queryableBlock) Querier(mint, maxt int64) (storage.Querier, error) {
return NewBlockQuerier((*Block)(pb), mint, maxt)
}
func BenchmarkQuerierSelectWithOutOfOrder(b *testing.B) {
numSeries := 1000000
_, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) {
l := labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix))
ref, err := app.Append(0, l, int64(i+1), 0)
if err != nil {
b.Fatal(err)
}
_, err = app.Append(ref, l, int64(i), 1) // Out of order sample
if err != nil {
b.Fatal(err)
}
})
b.Run("Head", func(b *testing.B) {
benchmarkSelect(b, db, numSeries, false)
})
}

View file

@@ -3169,12 +3169,11 @@ func BenchmarkQueries(b *testing.B) {
qHead, err := NewBlockQuerier(NewRangeHead(head, 1, nSamples), 1, nSamples)
require.NoError(b, err)
qOOOHead, err := NewBlockQuerier(NewOOORangeHead(head, 1, nSamples, 0), 1, nSamples)
require.NoError(b, err)
isoState := head.oooIso.TrackReadAfter(0)
qOOOHead := NewHeadAndOOOQuerier(1, nSamples, head, isoState, qHead)
queryTypes = append(queryTypes, qt{
fmt.Sprintf("_Head_oooPercent:%d", oooPercentage),
storage.NewMergeQuerier([]storage.Querier{qHead, qOOOHead}, nil, storage.ChainedSeriesMerge),
fmt.Sprintf("_Head_oooPercent:%d", oooPercentage), qOOOHead,
})
}

View file

@@ -30,12 +30,10 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
return r
}
func GenerateTestHistogramsWithUnknownResetHint(n int) []*histogram.Histogram {
hs := GenerateTestHistograms(n)
for i := range hs {
hs[i].CounterResetHint = histogram.UnknownCounterReset
}
return hs
func GenerateTestHistogramWithHint(n int, hint histogram.CounterResetHint) *histogram.Histogram {
h := GenerateTestHistogram(n)
h.CounterResetHint = hint
return h
}
// GenerateTestHistogram but it is up to the user to set any known counter reset hint.

View file

@@ -20,7 +20,6 @@ import (
"math"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"time"
@@ -265,9 +264,9 @@ func (w *Watcher) loop() {
// Run the watcher, which will tail the WAL until the quit channel is closed
// or an error case is hit.
func (w *Watcher) Run() error {
_, lastSegment, err := w.firstAndLast()
_, lastSegment, err := Segments(w.walDir)
if err != nil {
return fmt.Errorf("wal.Segments: %w", err)
return fmt.Errorf("Segments: %w", err)
}
// We want to ensure this is false across iterations since
@@ -318,57 +317,20 @@ func (w *Watcher) Run() error {
// findSegmentForIndex finds the first segment greater than or equal to index.
func (w *Watcher) findSegmentForIndex(index int) (int, error) {
refs, err := w.segments(w.walDir)
refs, err := listSegments(w.walDir)
if err != nil {
return -1, err
}
for _, r := range refs {
if r >= index {
return r, nil
if r.index >= index {
return r.index, nil
}
}
return -1, errors.New("failed to find segment for index")
}
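For context, the segment-listing shape these call sites imply (the concrete listSegments lives in wlog.go; the name field is an assumption):

type segmentRef struct {
	name  string // assumed: on-disk file name of the segment
	index int    // numeric segment index, read above as r.index
}

// listSegments(dir string) ([]segmentRef, error) is expected to return the refs
// found in dir, sorted by index.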
func (w *Watcher) firstAndLast() (int, int, error) {
refs, err := w.segments(w.walDir)
if err != nil {
return -1, -1, err
}
if len(refs) == 0 {
return -1, -1, nil
}
return refs[0], refs[len(refs)-1], nil
}
// Copied from tsdb/wlog/wlog.go so we do not have to open a WAL.
// Plan is to move WAL watcher to TSDB and dedupe these implementations.
func (w *Watcher) segments(dir string) ([]int, error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var refs []int
for _, f := range files {
k, err := strconv.Atoi(f.Name())
if err != nil {
continue
}
refs = append(refs, k)
}
slices.Sort(refs)
for i := 0; i < len(refs)-1; i++ {
if refs[i]+1 != refs[i+1] {
return nil, errors.New("segments are not sequential")
}
}
return refs, nil
}
func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, size int64) error {
err := w.readSegment(r, segmentNum, tail)
@@ -447,35 +409,17 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
// Currently doing a garbage collect, try again later.
}
// if a newer segment is produced, read the current one until the end and move on.
case <-segmentTicker.C:
_, last, err := w.firstAndLast()
_, last, err := Segments(w.walDir)
if err != nil {
return fmt.Errorf("segments: %w", err)
return fmt.Errorf("Segments: %w", err)
}
// Check if new segments exist.
if last <= segmentNum {
continue
if last > segmentNum {
return w.readAndHandleError(reader, segmentNum, tail, size)
}
err = w.readSegment(reader, segmentNum, tail)
// Ignore errors reading to end of segment whilst replaying the WAL.
if !tail {
switch {
case err != nil && !errors.Is(err, io.EOF):
level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err)
case reader.Offset() != size:
level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
}
return nil
}
// Otherwise, when we are tailing, non-EOFs are fatal.
if err != nil && !errors.Is(err, io.EOF) {
return err
}
return nil
continue
// We haven't read due to a notification in quite some time; try reading anyway.
case <-readTicker.C:
@@ -484,7 +428,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
if err != nil {
return err
}
// still want to reset the ticker so we don't read too often
// reset the ticker so we don't read too often
readTicker.Reset(readTimeout)
case <-w.readNotify:
@@ -492,7 +436,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
if err != nil {
return err
}
// still want to reset the ticker so we don't read too often
// reset the ticker so we don't read too often
readTicker.Reset(readTimeout)
}
}
@@ -732,29 +676,30 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err
}
// Ensure we read the whole contents of every segment in the checkpoint dir.
segs, err := w.segments(checkpointDir)
segs, err := listSegments(checkpointDir)
if err != nil {
return fmt.Errorf("unable to get segments in checkpoint dir: %w", err)
}
for _, seg := range segs {
size, err := getSegmentSize(checkpointDir, seg)
for _, segRef := range segs {
size, err := getSegmentSize(checkpointDir, segRef.index)
if err != nil {
return fmt.Errorf("getSegmentSize: %w", err)
}
sr, err := OpenReadSegment(SegmentName(checkpointDir, seg))
sr, err := OpenReadSegment(SegmentName(checkpointDir, segRef.index))
if err != nil {
return fmt.Errorf("unable to open segment: %w", err)
}
defer sr.Close()
r := NewLiveReader(w.logger, w.readerMetrics, sr)
if err := readFn(w, r, index, false); err != nil && !errors.Is(err, io.EOF) {
err = readFn(w, r, index, false)
sr.Close()
if err != nil && !errors.Is(err, io.EOF) {
return fmt.Errorf("readSegment: %w", err)
}
if r.Offset() != size {
return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, seg, size, r.Offset())
return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, segRef.index, size, r.Offset())
}
}

View file

@@ -230,11 +230,11 @@ func TestTailSamples(t *testing.T) {
for i := first; i <= last; i++ {
segment, err := OpenReadSegment(SegmentName(watcher.walDir, i))
require.NoError(t, err)
defer segment.Close()
reader := NewLiveReader(nil, NewLiveReaderMetrics(nil), segment)
// Use tail true so we can ensure we got the right number of samples.
watcher.readSegment(reader, i, true)
require.NoError(t, segment.Close())
}
expectedSeries := seriesCount

util/junitxml/junitxml.go Normal file
View file

@@ -0,0 +1,81 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package junitxml
import (
"encoding/xml"
"io"
)
type JUnitXML struct {
XMLName xml.Name `xml:"testsuites"`
Suites []*TestSuite `xml:"testsuite"`
}
type TestSuite struct {
Name string `xml:"name,attr"`
TestCount int `xml:"tests,attr"`
FailureCount int `xml:"failures,attr"`
ErrorCount int `xml:"errors,attr"`
SkippedCount int `xml:"skipped,attr"`
Timestamp string `xml:"timestamp,attr"`
Cases []*TestCase `xml:"testcase"`
}
type TestCase struct {
Name string `xml:"name,attr"`
Failures []string `xml:"failure,omitempty"`
Error string `xml:"error,omitempty"`
}
func (j *JUnitXML) WriteXML(h io.Writer) error {
return xml.NewEncoder(h).Encode(j)
}
func (j *JUnitXML) Suite(name string) *TestSuite {
ts := &TestSuite{Name: name}
j.Suites = append(j.Suites, ts)
return ts
}
func (ts *TestSuite) Fail(f string) {
ts.FailureCount++
curt := ts.lastCase()
curt.Failures = append(curt.Failures, f)
}
func (ts *TestSuite) lastCase() *TestCase {
if len(ts.Cases) == 0 {
ts.Case("unknown")
}
return ts.Cases[len(ts.Cases)-1]
}
func (ts *TestSuite) Case(name string) *TestSuite {
j := &TestCase{
Name: name,
}
ts.Cases = append(ts.Cases, j)
ts.TestCount++
return ts
}
func (ts *TestSuite) Settime(timestamp string) {
ts.Timestamp = timestamp
}
func (ts *TestSuite) Abort(e error) {
ts.ErrorCount++
curt := ts.lastCase()
curt.Error = e.Error()
}

View file

@@ -0,0 +1,66 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package junitxml
import (
"bytes"
"encoding/xml"
"errors"
"testing"
)
func TestJunitOutput(t *testing.T) {
var buf bytes.Buffer
var test JUnitXML
x := FakeTestSuites()
if err := x.WriteXML(&buf); err != nil {
t.Fatalf("Failed to encode XML: %v", err)
}
output := buf.Bytes()
err := xml.Unmarshal(output, &test)
if err != nil {
t.Errorf("Unmarshal failed with error: %v", err)
}
var total int
var cases int
total = len(test.Suites)
if total != 3 {
t.Errorf("JUnit output had %d testsuite elements; expected 3\n", total)
}
for _, i := range test.Suites {
cases += len(i.Cases)
}
if cases != 7 {
t.Errorf("JUnit output had %d testcase elements; expected 7\n", cases)
}
}
func FakeTestSuites() *JUnitXML {
ju := &JUnitXML{}
good := ju.Suite("all good")
good.Case("alpha")
good.Case("beta")
good.Case("gamma")
mixed := ju.Suite("mixed")
mixed.Case("good")
bad := mixed.Case("bad")
bad.Fail("once")
bad.Fail("twice")
mixed.Case("ugly").Abort(errors.New("buggy"))
ju.Suite("fast").Fail("fail early")
return ju
}
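For illustration, encoding FakeTestSuites() yields XML of roughly this shape (indentation added here; the encoder writes it unindented). Note how the bare Fail on the "fast" suite auto-creates an "unknown" case via lastCase:

<testsuites>
  <testsuite name="all good" tests="3" failures="0" errors="0" skipped="0" timestamp="">
    <testcase name="alpha"></testcase>
    <testcase name="beta"></testcase>
    <testcase name="gamma"></testcase>
  </testsuite>
  <testsuite name="mixed" tests="3" failures="2" errors="1" skipped="0" timestamp="">
    <testcase name="good"></testcase>
    <testcase name="bad"><failure>once</failure><failure>twice</failure></testcase>
    <testcase name="ugly"><error>buggy</error></testcase>
  </testsuite>
  <testsuite name="fast" tests="1" failures="1" errors="0" skipped="0" timestamp="">
    <testcase name="unknown"><failure>fail early</failure></testcase>
  </testsuite>
</testsuites>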

View file

@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
"version": "0.53.1",
"version": "0.54.0-rc.1",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@@ -29,7 +29,7 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.53.1",
"@prometheus-io/lezer-promql": "0.54.0-rc.1",
"lru-cache": "^7.18.3"
},
"devDependencies": {
@@ -37,10 +37,10 @@
"@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.1",
"@codemirror/state": "^6.3.3",
"@codemirror/view": "^6.28.3",
"@codemirror/view": "^6.29.1",
"@lezer/common": "^1.2.1",
"@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.1",
"@lezer/lr": "^1.4.2",
"isomorphic-fetch": "^3.0.0",
"nock": "^13.5.4"
},

View file

@@ -775,7 +775,7 @@ describe('computeStartCompletePosition test', () => {
it(value.title, () => {
const state = createEditorState(value.expr);
const node = syntaxTree(state).resolve(value.pos, -1);
const result = computeStartCompletePosition(node, value.pos);
const result = computeStartCompletePosition(state, node, value.pos);
expect(value.expectedStart).toEqual(result);
});
});

View file

@@ -21,7 +21,6 @@ import {
BinaryExpr,
BoolModifier,
Div,
Duration,
Eql,
EqlRegex,
EqlSingle,
@@ -40,7 +39,6 @@
Mul,
Neq,
NeqRegex,
NumberLiteral,
OffsetExpr,
Or,
Pow,
@@ -54,6 +52,8 @@
UnquotedLabelMatcher,
QuotedLabelMatcher,
QuotedLabelName,
NumberDurationLiteralInDurationContext,
NumberDurationLiteral,
} from '@prometheus-io/lezer-promql';
import { Completion, CompletionContext, CompletionResult } from '@codemirror/autocomplete';
import { EditorState } from '@codemirror/state';
@@ -179,7 +179,8 @@ function computeStartCompleteLabelPositionInLabelMatcherOrInGroupingLabel(node:
// This is an important step because the start position will be used by CMN to find the string and then use it to filter the CompletionResult.
// A wrong `start` position will lead to the completion not working.
// Note: this method is exported only for testing purposes.
export function computeStartCompletePosition(node: SyntaxNode, pos: number): number {
export function computeStartCompletePosition(state: EditorState, node: SyntaxNode, pos: number): number {
const currentText = state.doc.slice(node.from, pos).toString();
let start = node.from;
if (node.type.id === LabelMatchers || node.type.id === GroupingLabels) {
start = computeStartCompleteLabelPositionInLabelMatcherOrInGroupingLabel(node, pos);
@@ -191,11 +192,16 @@ export function computeStartCompletePosition(node: SyntaxNode, pos: number): num
start++;
} else if (
node.type.id === OffsetExpr ||
(node.type.id === NumberLiteral && node.parent?.type.id === 0 && node.parent.parent?.type.id === SubqueryExpr) ||
// Since durations and numbers are equivalent, writing go[5] or go[5d] is syntactically valid.
// Previously we were able to guess when we had to autocomplete the duration based on the error node,
// but that is not possible anymore.
// So we have to analyze the string around the current node to see whether the duration unit is already present.
(node.type.id === NumberDurationLiteralInDurationContext && !durationTerms.map((v) => v.label).includes(currentText[currentText.length - 1])) ||
(node.type.id === NumberDurationLiteral && node.parent?.type.id === 0 && node.parent.parent?.type.id === SubqueryExpr) ||
(node.type.id === 0 &&
(node.parent?.type.id === OffsetExpr ||
node.parent?.type.id === MatrixSelector ||
(node.parent?.type.id === SubqueryExpr && containsAtLeastOneChild(node.parent, Duration))))
(node.parent?.type.id === SubqueryExpr && containsAtLeastOneChild(node.parent, NumberDurationLiteralInDurationContext))))
) {
start = pos;
}
@@ -230,7 +236,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
result.push({ kind: ContextKind.Duration });
break;
}
if (node.parent?.type.id === SubqueryExpr && containsAtLeastOneChild(node.parent, Duration)) {
if (node.parent?.type.id === SubqueryExpr && containsAtLeastOneChild(node.parent, NumberDurationLiteralInDurationContext)) {
// we are likely in the given situation:
// `rate(foo[5d:5])`
// so we should autocomplete a duration
@@ -434,7 +440,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
result.push({ kind: ContextKind.MetricName, metricName: state.sliceDoc(node.from, node.to).slice(1, -1) });
}
break;
case NumberLiteral:
case NumberDurationLiteral:
if (node.parent?.type.id === 0 && node.parent.parent?.type.id === SubqueryExpr) {
// Here we are likely in this situation:
// `go[5d:4]`
@@ -449,7 +455,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
result.push({ kind: ContextKind.Number });
}
break;
case Duration:
case NumberDurationLiteralInDurationContext:
case OffsetExpr:
result.push({ kind: ContextKind.Duration });
break;
@@ -591,7 +597,7 @@ export class HybridComplete implements CompleteStrategy {
}
}
return asyncResult.then((result) => {
return arrayToCompletionResult(result, computeStartCompletePosition(tree, pos), pos, completeSnippet, span);
return arrayToCompletionResult(result, computeStartCompletePosition(state, tree, pos), pos, completeSnippet, span);
});
}

View file

@@ -17,7 +17,7 @@ import {
BinaryExpr,
FunctionCall,
MatrixSelector,
NumberLiteral,
NumberDurationLiteral,
OffsetExpr,
ParenExpr,
StepInvariantExpr,
@@ -42,7 +42,7 @@ export function getType(node: SyntaxNode | null): ValueType {
return getType(node.firstChild);
case StringLiteral:
return ValueType.string;
case NumberLiteral:
case NumberDurationLiteral:
return ValueType.scalar;
case MatrixSelector:
return ValueType.matrix;

Some files were not shown because too many files have changed in this diff.