Merge pull request #14703 from jan--f/3.0-main-sync-24-08-21

3.0 main sync 24-08-21
Björn Rabenstein 2024-08-21 13:54:22 +02:00 committed by GitHub
commit b860327989
96 changed files with 3333 additions and 1323 deletions

@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
- uses: bufbuild/buf-setup-action@aceb106d2419c4cff48863df90161d92decb8591 # v1.35.1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1

@@ -13,7 +13,7 @@ jobs:
if: github.repository_owner == 'prometheus'
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
- uses: bufbuild/buf-setup-action@aceb106d2419c4cff48863df90161d92decb8591 # v1.35.1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1

@@ -75,7 +75,7 @@ jobs:
runs-on: windows-latest
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: 1.22.x
- run: |
@@ -166,7 +166,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Install Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
cache: false
go-version: 1.22.x
@@ -179,18 +179,18 @@ jobs:
- name: Checkout repository
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Install Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: 1.22.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
with:
args: --verbose
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
version: v1.59.1
version: v1.60.1
fuzzing:
uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request'

@@ -27,12 +27,12 @@ jobs:
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Initialize CodeQL
uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
uses: github/codeql-action/init@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
uses: github/codeql-action/autobuild@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
uses: github/codeql-action/analyze@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15

@@ -21,7 +21,7 @@ jobs:
fuzz-seconds: 600
dry-run: false
- name: Upload Crash
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts

@@ -26,7 +26,7 @@ jobs:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3
uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # tag=v2.4.0
with:
results_file: results.sarif
results_format: sarif
@@ -37,7 +37,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # tag=v4.3.4
with:
name: SARIF file
path: results.sarif
@@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11
uses: github/codeql-action/upload-sarif@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # tag=v3.25.15
with:
sarif_file: results.sarif

@@ -11,8 +11,49 @@ _Please add changes here that are only in the release-3.0 branch. These will be
## unreleased
* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
* [FEATURE] Remote-Write: Add sender and receiver support for [Remote Write 2.0-rc.2](https://prometheus.io/docs/specs/remote_write_spec_2_0/) specification #14395 #14427 #14444
* [ENHANCEMENT] Remote-Write: 1.x messages against Remote Write 2.x Receivers will now have correct values for `prometheus_storage_<samples|histograms|exemplar>_failed_total` in case of partial errors #14444
* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042
## 2.54.0-rc.1 / 2024-08-05
Release 2.54 brings a release candidate of a major new version of [Remote Write: 2.0](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
This is experimental at this time and may still change.
Remote-write v2 is enabled by default, but can be disabled via feature-flag `web.remote-write-receiver.accepted-protobuf-messages`.
* [CHANGE] Remote-Write: `highest_timestamp_in_seconds` and `queue_highest_sent_timestamp_seconds` metrics now initialized to 0. #14437
* [CHANGE] API: Split warnings from info annotations in API response. #14327
* [FEATURE] Remote-Write: Version 2.0 experimental, plus metadata in WAL via feature flag `metadata-wal-records` (defaults on). #14395,#14427,#14444
* [FEATURE] PromQL: add limitk() and limit_ratio() aggregation operators (see the example after this list). #12503
* [ENHANCEMENT] PromQL: Accept underscores in literal numbers, e.g. 1_000_000 for 1 million. #12821
* [ENHANCEMENT] PromQL: float literal numbers and durations are now interchangeable (experimental). Example: `time() - my_timestamp > 10m`. #9138
* [ENHANCEMENT] PromQL: use Kahan summation for sum(). #14074,#14362
* [ENHANCEMENT] PromQL (experimental native histograms): Optimize `histogram_count` and `histogram_sum` functions. #14097
* [ENHANCEMENT] TSDB: Better support for out-of-order experimental native histogram samples. #14438
* [ENHANCEMENT] TSDB: Optimise seek within index. #14393
* [ENHANCEMENT] TSDB: Optimise deletion of stale series. #14307
* [ENHANCEMENT] TSDB: Reduce locking to optimise adding and removing series. #13286,#14286
* [ENHANCEMENT] TSDB: Small optimisation: streamline special handling for out-of-order data. #14396,#14584
* [ENHANCEMENT] Regexps: Optimize patterns with multiple prefixes. #13843,#14368
* [ENHANCEMENT] Regexps: Optimize patterns containing multiple literal strings. #14173
* [ENHANCEMENT] AWS SD: expose Primary IPv6 addresses as __meta_ec2_primary_ipv6_addresses. #14156
* [ENHANCEMENT] Docker SD: add MatchFirstNetwork for containers with multiple networks. #10490
* [ENHANCEMENT] OpenStack SD: Use `flavor.original_name` if available. #14312
* [ENHANCEMENT] UI (experimental native histograms): more accurate representation. #13680,#14430
* [ENHANCEMENT] Agent: `out_of_order_time_window` config option now applies to agent. #14094
* [ENHANCEMENT] Notifier: Send any outstanding Alertmanager notifications when shutting down. #14290
* [ENHANCEMENT] Rules: Add label-matcher support to Rules API. #10194
* [ENHANCEMENT] HTTP API: Add url to message logged on error while sending response. #14209
* [BUGFIX] CLI: escape `|` characters when generating docs. #14420
* [BUGFIX] PromQL (experimental native histograms): Fix some binary operators between native histogram values. #14454
* [BUGFIX] TSDB: LabelNames API could fail during compaction. #14279
* [BUGFIX] TSDB: Fix rare issue where pending OOO read can be left dangling if creating querier fails. #14341
* [BUGFIX] TSDB: fix check for context cancellation in LabelNamesFor. #14302
* [BUGFIX] Rules: Fix rare panic on reload. #14366
* [BUGFIX] Config: In YAML marshalling, do not output a regexp field if it was never set. #14004
* [BUGFIX] Remote-Write: reject samples with future timestamps. #14304
* [BUGFIX] Remote-Write: Fix data corruption in remote write if max_sample_age is applied. #14078
* [BUGFIX] Notifier: Fix Alertmanager discovery not updating under heavy load. #14174
* [BUGFIX] Regexes: some Unicode characters were not matched by case-insensitive comparison. #14170,#14299
* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515
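For illustration, a hypothetical query combining two of the additions above: the `limitk()` aggregation (experimental, behind `--enable-feature=promql-experimental-functions`) and underscore-separated number literals. The metric name is an assumption:

```
limitk(3, rate(http_requests_total[5m]) > 1_000)
```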
## 2.53.1 / 2024-07-10

@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.59.1
GOLANGCI_LINT_VERSION ?= v1.60.1
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))

@@ -152,6 +152,7 @@ type flagConfig struct {
queryConcurrency int
queryMaxSamples int
RemoteFlushDeadline model.Duration
nameEscapingScheme string
featureList []string
memlimitRatio float64
@@ -231,6 +232,12 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
case "delayed-compaction":
c.tsdb.EnableDelayedCompaction = true
level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.")
case "utf8-names":
model.NameValidationScheme = model.UTF8Validation
level.Info(logger).Log("msg", "Experimental UTF-8 support enabled")
case "":
continue
default:
@@ -482,6 +489,15 @@ func main() {
os.Exit(1)
}
if cfg.nameEscapingScheme != "" {
scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme)
if err != nil {
fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme)
os.Exit(1)
}
model.NameEscapingScheme = scheme
}
if agentMode && len(serverOnlyFlags) > 0 {
fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags)
os.Exit(3)
@@ -1679,6 +1695,8 @@ type tsdbOptions struct {
MaxExemplars int64
EnableMemorySnapshotOnShutdown bool
EnableNativeHistograms bool
EnableDelayedCompaction bool
EnableOverlappingCompaction bool
}
func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
@@ -1699,7 +1717,8 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown,
EnableNativeHistograms: opts.EnableNativeHistograms,
OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow,
EnableOverlappingCompaction: true,
EnableDelayedCompaction: opts.EnableDelayedCompaction,
EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
}
}
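As a companion to the new name escaping handling above, here is a minimal standalone sketch (not part of the diff) of `model.ToEscapingScheme` from `prometheus/common`, which the new code in `main()` uses for validation; the sample inputs are assumptions taken from the error message above:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	for _, s := range []string{"values", "underscores", "dots", "bogus"} {
		scheme, err := model.ToEscapingScheme(s)
		if err != nil {
			// main() prints a similar message and exits with code 1.
			fmt.Printf("%q -> error: %v\n", s, err)
			continue
		}
		// On success, main() assigns the result to model.NameEscapingScheme.
		fmt.Printf("%q -> %v\n", s, scheme)
	}
}
```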

@@ -866,16 +866,16 @@ func displayHistogram(dataType string, datas []int, total int) {
fmt.Println()
}
func generateBucket(min, max int) (start, end, step int) {
s := (max - min) / 10
func generateBucket(minVal, maxVal int) (start, end, step int) {
s := (maxVal - minVal) / 10
step = 10
for step < s && step <= 10000 {
step *= 10
}
start = min - min%step
end = max - max%step + step
start = minVal - minVal%step
end = maxVal - maxVal%step + step
return
}
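A worked example of the renamed helper (the rename presumably avoids clashing with Go 1.21's `min`/`max` builtins); a sketch, assuming the function is in scope:

```go
// generateBucket(12, 247):
//   s    = (247 - 12) / 10 = 23
//   step = smallest power of ten >= s, starting at 10 (capped at 10000): 100
//   start = 12  - 12%100        = 0
//   end   = 247 - 247%100 + 100 = 300
start, end, step := generateBucket(12, 247)
fmt.Println(start, end, step) // 0 300 100
```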

@@ -20,6 +20,7 @@ import (
"math"
"os"
"runtime"
"slices"
"strings"
"testing"
"time"
@@ -152,12 +153,18 @@ func TestTSDBDump(t *testing.T) {
expectedMetrics, err := os.ReadFile(tt.expectedDump)
require.NoError(t, err)
expectedMetrics = normalizeNewLine(expectedMetrics)
// even though in case of one matcher samples are not sorted, the order in the cases above should stay the same.
require.Equal(t, string(expectedMetrics), dumpedMetrics)
// Sort both, because Prometheus does not guarantee the output order.
require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
})
}
}
func sortLines(buf string) string {
lines := strings.Split(buf, "\n")
slices.Sort(lines)
return strings.Join(lines, "\n")
}
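A quick sketch of the new helper's behavior; note that the empty string produced by a trailing newline sorts to the front:

```go
fmt.Printf("%q\n", sortLines("b 1\na 2\n")) // "\na 2\nb 1"
```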
func TestTSDBDumpOpenMetrics(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
@@ -169,7 +176,7 @@ func TestTSDBDumpOpenMetrics(t *testing.T) {
require.NoError(t, err)
expectedMetrics = normalizeNewLine(expectedMetrics)
dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
require.Equal(t, string(expectedMetrics), dumpedMetrics)
require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
}
func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) {

@@ -67,6 +67,11 @@ var (
}
)
const (
LegacyValidationConfig = "legacy"
UTF8ValidationConfig = "utf8"
)
// Load parses the YAML input s into a Config.
func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
cfg := &Config{}
@@ -446,6 +451,8 @@ type GlobalConfig struct {
// Keep no more than this many dropped targets per job.
// 0 means no limit.
KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
// Allow UTF8 Metric and Label Names.
MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"`
}
// ScrapeProtocol represents supported protocol for scraping metrics.
@@ -471,6 +478,7 @@ var (
PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4"
OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8
ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
@@ -656,6 +664,8 @@ type ScrapeConfig struct {
// Keep no more than this many dropped targets per job.
// 0 means no limit.
KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
// Allow UTF8 Metric and Label Names.
MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
@@ -762,6 +772,17 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
}
switch globalConfig.MetricNameValidationScheme {
case "", LegacyValidationConfig:
case UTF8ValidationConfig:
if model.NameValidationScheme != model.UTF8Validation {
return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names")
}
default:
return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
}
c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme
return nil
}
@@ -1090,8 +1111,9 @@ func (m RemoteWriteProtoMsgs) String() string {
}
var (
// RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf
// message introduced in the https://prometheus.io/docs/specs/remote_write_spec/.
// RemoteWriteProtoMsgV1 represents the `prometheus.WriteRequest` protobuf
// message introduced in the https://prometheus.io/docs/specs/remote_write_spec/,
// which will eventually be deprecated.
//
// NOTE: This string is used for both HTTP header values and config value, so don't change
// this reference.

@@ -97,7 +97,6 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic
resp.Body.Close()
}()
//nolint:usestdlibvars
if resp.StatusCode/100 != 2 {
return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode)
}

@@ -87,7 +87,6 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
resp.Body.Close()
}()
//nolint:usestdlibvars
if resp.StatusCode/100 != 2 {
return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode)
}

@@ -970,7 +970,7 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) {
}.Run(t)
}
// TestEndpointsUpdatePod makes sure that Endpoints discovery detects underlying Pods changes.
// TestEndpointsDiscoveryUpdatePod makes sure that Endpoints discovery detects underlying Pods changes.
// See https://github.com/prometheus/prometheus/issues/11305 for more details.
func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
pod := &v1.Pod{

@@ -154,7 +154,7 @@ func (d k8sDiscoveryTest) Run(t *testing.T) {
// readResultWithTimeout reads all targetgroups from channel with timeout.
// It merges targetgroups by source and sends the result to result channel.
func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, max int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) {
func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, maxGroups int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) {
res := make(map[string]*targetgroup.Group)
timeout := time.After(stopAfter)
Loop:
@@ -167,7 +167,7 @@ Loop:
}
res[tg.Source] = tg
}
if len(res) == max {
if len(res) == maxGroups {
// Reached max target groups we may get, break fast.
break Loop
}
@@ -175,10 +175,10 @@ Loop:
// Because we use queue, an object that is created then
// deleted or updated may be processed only once.
// So possibly we may skip events, timed out here.
t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max)
t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), maxGroups)
break Loop
case <-ctx.Done():
t.Logf("stopped, got %d (max: %d) items", len(res), max)
t.Logf("stopped, got %d (max: %d) items", len(res), maxGroups)
break Loop
}
}

@@ -121,6 +121,11 @@ global:
# that will be kept in memory. 0 means no limit.
[ keep_dropped_targets: <int> | default = 0 ]
# Specifies the validation scheme for metric and label names. Either blank or
# "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
# UTF-8 support.
[ metric_name_validation_scheme: <string> | default = "legacy" ]
runtime:
# Configure the Go garbage collector GOGC parameter
# See: https://tip.golang.org/doc/gc-guide#GOGC
@@ -461,6 +466,11 @@ metric_relabel_configs:
# that will be kept in memory. 0 means no limit.
[ keep_dropped_targets: <int> | default = 0 ]
# Specifies the validation scheme for metric and label names. Either blank or
# "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
# UTF-8 support.
[ metric_name_validation_scheme: <string> | default = "legacy" ]
# Limit on total number of positive and negative buckets allowed in a single
# native histogram. The resolution of a histogram with more buckets will be
# reduced until the number of buckets is within the limit. If the limit cannot

@@ -192,8 +192,9 @@ won't work when you push OTLP metrics.
`--enable-feature=promql-experimental-functions`
Enables PromQL functions that are considered experimental and whose name or
semantics could change.
Enables PromQL functions that are considered experimental. These functions
might change their name, syntax, or semantics. They might also get removed
entirely.
## Created Timestamps Zero Injection
@@ -226,3 +227,25 @@ metadata changes as WAL records on a per-series basis.
This must be used if
you are also using remote write 2.0 as it will only gather metadata from the WAL.
## Delay compaction start time
`--enable-feature=delayed-compaction`
A random offset, up to `10%` of the chunk range, is added to the Head compaction start time. This assists Prometheus instances in avoiding simultaneous compactions and reduces the load on shared resources.
Only auto Head compactions and the operations directly resulting from them are subject to this delay.
In the event of multiple consecutive Head compactions being possible, only the first compaction experiences this delay.
Note that during this delay, the Head continues its usual operations, which include serving and appending series.
Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay were not in place.
## UTF-8 Name Support
`--enable-feature=utf8-names`
When enabled, changes the metric and label name validation scheme inside Prometheus to allow the full UTF-8 character set.
By itself, this flag does not enable the request of UTF-8 names via content negotiation.
Users will also have to set `metric_name_validation_scheme`, either in the global config or on a per-scrape-config basis, to enable the feature.
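A minimal sketch combining the two pieces described above (the job name is illustrative):

```yaml
# Started with: prometheus --enable-feature=utf8-names
global:
  metric_name_validation_scheme: utf8
scrape_configs:
  - job_name: "example"
    # May also be set per scrape config instead of globally.
    metric_name_validation_scheme: utf8
```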

@@ -617,7 +617,7 @@ Like `sort`, `sort_desc` only affects the results of instant queries, as range q
## `sort_by_label()`
**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.**
**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted in ascending order by the given label values, falling back to the sample value when the label values are equal.
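For example, a hypothetical query sorting `up` by job and then by instance:

```
sort_by_label(up, "job", "instance")
```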
@@ -627,7 +627,7 @@ This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_so
## `sort_by_label_desc()`
**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.**
**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
Same as `sort_by_label`, but sorts in descending order.
@@ -676,7 +676,7 @@ over time and return an instant vector with per-series aggregation results:
* `last_over_time(range-vector)`: the most recent point value in the specified interval.
* `present_over_time(range-vector)`: the value 1 for any series in the specified interval.
If the [feature flag](../feature_flags/)
If the [feature flag](../feature_flags.md#experimental-promql-functions)
`--enable-feature=promql-experimental-functions` is set, the following
additional functions are available:

@@ -19,7 +19,7 @@ remote_write:
protobuf_message: "io.prometheus.write.v2.Request"
```
or for deprecated Remote Write 1.0 message:
or for the eventually deprecated Remote Write 1.0 message:
```yaml
remote_write:

@@ -16,11 +16,11 @@ require (
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/aws/aws-sdk-go v1.53.16 // indirect
github.com/aws/aws-sdk-go v1.55.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -36,7 +36,6 @@ require (
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -63,7 +62,7 @@ require (
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
google.golang.org/grpc v1.64.0 // indirect
google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

@@ -1,9 +1,9 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
@@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -37,9 +37,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -50,8 +49,6 @@ github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LO
github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -279,8 +276,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc=
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -405,8 +402,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

go.mod

@@ -13,12 +13,12 @@ require (
github.com/KimMachineGun/automemlimit v0.6.1
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
github.com/aws/aws-sdk-go v1.54.19
github.com/aws/aws-sdk-go v1.55.5
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
github.com/digitalocean/godo v1.119.0
github.com/docker/docker v27.0.3+incompatible
github.com/docker/docker v27.1.1+incompatible
github.com/edsrzf/mmap-go v1.1.0
github.com/envoyproxy/go-control-plane v0.12.0
github.com/envoyproxy/protoc-gen-validate v1.0.4
@@ -39,7 +39,7 @@ require (
github.com/hashicorp/consul/api v1.29.2
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
github.com/hetznercloud/hcloud-go/v2 v2.12.0
github.com/ionos-cloud/sdk-go/v6 v6.1.11
github.com/ionos-cloud/sdk-go/v6 v6.2.0
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.9
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
@@ -82,8 +82,8 @@ require (
golang.org/x/text v0.16.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.23.0
google.golang.org/api v0.189.0
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
google.golang.org/api v0.190.0
google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f
google.golang.org/grpc v1.65.0
google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v2 v2.4.0
@@ -96,7 +96,7 @@
)
require (
cloud.google.com/go/auth v0.7.2 // indirect
cloud.google.com/go/auth v0.7.3 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
cloud.google.com/go/compute/metadata v0.5.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
@@ -140,9 +140,9 @@
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/s2a-go v0.1.8 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.5 // indirect
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
@@ -191,7 +191,7 @@
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.19.0 // indirect
golang.org/x/term v0.22.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.0.3 // indirect

go.sum

@@ -12,8 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE=
cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs=
cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY=
cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA=
cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
@@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI=
github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
@@ -149,8 +149,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -322,8 +322,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -332,8 +332,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8=
github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -423,8 +423,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/ionos-cloud/sdk-go/v6 v6.2.0 h1:qX7gachC0wJSmFfVRnd+DHmz9AStvVraKcwQ/JokIB4=
github.com/ionos-cloud/sdk-go/v6 v6.2.0/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -1047,8 +1047,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI=
google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8=
google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q=
google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1085,10 +1085,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade h1:oCRSWfwGXQsqlVdErcyTt4A93Y8fo0/9D4b1gnI++qo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk=
google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=

@@ -94,16 +94,46 @@ type OpenMetricsParser struct {
exemplarVal float64
exemplarTs int64
hasExemplarTs bool
skipCTSeries bool
}
// NewOpenMetricsParser returns a new parser of the byte slice.
func NewOpenMetricsParser(b []byte, st *labels.SymbolTable) Parser {
return &OpenMetricsParser{
l: &openMetricsLexer{b: b},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
type openMetricsParserOptions struct {
SkipCTSeries bool
}
type OpenMetricsOption func(*openMetricsParserOptions)
// WithOMParserCTSeriesSkipped turns off exposing _created lines as series,
// so they are only used to parse the created timestamp for the
// `CreatedTimestamp` method.
//
// It's recommended to use this option to avoid using _created lines for
// purposes other than the created timestamp, but it is left off by default
// for best-effort compatibility.
func WithOMParserCTSeriesSkipped() OpenMetricsOption {
return func(o *openMetricsParserOptions) {
o.SkipCTSeries = true
}
}
// NewOpenMetricsParser returns a new parser for the byte slice with option to skip CT series parsing.
func NewOpenMetricsParser(b []byte, st *labels.SymbolTable, opts ...OpenMetricsOption) Parser {
options := &openMetricsParserOptions{}
for _, opt := range opts {
opt(options)
}
parser := &OpenMetricsParser{
l: &openMetricsLexer{b: b},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
skipCTSeries: options.SkipCTSeries,
}
return parser
}
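A minimal sketch of the new option in use (import paths assumed, payload illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	payload := []byte("# TYPE foo counter\nfoo_total 17.0\nfoo_created 1000\n# EOF\n")
	p := textparse.NewOpenMetricsParser(payload, labels.NewSymbolTable(),
		textparse.WithOMParserCTSeriesSkipped())
	for {
		entry, err := p.Next()
		if err != nil { // io.EOF once the input is exhausted
			return
		}
		if entry == textparse.EntrySeries {
			// foo_created is not surfaced as a series, but CreatedTimestamp
			// below still finds it by peeking ahead.
			if ct := p.CreatedTimestamp(); ct != nil {
				fmt.Println(*ct) // 1000
			}
		}
	}
}
```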
// Series returns the bytes of the series, the timestamp if set, and the value
// of the current sample.
func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
@@ -219,10 +249,90 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
return true
}
// CreatedTimestamp returns nil as it's not implemented yet.
// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980
// CreatedTimestamp returns the created timestamp for the current metric if one exists, or nil otherwise.
// NOTE(Maniktherana): Might use additional CPU/mem resources due to deep copy of parser required for peeking given 1.0 OM specification on _created series.
func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
return nil
if !TypeRequiresCT(p.mtype) {
// Not a CT supported metric type, fast path.
return nil
}
var (
currLset labels.Labels
buf []byte
peekWithoutNameLsetHash uint64
)
p.Metric(&currLset)
currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
// Search for the _created line for the currFamilyLsetHash using an ephemeral
// parser, until we see EOF or a new metric family. We have to do this because
// we don't know where (or whether) that CT line is.
// TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
peek := deepCopy(p)
for {
eType, err := peek.Next()
if err != nil {
// This means peek will error later on too, so definitely no CT line was found.
// This might result in partial scrape with wrong/missing CT, but only
// spec improvement would help.
// TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
return nil
}
if eType != EntrySeries {
// Assume we hit different family, no CT line found.
return nil
}
var peekedLset labels.Labels
peek.Metric(&peekedLset)
peekedName := peekedLset.Get(model.MetricNameLabel)
if !strings.HasSuffix(peekedName, "_created") {
// Not a CT line, search more.
continue
}
// We got a CT line here, but let's search if CT line is actually for our series, edge case.
peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
if peekWithoutNameLsetHash != currFamilyLsetHash {
// CT line for a different series, for our series no CT.
return nil
}
ct := int64(peek.val)
return &ct
}
}
// TypeRequiresCT returns true if the metric type requires a _created timestamp.
func TypeRequiresCT(t model.MetricType) bool {
switch t {
case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram:
return true
default:
return false
}
}
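Per the switch above, only counter, summary, and histogram families can carry a `_created` series; a quick sketch (compare the `fizz_created` gauge in the tests further down, which is deliberately not treated as a CT):

```go
textparse.TypeRequiresCT(model.MetricTypeCounter)   // true
textparse.TypeRequiresCT(model.MetricTypeSummary)   // true
textparse.TypeRequiresCT(model.MetricTypeHistogram) // true
textparse.TypeRequiresCT(model.MetricTypeGauge)     // false
```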
// deepCopy creates a copy of the parser that does not share the lexer's
// underlying memory, so CreatedTimestamp can peek ahead without disturbing
// the main parse state. Note that skipCTSeries is false in the copy, so the
// ephemeral parser still sees _created lines as series.
func deepCopy(p *OpenMetricsParser) OpenMetricsParser {
newB := make([]byte, len(p.l.b))
copy(newB, p.l.b)
newLexer := &openMetricsLexer{
b: newB,
i: p.l.i,
start: p.l.start,
err: p.l.err,
state: p.l.state,
}
newParser := OpenMetricsParser{
l: newLexer,
builder: p.builder,
mtype: p.mtype,
val: p.val,
skipCTSeries: false,
}
return newParser
}
// nextToken returns the next token from the openMetricsLexer.
@@ -337,7 +447,13 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
}
p.series = p.l.b[p.start:p.l.i]
return p.parseMetricSuffix(p.nextToken())
if err := p.parseSeriesEndOfLine(p.nextToken()); err != nil {
return EntryInvalid, err
}
if p.skipCTSeries && p.isCreatedSeries() {
return p.Next()
}
return EntrySeries, nil
case tMName:
p.offsets = append(p.offsets, p.start, p.l.i)
p.series = p.l.b[p.start:p.l.i]
@@ -351,8 +467,14 @@
p.series = p.l.b[p.start:p.l.i]
t2 = p.nextToken()
}
return p.parseMetricSuffix(t2)
if err := p.parseSeriesEndOfLine(t2); err != nil {
return EntryInvalid, err
}
if p.skipCTSeries && p.isCreatedSeries() {
return p.Next()
}
return EntrySeries, nil
default:
err = p.parseError("expected a valid start token", t)
}
@@ -467,51 +589,64 @@ func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, e
}
}
// parseMetricSuffix parses the end of the line after the metric name and
// labels. It starts parsing with the provided token.
func (p *OpenMetricsParser) parseMetricSuffix(t token) (Entry, error) {
// isCreatedSeries returns true if the current series is a _created series.
func (p *OpenMetricsParser) isCreatedSeries() bool {
var newLbs labels.Labels
p.Metric(&newLbs)
name := newLbs.Get(model.MetricNameLabel)
if TypeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") {
return true
}
return false
}
// parseSeriesEndOfLine parses the series end of the line (value, optional
// timestamp, commentary, etc.) after the metric name and labels.
// It starts parsing with the provided token.
func (p *OpenMetricsParser) parseSeriesEndOfLine(t token) error {
if p.offsets[0] == -1 {
return EntryInvalid, fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i])
return fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i])
}
var err error
p.val, err = p.getFloatValue(t, "metric")
if err != nil {
return EntryInvalid, err
return err
}
p.hasTS = false
switch t2 := p.nextToken(); t2 {
case tEOF:
return EntryInvalid, errors.New("data does not end with # EOF")
return errors.New("data does not end with # EOF")
case tLinebreak:
break
case tComment:
if err := p.parseComment(); err != nil {
return EntryInvalid, err
return err
}
case tTimestamp:
p.hasTS = true
var ts float64
// A float is enough to hold what we need for millisecond resolution.
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if math.IsNaN(ts) || math.IsInf(ts, 0) {
return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts)
return fmt.Errorf("invalid timestamp %f", ts)
}
p.ts = int64(ts * 1000)
switch t3 := p.nextToken(); t3 {
case tLinebreak:
case tComment:
if err := p.parseComment(); err != nil {
return EntryInvalid, err
return err
}
default:
return EntryInvalid, p.parseError("expected next entry after timestamp", t3)
return p.parseError("expected next entry after timestamp", t3)
}
}
return EntrySeries, nil
return nil
}
func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) {

@@ -14,6 +14,7 @@
package textparse
import (
"errors"
"io"
"testing"
@@ -24,6 +25,8 @@ import (
"github.com/prometheus/prometheus/model/labels"
)
func int64p(x int64) *int64 { return &x }
func TestOpenMetricsParse(t *testing.T) {
input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
@@ -63,15 +66,34 @@ ss{A="a"} 0
_metric_starting_with_underscore 1
testmetric{_label_starting_with_underscore="foo"} 1
testmetric{label="\"bar\""} 1
# HELP foo Counter with and without labels to certify CT is parsed for both cases
# TYPE foo counter
foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
foo_total 17.0 1520879607.789 # {id="counter-test"} 5
foo_created 1000
foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5
foo_created{a="b"} 1000
# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines away
# TYPE bar summary
bar_count 17.0
bar_sum 324789.3
bar{quantile="0.95"} 123.7
bar{quantile="0.99"} 150.0
bar_created 1520430000
# HELP baz Histogram with the same objective as the summary above
# TYPE baz histogram
baz_bucket{le="0.0"} 0
baz_bucket{le="+Inf"} 17
baz_count 17
baz_sum 324789.3
baz_created 1520430000
# HELP fizz_created Gauge which shouldn't be parsed as CT
# TYPE fizz_created gauge
fizz_created 17.0`
input += "\n# HELP metric foo\x00bar"
input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
input += "\n# EOF\n"
int64p := func(x int64) *int64 { return &x }
exp := []expectedParse{
{
m: "go_gc_duration_seconds",
@@ -216,6 +238,9 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: "foo",
help: "Counter with and without labels to certify CT is parsed for both cases",
}, {
m: "foo",
typ: model.MetricTypeCounter,
@@ -225,6 +250,76 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
lset: labels.FromStrings("__name__", "foo_total"),
t: int64p(1520879607789),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
ct: int64p(1000),
}, {
m: `foo_total{a="b"}`,
v: 17.0,
lset: labels.FromStrings("__name__", "foo_total", "a", "b"),
t: int64p(1520879607789),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
ct: int64p(1000),
}, {
m: "bar",
help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",
}, {
m: "bar",
typ: model.MetricTypeSummary,
}, {
m: "bar_count",
v: 17.0,
lset: labels.FromStrings("__name__", "bar_count"),
ct: int64p(1520430000),
}, {
m: "bar_sum",
v: 324789.3,
lset: labels.FromStrings("__name__", "bar_sum"),
ct: int64p(1520430000),
}, {
m: `bar{quantile="0.95"}`,
v: 123.7,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"),
ct: int64p(1520430000),
}, {
m: `bar{quantile="0.99"}`,
v: 150.0,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"),
ct: int64p(1520430000),
}, {
m: "baz",
help: "Histogram with the same objective as above's summary",
}, {
m: "baz",
typ: model.MetricTypeHistogram,
}, {
m: `baz_bucket{le="0.0"}`,
v: 0,
lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"),
ct: int64p(1520430000),
}, {
m: `baz_bucket{le="+Inf"}`,
v: 17,
lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"),
ct: int64p(1520430000),
}, {
m: `baz_count`,
v: 17,
lset: labels.FromStrings("__name__", "baz_count"),
ct: int64p(1520430000),
}, {
m: `baz_sum`,
v: 324789.3,
lset: labels.FromStrings("__name__", "baz_sum"),
ct: int64p(1520430000),
}, {
m: "fizz_created",
help: "Gauge which shouldn't be parsed as CT",
}, {
m: "fizz_created",
typ: model.MetricTypeGauge,
}, {
m: `fizz_created`,
v: 17,
lset: labels.FromStrings("__name__", "fizz_created"),
}, {
m: "metric",
help: "foo\x00bar",
@ -235,8 +330,8 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
},
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable())
checkParseResults(t, p, exp)
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
checkParseResultsWithCT(t, p, exp, true)
}
func TestUTF8OpenMetricsParse(t *testing.T) {
@ -251,6 +346,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
# UNIT "go.gc_duration_seconds" seconds
{"go.gc_duration_seconds",quantile="0"} 4.9351e-05
{"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05
{"go.gc_duration_seconds_created"} 12313
{"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05
{"http.status",q="0.9",a="b"} 8.3835e-05
{"http.status",q="0.9",a="b"} 8.3835e-05
@ -274,10 +370,12 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
m: `{"go.gc_duration_seconds",quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"),
ct: int64p(12313),
}, {
m: `{"go.gc_duration_seconds",quantile="0.25"}`,
v: 7.424100000000001e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"),
ct: int64p(12313),
}, {
m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`,
v: 8.3835e-05,
@ -306,8 +404,8 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"),
},
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable())
checkParseResults(t, p, exp)
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
checkParseResultsWithCT(t, p, exp, true)
}
func TestOpenMetricsParseErrors(t *testing.T) {
@ -598,10 +696,6 @@ func TestOpenMetricsParseErrors(t *testing.T) {
input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf",
err: `invalid exemplar timestamp -Inf`,
},
{
input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf",
err: `invalid exemplar timestamp +Inf`,
},
}
for i, c := range cases {
@ -684,3 +778,217 @@ func TestOMNullByteHandling(t *testing.T) {
require.Equal(t, c.err, err.Error(), "test %d", i)
}
}
// While not desirable, there are cases where CT fails to parse, and
// these tests document them.
// TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
func TestCTParseFailures(t *testing.T) {
input := `# HELP something Histogram with _created between sum and buckets
# TYPE something histogram
something_count 17
something_sum 324789.3
something_created 1520430001
something_bucket{le="0.0"} 0
something_bucket{le="+Inf"} 17
# HELP thing Histogram with _created as first line
# TYPE thing histogram
thing_created 1520430002
thing_count 17
thing_sum 324789.3
thing_bucket{le="0.0"} 0
thing_bucket{le="+Inf"} 17
# HELP yum Summary with _created between sum and quantiles
# TYPE yum summary
yum_count 17.0
yum_sum 324789.3
yum_created 1520430003
yum{quantile="0.95"} 123.7
yum{quantile="0.99"} 150.0
# HELP foobar Summary with _created as the first line
# TYPE foobar summary
foobar_created 1520430004
foobar_count 17.0
foobar_sum 324789.3
foobar{quantile="0.95"} 123.7
foobar{quantile="0.99"} 150.0`
input += "\n# EOF\n"
int64p := func(x int64) *int64 { return &x }
type expectCT struct {
m string
ct *int64
typ model.MetricType
help string
isErr bool
}
exp := []expectCT{
{
m: "something",
help: "Histogram with _created between buckets and summary",
isErr: false,
}, {
m: "something",
typ: model.MetricTypeHistogram,
isErr: false,
}, {
m: `something_count`,
ct: int64p(1520430001),
isErr: false,
}, {
m: `something_sum`,
ct: int64p(1520430001),
isErr: false,
}, {
m: `something_bucket{le="0.0"}`,
ct: int64p(1520430001),
isErr: true,
}, {
m: `something_bucket{le="+Inf"}`,
ct: int64p(1520430001),
isErr: true,
}, {
m: "thing",
help: "Histogram with _created as first line",
isErr: false,
}, {
m: "thing",
typ: model.MetricTypeHistogram,
isErr: false,
}, {
m: `thing_count`,
ct: int64p(1520430002),
isErr: true,
}, {
m: `thing_sum`,
ct: int64p(1520430002),
isErr: true,
}, {
m: `thing_bucket{le="0.0"}`,
ct: int64p(1520430002),
isErr: true,
}, {
m: `thing_bucket{le="+Inf"}`,
ct: int64p(1520430002),
isErr: true,
}, {
m: "yum",
help: "Summary with _created between summary and quantiles",
isErr: false,
}, {
m: "yum",
typ: model.MetricTypeSummary,
isErr: false,
}, {
m: "yum_count",
ct: int64p(1520430003),
isErr: false,
}, {
m: "yum_sum",
ct: int64p(1520430003),
isErr: false,
}, {
m: `yum{quantile="0.95"}`,
ct: int64p(1520430003),
isErr: true,
}, {
m: `yum{quantile="0.99"}`,
ct: int64p(1520430003),
isErr: true,
}, {
m: "foobar",
help: "Summary with _created as the first line",
isErr: false,
}, {
m: "foobar",
typ: model.MetricTypeSummary,
isErr: false,
}, {
m: "foobar_count",
ct: int64p(1520430004),
isErr: true,
}, {
m: "foobar_sum",
ct: int64p(1520430004),
isErr: true,
}, {
m: `foobar{quantile="0.95"}`,
ct: int64p(1520430004),
isErr: true,
}, {
m: `foobar{quantile="0.99"}`,
ct: int64p(1520430004),
isErr: true,
},
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
i := 0
var res labels.Labels
for {
et, err := p.Next()
if errors.Is(err, io.EOF) {
break
}
require.NoError(t, err)
switch et {
case EntrySeries:
p.Metric(&res)
if ct := p.CreatedTimestamp(); exp[i].isErr {
require.Nil(t, ct)
} else {
require.Equal(t, *exp[i].ct, *ct)
}
default:
i++
continue
}
i++
}
}
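For reference, a minimal consumer of the created-timestamp API exercised above could look like the following sketch. It uses only calls that appear in these tests (Next, Metric, CreatedTimestamp, and the WithOMParserCTSeriesSkipped option); collectCreatedTimestamps itself is a hypothetical helper, not part of the package.

func collectCreatedTimestamps(data []byte) (map[string]int64, error) {
	p := NewOpenMetricsParser(data, labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
	out := map[string]int64{}
	var lset labels.Labels
	for {
		et, err := p.Next()
		if errors.Is(err, io.EOF) {
			return out, nil
		}
		if err != nil {
			return nil, err
		}
		if et != EntrySeries {
			continue
		}
		p.Metric(&lset)
		// CreatedTimestamp returns nil when no _created line was found
		// for the current series (including the failure cases above).
		if ct := p.CreatedTimestamp(); ct != nil {
			out[lset.Get(labels.MetricName)] = *ct
		}
	}
}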
func TestDeepCopy(t *testing.T) {
input := []byte(`# HELP go_goroutines A gauge of goroutines.
# TYPE go_goroutines gauge
go_goroutines 33 123.123
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds
go_gc_duration_seconds_created`)
st := labels.NewSymbolTable()
parser := NewOpenMetricsParser(input, st, WithOMParserCTSeriesSkipped()).(*OpenMetricsParser)
// Modify the original parser state
_, err := parser.Next()
require.NoError(t, err)
require.Equal(t, "go_goroutines", string(parser.l.b[parser.offsets[0]:parser.offsets[1]]))
require.True(t, parser.skipCTSeries)
// Create a deep copy of the parser
copyParser := deepCopy(parser)
etype, err := copyParser.Next()
require.NoError(t, err)
require.Equal(t, EntryType, etype)
require.True(t, parser.skipCTSeries)
require.False(t, copyParser.skipCTSeries)
// Modify the original parser further
parser.Next()
parser.Next()
parser.Next()
require.Equal(t, "go_gc_duration_seconds", string(parser.l.b[parser.offsets[0]:parser.offsets[1]]))
require.Equal(t, "summary", string(parser.mtype))
require.False(t, copyParser.skipCTSeries)
require.True(t, parser.skipCTSeries)
// Ensure the copy remains unchanged
copyParser.Next()
copyParser.Next()
require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]]))
require.False(t, copyParser.skipCTSeries)
}

View file

@ -18,6 +18,7 @@ import (
"errors"
"io"
"os"
"strings"
"testing"
"github.com/klauspost/compress/gzip"
@ -41,6 +42,7 @@ type expectedParse struct {
unit string
comment string
e *exemplar.Exemplar
ct *int64
}
func TestPromParse(t *testing.T) {
@ -188,6 +190,10 @@ testmetric{label="\"bar\""} 1`
}
func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
checkParseResultsWithCT(t, p, exp, false)
}
func checkParseResultsWithCT(t *testing.T, p Parser, exp []expectedParse, ctLinesRemoved bool) {
i := 0
var res labels.Labels
@ -205,6 +211,14 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
p.Metric(&res)
if ctLinesRemoved {
// Are CT series skipped?
_, typ := p.Type()
if TypeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") {
t.Fatalf("we exped created lines skipped")
}
}
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].t, ts)
require.Equal(t, exp[i].v, v)
@ -218,6 +232,11 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
require.True(t, found)
testutil.RequireEqual(t, *exp[i].e, e)
}
if ct := p.CreatedTimestamp(); ct != nil {
require.Equal(t, *exp[i].ct, *ct)
} else {
require.Nil(t, exp[i].ct)
}
case EntryType:
m, typ := p.Type()
@ -475,8 +494,10 @@ const (
func BenchmarkParse(b *testing.B) {
for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{
"prometheus": NewPromParser,
"openmetrics": NewOpenMetricsParser,
"prometheus": NewPromParser,
"openmetrics": func(b []byte, st *labels.SymbolTable) Parser {
return NewOpenMetricsParser(b, st)
},
} {
for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
f, err := os.Open(fn)

View file

@ -674,7 +674,6 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
}()
// Any HTTP status 2xx is OK.
//nolint:usestdlibvars
if resp.StatusCode/100 != 2 {
return fmt.Errorf("bad response status %s", resp.Status)
}

View file

@ -711,7 +711,7 @@ func TestHangingNotifier(t *testing.T) {
)
var (
sendTimeout = 10 * time.Millisecond
sendTimeout = 100 * time.Millisecond
sdUpdatert = sendTimeout / 2
done = make(chan struct{})

View file

@ -302,15 +302,10 @@ type Exemplar struct {
// value represents an exact example value. This can be useful when the exemplar
// is attached to a histogram, which only gives an estimated value through buckets.
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
// timestamp represents an optional timestamp of the sample in ms.
// timestamp represents the timestamp of the exemplar in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
//
// Note that the "optional" keyword is omitted due to
// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
// Zero value means value not set. If you need to use exactly zero value for
// the timestamp, use 1 millisecond before or after.
Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`

View file

@ -107,15 +107,10 @@ message Exemplar {
// value represents an exact example value. This can be useful when the exemplar
// is attached to a histogram, which only gives an estimated value through buckets.
double value = 2;
// timestamp represents an optional timestamp of the sample in ms.
// timestamp represents the timestamp of the exemplar in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
//
// Note that the "optional" keyword is omitted due to
// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
// Zero value means value not set. If you need to use exactly zero value for
// the timestamp, use 1 millisecond before or after.
int64 timestamp = 3;
}

View file

@ -165,6 +165,9 @@ func rangeQueryCases() []benchCase {
{
expr: "sum(a_X)",
},
{
expr: "avg(a_X)",
},
{
expr: "sum without (l)(h_X)",
},

View file

@ -1063,7 +1063,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf))
level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf))
*errp = fmt.Errorf("unexpected error: %w", err)
case errWithWarnings:
*errp = err.err
@ -2362,6 +2362,11 @@ loop:
} else {
histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}})
}
if histograms[n].H == nil {
// Make sure to pass non-nil H to AtFloatHistogram so that it does a deep-copy.
// Not an issue in the loop above since that uses an intermediate buffer.
histograms[n].H = &histogram.FloatHistogram{}
}
histograms[n].T, histograms[n].H = it.AtFloatHistogram(histograms[n].H)
if value.IsStaleNaN(histograms[n].H.Sum) {
histograms = histograms[:n]
@ -2779,15 +2784,20 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
}
type groupedAggregation struct {
seen bool // Was this output groups seen in the input at this timestamp.
hasFloat bool // Has at least 1 float64 sample aggregated.
hasHistogram bool // Has at least 1 histogram sample aggregated.
floatValue float64
histogramValue *histogram.FloatHistogram
floatMean float64 // Mean, or "compensating value" for Kahan summation.
groupCount int
groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group
heap vectorByValueHeap
floatValue float64
histogramValue *histogram.FloatHistogram
floatMean float64
floatKahanC float64 // "Compensating value" for Kahan summation.
groupCount float64
heap vectorByValueHeap
// All bools together for better packing within the struct.
seen bool // Was this output group seen in the input at this timestamp.
hasFloat bool // Has at least 1 float64 sample aggregated.
hasHistogram bool // Has at least 1 histogram sample aggregated.
incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets.
groupAggrComplete bool // Used by LIMITK to short-cut the series loop once we've reached K elements in every group.
incrementalMean bool // True after reverting to incremental calculation of the mean value.
}
// aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix.
@ -2811,15 +2821,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// Initialize this group if it's the first time we've seen it.
if !group.seen {
*group = groupedAggregation{
seen: true,
floatValue: f,
groupCount: 1,
seen: true,
floatValue: f,
floatMean: f,
incompatibleHistograms: false,
groupCount: 1,
}
switch op {
case parser.AVG:
group.floatMean = f
fallthrough
case parser.SUM:
case parser.AVG, parser.SUM:
if h == nil {
group.hasFloat = true
} else {
@ -2827,7 +2836,6 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
group.hasHistogram = true
}
case parser.STDVAR, parser.STDDEV:
group.floatMean = f
group.floatValue = 0
case parser.QUANTILE:
group.heap = make(vectorByValueHeap, 1)
@ -2838,6 +2846,10 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
continue
}
if group.incompatibleHistograms {
continue
}
switch op {
case parser.SUM:
if h != nil {
@ -2846,6 +2858,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
_, err := group.histogramValue.Add(h)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
}
}
// Otherwise the aggregation contained floats
@ -2853,7 +2866,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// point in copying the histogram in that case.
} else {
group.hasFloat = true
group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean)
group.floatValue, group.floatKahanC = kahanSumInc(f, group.floatValue, group.floatKahanC)
}
case parser.AVG:
@ -2861,15 +2874,19 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
if h != nil {
group.hasHistogram = true
if group.histogramValue != nil {
left := h.Copy().Div(float64(group.groupCount))
right := group.histogramValue.Copy().Div(float64(group.groupCount))
left := h.Copy().Div(group.groupCount)
right := group.histogramValue.Copy().Div(group.groupCount)
toAdd, err := left.Sub(right)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
continue
}
_, err = group.histogramValue.Add(toAdd)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
continue
}
}
// Otherwise the aggregation contained floats
@ -2877,6 +2894,22 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// point in copying the histogram in that case.
} else {
group.hasFloat = true
if !group.incrementalMean {
newV, newC := kahanSumInc(f, group.floatValue, group.floatKahanC)
if !math.IsInf(newV, 0) {
// The sum doesn't overflow, so we propagate it to the
// group struct and continue with the regular
// calculation of the mean value.
group.floatValue, group.floatKahanC = newV, newC
break
}
// If we are here, we know that the sum _would_ overflow. So
// instead of continuing to sum up, we revert to incremental
// calculation of the mean value from here on.
group.incrementalMean = true
group.floatMean = group.floatValue / (group.groupCount - 1)
group.floatKahanC /= group.groupCount - 1
}
if math.IsInf(group.floatMean, 0) {
if math.IsInf(f, 0) && (group.floatMean > 0) == (f > 0) {
// The `floatMean` and `s.F` values are `Inf` of the same sign. They
@ -2894,8 +2927,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
break
}
}
// Divide each side of the `-` by `group.groupCount` to avoid float64 overflows.
group.floatMean += f/float64(group.groupCount) - group.floatMean/float64(group.groupCount)
currentMean := group.floatMean + group.floatKahanC
group.floatMean, group.floatKahanC = kahanSumInc(
// Divide each side of the `-` by `group.groupCount` to avoid float64 overflows.
f/group.groupCount-currentMean/group.groupCount,
group.floatMean,
group.floatKahanC,
)
}
case parser.GROUP:
@ -2918,7 +2956,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
if h == nil { // Ignore native histograms.
group.groupCount++
delta := f - group.floatMean
group.floatMean += delta / float64(group.groupCount)
group.floatMean += delta / group.groupCount
group.floatValue += delta * (f - group.floatMean)
}
@ -2944,20 +2982,25 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange()))
continue
}
if aggr.hasHistogram {
switch {
case aggr.incompatibleHistograms:
continue
case aggr.hasHistogram:
aggr.histogramValue = aggr.histogramValue.Compact(0)
} else {
aggr.floatValue = aggr.floatMean
case aggr.incrementalMean:
aggr.floatValue = aggr.floatMean + aggr.floatKahanC
default:
aggr.floatValue = (aggr.floatValue + aggr.floatKahanC) / aggr.groupCount
}
case parser.COUNT:
aggr.floatValue = float64(aggr.groupCount)
aggr.floatValue = aggr.groupCount
case parser.STDVAR:
aggr.floatValue /= float64(aggr.groupCount)
aggr.floatValue /= aggr.groupCount
case parser.STDDEV:
aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount))
aggr.floatValue = math.Sqrt(aggr.floatValue / aggr.groupCount)
case parser.QUANTILE:
aggr.floatValue = quantile(q, aggr.heap)
@ -2968,10 +3011,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange()))
continue
}
if aggr.hasHistogram {
switch {
case aggr.incompatibleHistograms:
continue
case aggr.hasHistogram:
aggr.histogramValue.Compact(0)
} else {
aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term.
default:
aggr.floatValue += aggr.floatKahanC
}
default:
// For other aggregations, we already have the right value.
@ -3137,7 +3183,7 @@ seriesLoop:
return mat, annos
}
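The AVG changes above combine two techniques: Kahan-compensated summation while the running sum stays finite, and a switch to an incrementally updated mean once the sum would overflow. A self-contained sketch of the idea (standalone Go with a local kahanSumInc of the same shape as the engine's helper; a sketch, not the engine code):

package main

import (
	"fmt"
	"math"
)

// kahanSumInc adds inc to sum while maintaining a compensation term c.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	if math.Abs(sum) >= math.Abs(inc) {
		c += (sum - t) + inc // recover low-order bits lost from sum
	} else {
		c += (inc - t) + sum // recover low-order bits lost from inc
	}
	return t, c
}

// mean averages values, reverting to an incremental mean once the plain
// compensated sum would overflow. Assumes len(values) > 0.
func mean(values []float64) float64 {
	var sum, c, m float64
	incremental := false
	for i, v := range values {
		count := float64(i + 1)
		if !incremental {
			newSum, newC := kahanSumInc(v, sum, c)
			if count == 1 || !math.IsInf(newSum, 0) {
				sum, c = newSum, newC
				continue
			}
			// The sum would overflow: seed a running mean from what
			// has been summed so far and switch strategies.
			incremental = true
			m = sum / (count - 1)
			c /= count - 1
		}
		corrected := m + c
		m, c = kahanSumInc(v/count-corrected/count, m, c)
	}
	if incremental {
		return m + c
	}
	return (sum + c) / float64(len(values))
}

func main() {
	big := math.MaxFloat64 * 0.8
	fmt.Println(mean([]float64{big, big})) // finite (~1.4e308), not +Inf
}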
// aggregationK evaluates count_values on vec.
// aggregationCountValues evaluates count_values on vec.
// Outputs as many series per group as there are values in the input.
func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
type groupCount struct {

View file

@ -3793,3 +3793,62 @@ func makeInt64Pointer(val int64) *int64 {
*valp = val
return valp
}
func TestHistogramCopyFromIteratorRegression(t *testing.T) {
// Loading the following histograms creates two chunks because there's a
// counter reset. Not only is the counter lower in the last histogram,
// but buckets are also missing.
// This in turn means that chunk iterators will have different spans.
load := `load 1m
histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:1 count:1 buckets:[1]}}
`
storage := promqltest.LoadedStorage(t, load)
t.Cleanup(func() { storage.Close() })
engine := promqltest.NewTestEngine(false, 0, promqltest.DefaultMaxSamplesPerQuery)
verify := func(t *testing.T, qry promql.Query, expected []histogram.FloatHistogram) {
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
m, ok := res.Value.(promql.Matrix)
require.True(t, ok)
require.Len(t, m, 1)
series := m[0]
require.Empty(t, series.Floats)
require.Len(t, series.Histograms, len(expected))
for i, e := range expected {
series.Histograms[i].H.CounterResetHint = histogram.UnknownCounterReset // Don't care.
require.Equal(t, &e, series.Histograms[i].H)
}
}
qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[90s])", time.Unix(0, 0), time.Unix(0, 0).Add(60*time.Second), time.Minute)
require.NoError(t, err)
verify(t, qry, []histogram.FloatHistogram{
{
Count: 3,
Sum: 3, // Increase from 4 to 6 is 2. Interpolation adds 1.
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, // Two buckets changed between the first and second histogram.
PositiveBuckets: []float64{1.5, 1.5}, // Increase from 2 to 3 is 1 in both buckets. Interpolation adds 0.5.
},
})
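// (Aside: the expected numbers follow from rate extrapolation. The raw
// increase between the samples at 0s and 60s is 6-4=2 for the sum and
// 3-2=1 per bucket; the 90s window extrapolates by a factor of 90/60=1.5,
// giving 3 and 1.5 respectively.)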
qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[61s]", time.Unix(0, 0).Add(2*time.Minute))
require.NoError(t, err)
verify(t, qry, []histogram.FloatHistogram{
{
Count: 6,
Sum: 6,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []float64{3, 3},
},
{
Count: 1,
Sum: 1,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []float64{1},
},
})
}

View file

@ -97,9 +97,10 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
lastT = samples.Histograms[numSamplesMinusOne].T
var newAnnos annotations.Annotations
resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange())
annos.Merge(newAnnos)
if resultHistogram == nil {
// The histograms are not compatible with each other.
return enh.Out, annos.Merge(newAnnos)
return enh.Out, annos
}
case len(samples.Floats) > 1:
numSamplesMinusOne = len(samples.Floats) - 1
@ -178,17 +179,29 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
// Otherwise, it returns the calculated histogram and an empty annotation.
func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) {
prev := points[0].H
usingCustomBuckets := prev.UsesCustomBuckets()
last := points[len(points)-1].H
if last == nil {
return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))
}
minSchema := prev.Schema
if last.Schema < minSchema {
minSchema = last.Schema
}
if last.UsesCustomBuckets() != usingCustomBuckets {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
}
var annos annotations.Annotations
// We check for gauge type histograms in the loop below, but the loop below does not run on the first and last point,
// so check the first and last point now.
if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) {
annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos))
}
// First iteration to find out two things:
// - What's the smallest relevant schema?
// - Are all data points histograms?
@ -208,6 +221,9 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
if curr.Schema < minSchema {
minSchema = curr.Schema
}
if curr.UsesCustomBuckets() != usingCustomBuckets {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
}
}
h := last.CopyToSchema(minSchema)
@ -241,7 +257,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
}
h.CounterResetHint = histogram.GaugeType
return h.Compact(0), nil
return h.Compact(0), annos
}
// === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
@ -451,15 +467,15 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *Eval
// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) ===
func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
min := vals[1].(Vector)[0].F
max := vals[2].(Vector)[0].F
if max < min {
minVal := vals[1].(Vector)[0].F
maxVal := vals[2].(Vector)[0].F
if maxVal < minVal {
return enh.Out, nil
}
for _, el := range vec {
enh.Out = append(enh.Out, Sample{
Metric: el.Metric.DropMetricName(),
F: math.Max(min, math.Min(max, el.F)),
F: math.Max(minVal, math.Min(maxVal, el.F)),
})
}
return enh.Out, nil
@ -468,11 +484,11 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
// === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) ===
func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
max := vals[1].(Vector)[0].F
maxVal := vals[1].(Vector)[0].F
for _, el := range vec {
enh.Out = append(enh.Out, Sample{
Metric: el.Metric.DropMetricName(),
F: math.Min(max, el.F),
F: math.Min(maxVal, el.F),
})
}
return enh.Out, nil
@ -481,11 +497,11 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
// === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) ===
func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
min := vals[1].(Vector)[0].F
minVal := vals[1].(Vector)[0].F
for _, el := range vec {
enh.Out = append(enh.Out, Sample{
Metric: el.Metric.DropMetricName(),
F: math.Max(min, el.F),
F: math.Max(minVal, el.F),
})
}
return enh.Out, nil
@ -573,9 +589,28 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
return vec, nil
}
return aggrOverTime(vals, enh, func(s Series) float64 {
var mean, count, c float64
var (
sum, mean, count, kahanC float64
incrementalMean bool
)
for _, f := range s.Floats {
count++
if !incrementalMean {
newSum, newC := kahanSumInc(f.F, sum, kahanC)
// Perform regular mean calculation as long as
// the sum doesn't overflow and (in any case)
// for the first iteration (even if we start
// with ±Inf) to not run into division-by-zero
// problems below.
if count == 1 || !math.IsInf(newSum, 0) {
sum, kahanC = newSum, newC
continue
}
// Handle overflow by reverting to incremental calculation of the mean value.
incrementalMean = true
mean = sum / (count - 1)
kahanC /= count - 1
}
if math.IsInf(mean, 0) {
if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) {
// The `mean` and `f.F` values are `Inf` of the same sign. They
@ -593,13 +628,13 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
continue
}
}
mean, c = kahanSumInc(f.F/count-mean/count, mean, c)
correctedMean := mean + kahanC
mean, kahanC = kahanSumInc(f.F/count-correctedMean/count, mean, kahanC)
}
if math.IsInf(mean, 0) {
return mean
if incrementalMean {
return mean + kahanC
}
return mean + c
return (sum + kahanC) / count
}), nil
}
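A quick illustration of why the incremental fallback above is needed at all: summing two large finite floats overflows to +Inf even though their mean is representable, while the running-mean update stays finite (standalone sketch):

package main

import (
	"fmt"
	"math"
)

func main() {
	v := math.MaxFloat64 * 0.8
	naive := (v + v) / 2 // intermediate sum overflows to +Inf
	incr := v            // running mean after the first sample
	incr += v/2 - incr/2 // second sample: update stays finite
	fmt.Println(math.IsInf(naive, 1), incr == v) // true true
}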
@ -665,13 +700,13 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
return enh.Out, nil
}
return aggrOverTime(vals, enh, func(s Series) float64 {
max := s.Floats[0].F
maxVal := s.Floats[0].F
for _, f := range s.Floats {
if f.F > max || math.IsNaN(max) {
max = f.F
if f.F > maxVal || math.IsNaN(maxVal) {
maxVal = f.F
}
}
return max
return maxVal
}), nil
}
@ -685,13 +720,13 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
return enh.Out, nil
}
return aggrOverTime(vals, enh, func(s Series) float64 {
min := s.Floats[0].F
minVal := s.Floats[0].F
for _, f := range s.Floats {
if f.F < min || math.IsNaN(min) {
min = f.F
if f.F < minVal || math.IsNaN(minVal) {
minVal = f.F
}
}
return min
return minVal
}), nil
}

View file

@ -68,6 +68,10 @@ func fuzzParseMetricWithContentType(in []byte, contentType string) int {
panic(warning)
}
if contentType == "application/openmetrics-text" {
p = textparse.NewOpenMetricsParser(in, symbolTable)
}
var err error
for {
_, err = p.Next()

View file

@ -23,6 +23,8 @@ import (
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/common/model"
)
%}
@ -360,11 +362,19 @@ grouping_label_list:
grouping_label : maybe_label
{
if !isLabel($1.Val) {
if !model.LabelName($1.Val).IsValid() {
yylex.(*parser).unexpected("grouping opts", "label")
}
$$ = $1
}
| STRING {
if !model.LabelName(yylex.(*parser).unquoteString($1.Val)).IsValid() {
yylex.(*parser).unexpected("grouping opts", "label")
}
$$ = $1
$$.Pos++
$$.Val = yylex.(*parser).unquoteString($$.Val)
}
| error
{ yylex.(*parser).unexpected("grouping opts", "label"); $$ = Item{} }
;
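The effect of the new STRING alternative above: a quoted grouping label now parses, with the quotes stripped via unquoteString and the name validated through model.LabelName(...).IsValid(). A hypothetical query exercising it (assuming UTF-8 name validation is enabled, as the tests further down set):

sum by ("http.status") (up)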

File diff suppressed because it is too large.

View file

@ -727,23 +727,23 @@ func lexValueSequence(l *Lexer) stateFn {
// was only modified to integrate with our lexer.
func lexEscape(l *Lexer) stateFn {
var n int
var base, max uint32
var base, maxVal uint32
ch := l.next()
switch ch {
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen:
return lexString
case '0', '1', '2', '3', '4', '5', '6', '7':
n, base, max = 3, 8, 255
n, base, maxVal = 3, 8, 255
case 'x':
ch = l.next()
n, base, max = 2, 16, 255
n, base, maxVal = 2, 16, 255
case 'u':
ch = l.next()
n, base, max = 4, 16, unicode.MaxRune
n, base, maxVal = 4, 16, unicode.MaxRune
case 'U':
ch = l.next()
n, base, max = 8, 16, unicode.MaxRune
n, base, maxVal = 8, 16, unicode.MaxRune
case eof:
l.errorf("escape sequence not terminated")
return lexString
@ -772,7 +772,7 @@ func lexEscape(l *Lexer) stateFn {
}
}
if x > max || 0xD800 <= x && x < 0xE000 {
if x > maxVal || 0xD800 <= x && x < 0xE000 {
l.errorf("escape sequence is an invalid Unicode code point")
}
return lexString
@ -1059,16 +1059,3 @@ func isDigit(r rune) bool {
func isAlpha(r rune) bool {
return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
}
// isLabel reports whether the string can be used as label.
func isLabel(s string) bool {
if len(s) == 0 || !isAlpha(rune(s[0])) {
return false
}
for _, c := range s[1:] {
if !isAlphaNumeric(c) {
return false
}
}
return true
}

View file

@ -2397,6 +2397,51 @@ var testExpr = []struct {
},
},
},
{
input: `sum by ("foo")({"some.metric"})`,
expected: &AggregateExpr{
Op: SUM,
Expr: &VectorSelector{
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some.metric"),
},
PosRange: posrange.PositionRange{
Start: 15,
End: 30,
},
},
Grouping: []string{"foo"},
PosRange: posrange.PositionRange{
Start: 0,
End: 31,
},
},
},
{
input: `sum by ("foo)(some_metric{})`,
fail: true,
errMsg: "unterminated quoted string",
},
{
input: `sum by ("foo", bar, 'baz')({"some.metric"})`,
expected: &AggregateExpr{
Op: SUM,
Expr: &VectorSelector{
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some.metric"),
},
PosRange: posrange.PositionRange{
Start: 27,
End: 42,
},
},
Grouping: []string{"foo", "bar", "baz"},
PosRange: posrange.PositionRange{
Start: 0,
End: 43,
},
},
},
{
input: "avg by (foo)(some_metric)",
expected: &AggregateExpr{
@ -3844,6 +3889,7 @@ func readable(s string) string {
}
func TestParseExpressions(t *testing.T) {
model.NameValidationScheme = model.UTF8Validation
for _, test := range testExpr {
t.Run(readable(test.input), func(t *testing.T) {
expr, err := ParseExpr(test.input)

View file

@ -77,14 +77,24 @@ func (node *AggregateExpr) getAggOpStr() string {
switch {
case node.Without:
aggrString += fmt.Sprintf(" without (%s) ", strings.Join(node.Grouping, ", "))
aggrString += fmt.Sprintf(" without (%s) ", joinLabels(node.Grouping))
case len(node.Grouping) > 0:
aggrString += fmt.Sprintf(" by (%s) ", strings.Join(node.Grouping, ", "))
aggrString += fmt.Sprintf(" by (%s) ", joinLabels(node.Grouping))
}
return aggrString
}
func joinLabels(ss []string) string {
for i, s := range ss {
// If the label is already quoted, don't quote it again.
if s[0] != '"' && s[0] != '\'' && s[0] != '`' && !model.IsValidLegacyMetricName(model.LabelValue(s)) {
ss[i] = fmt.Sprintf("\"%s\"", s)
}
}
return strings.Join(ss, ", ")
}
func (node *BinaryExpr) String() string {
returnBool := ""
if node.ReturnBool {

View file

@ -16,6 +16,7 @@ package parser
import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
@ -44,6 +45,14 @@ func TestExprString(t *testing.T) {
in: `sum without(instance) (task:errors:rate10s{job="s"})`,
out: `sum without (instance) (task:errors:rate10s{job="s"})`,
},
{
in: `sum by("foo.bar") (task:errors:rate10s{job="s"})`,
out: `sum by ("foo.bar") (task:errors:rate10s{job="s"})`,
},
{
in: `sum without("foo.bar") (task:errors:rate10s{job="s"})`,
out: `sum without ("foo.bar") (task:errors:rate10s{job="s"})`,
},
{
in: `topk(5, task:errors:rate10s{job="s"})`,
},
@ -157,6 +166,8 @@ func TestExprString(t *testing.T) {
},
}
model.NameValidationScheme = model.UTF8Validation
for _, test := range inputs {
expr, err := ParseExpr(test.in)
require.NoError(t, err)

View file

@ -1003,13 +1003,6 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
}
res := q.Exec(t.context)
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
if res.Err != nil {
if cmd.fail {
return cmd.checkExpectedFailure(res.Err)
@ -1020,6 +1013,13 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
defer q.Close()
if err := cmd.compareResult(res.Value); err != nil {
@ -1050,13 +1050,6 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
}
defer q.Close()
res := q.Exec(t.context)
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
if res.Err != nil {
if cmd.fail {
if err := cmd.checkExpectedFailure(res.Err); err != nil {
@ -1070,6 +1063,13 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
err = cmd.compareResult(res.Value)
if err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)

View file

@ -503,7 +503,7 @@ eval instant at 1m avg(data{test="-big"})
eval instant at 1m avg(data{test="bigzero"})
{} 0
# Test summing extreme values.
# Test summing and averaging extreme values.
clear
load 10s
@ -529,21 +529,39 @@ load 10s
eval instant at 1m sum(data{test="ten"})
{} 10
eval instant at 1m avg(data{test="ten"})
{} 2.5
eval instant at 1m sum by (group) (data{test="pos_inf"})
{group="1"} Inf
{group="2"} Inf
eval instant at 1m avg by (group) (data{test="pos_inf"})
{group="1"} Inf
{group="2"} Inf
eval instant at 1m sum by (group) (data{test="neg_inf"})
{group="1"} -Inf
{group="2"} -Inf
eval instant at 1m avg by (group) (data{test="neg_inf"})
{group="1"} -Inf
{group="2"} -Inf
eval instant at 1m sum(data{test="inf_inf"})
{} NaN
eval instant at 1m avg(data{test="inf_inf"})
{} NaN
eval instant at 1m sum by (group) (data{test="nan"})
{group="1"} NaN
{group="2"} NaN
eval instant at 1m avg by (group) (data{test="nan"})
{group="1"} NaN
{group="2"} NaN
clear
# Test that aggregations are deterministic.

View file

@ -759,7 +759,6 @@ eval instant at 1m avg_over_time(metric6c[1m])
eval instant at 1m sum_over_time(metric6c[1m])/count_over_time(metric6c[1m])
{} NaN
eval instant at 1m avg_over_time(metric7[1m])
{} NaN
@ -800,6 +799,9 @@ load 10s
eval instant at 1m sum_over_time(metric[2m])
{} 2
eval instant at 1m avg_over_time(metric[2m])
{} 0.5
# Tests for stddev_over_time and stdvar_over_time.
clear
load 10s

View file

@ -478,3 +478,29 @@ load_with_nhcb 5m
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})
# Histogram with constant buckets.
load_with_nhcb 1m
const_histogram_bucket{le="0.0"} 1 1 1 1 1
const_histogram_bucket{le="1.0"} 1 1 1 1 1
const_histogram_bucket{le="2.0"} 1 1 1 1 1
const_histogram_bucket{le="+Inf"} 1 1 1 1 1
# There is no change to the bucket count over time, thus rate is 0 in each bucket.
eval instant at 5m rate(const_histogram_bucket[5m])
{le="0.0"} 0
{le="1.0"} 0
{le="2.0"} 0
{le="+Inf"} 0
# Native histograms do not represent empty buckets, so here the zeros are implicit.
eval instant at 5m rate(const_histogram[5m])
{} {{schema:-53 sum:0 count:0 custom_values:[0.0 1.0 2.0]}}
# Zero buckets mean no observations, so there is no value that observations fall below,
# which means that any quantile is a NaN.
eval instant at 5m histogram_quantile(1.0, sum by (le) (rate(const_histogram_bucket[5m])))
{} NaN
eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m])))
{} NaN

View file

@ -780,3 +780,141 @@ eval instant at 5m histogram_quantile(0.5, custom_buckets_histogram)
eval instant at 5m sum(custom_buckets_histogram)
{} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}
clear
# Test 'this native histogram metric is not a gauge' warning for rate
load 30s
some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}}
# Test the case where we only have two points for rate
eval_warn instant at 30s rate(some_metric[1m])
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
# Test the case where we have more than two points for rate
eval_warn instant at 1m rate(some_metric[1m])
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
clear
# Test rate() over mixed exponential and custom buckets.
load 30s
some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
# Start and end with exponential, with custom in the middle.
eval_warn instant at 1m rate(some_metric[1m])
# Should produce no results.
# Start and end with custom, with exponential in the middle.
eval_warn instant at 1m30s rate(some_metric[1m])
# Should produce no results.
# Start with custom, end with exponential.
eval_warn instant at 1m rate(some_metric[1m])
# Should produce no results.
# Start with exponential, end with custom.
eval_warn instant at 30s rate(some_metric[1m])
# Should produce no results.
clear
# Histogram with constant buckets.
load 1m
const_histogram {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}}
# There is no change to the bucket count over time, thus rate is 0 in each bucket.
# However native histograms do not represent empty buckets, so here the zeros are implicit.
eval instant at 5m rate(const_histogram[5m])
{} {{schema:0 sum:0 count:0}}
# Zero buckets mean no observations, thus the denominator in the average is 0
# leading to 0/0, which is NaN.
eval instant at 5m histogram_avg(rate(const_histogram[5m]))
{} NaN
# Zero buckets mean no observations, so count is 0.
eval instant at 5m histogram_count(rate(const_histogram[5m]))
{} 0.0
# Zero buckets mean no observations and empty histogram has a sum of 0 by definition.
eval instant at 5m histogram_sum(rate(const_histogram[5m]))
{} 0.0
# Zero buckets mean no observations, thus the denominator in the fraction is 0,
# leading to 0/0, which is NaN.
eval instant at 5m histogram_fraction(0.0, 1.0, rate(const_histogram[5m]))
{} NaN
# Workaround to calculate the observation count corresponding to NaN fraction.
eval instant at 5m histogram_count(rate(const_histogram[5m])) == 0.0 or histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) * histogram_count(rate(const_histogram[5m]))
{} 0.0
# Zero buckets mean no observations, so there is no value that observations fall below,
# which means that any quantile is a NaN.
eval instant at 5m histogram_quantile(1.0, rate(const_histogram[5m]))
{} NaN
# Zero buckets mean no observations, so there is no standard deviation.
eval instant at 5m histogram_stddev(rate(const_histogram[5m]))
{} NaN
# Zero buckets mean no observations, so there is no standard variance.
eval instant at 5m histogram_stdvar(rate(const_histogram[5m]))
{} NaN
clear
# Test mixing exponential and custom buckets.
load 6m
metric{series="exponential"} {{sum:4 count:3 buckets:[1 2 1]}} _ {{sum:4 count:3 buckets:[1 2 1]}}
metric{series="other-exponential"} {{sum:3 count:2 buckets:[1 1 1]}} _ {{sum:3 count:2 buckets:[1 1 1]}}
metric{series="custom"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric{series="other-custom"} _ {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}}
# T=0: only exponential
# T=6: only custom
# T=12: mixed, should be ignored and emit a warning
eval_warn range from 0 to 12m step 6m sum(metric)
{} {{sum:7 count:5 buckets:[2 3 2]}} {{schema:-53 sum:16 count:3 custom_values:[5 10] buckets:[1 2]}} _
eval_warn range from 0 to 12m step 6m avg(metric)
{} {{sum:3.5 count:2.5 buckets:[1 1.5 1]}} {{schema:-53 sum:8 count:1.5 custom_values:[5 10] buckets:[0.5 1]}} _
clear
# Test incompatible custom bucket schemas.
load 6m
metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
# T=0: incompatible, should be ignored and emit a warning
# T=6: compatible
# T=12: incompatible followed by compatible, should be ignored and emit a warning
eval_warn range from 0 to 12m step 6m sum(metric)
{} _ {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} _
eval_warn range from 0 to 12m step 6m avg(metric)
{} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _
clear
load 1m
metric{group="just-floats", series="1"} 2
metric{group="just-floats", series="2"} 3
metric{group="just-exponential-histograms", series="1"} {{sum:3 count:4 buckets:[1 2 1]}}
metric{group="just-exponential-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}}
metric{group="just-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
metric{group="just-custom-histograms", series="2"} {{schema:-53 sum:3 count:4 custom_values:[2] buckets:[7]}}
metric{group="floats-and-histograms", series="1"} 2
metric{group="floats-and-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}}
metric{group="exponential-and-custom-histograms", series="1"} {{sum:2 count:3 buckets:[1 1 1]}}
metric{group="exponential-and-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric{group="incompatible-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric{group="incompatible-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
eval_warn instant at 0 sum by (group) (metric)
{group="just-floats"} 5
{group="just-exponential-histograms"} {{sum:5 count:7 buckets:[2 3 2]}}
{group="just-custom-histograms"} {{schema:-53 sum:4 count:5 custom_values:[2] buckets:[8]}}

View file

@ -93,6 +93,8 @@ type Options struct {
skipOffsetting bool
}
const DefaultNameEscapingScheme = model.ValueEncodingEscaping
// Manager maintains a set of scrape pools and manages start/stop cycles
// when receiving new target groups from the discovery manager.
type Manager struct {

View file

@ -303,6 +303,11 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
mrc = sp.config.MetricRelabelConfigs
)
validationScheme := model.LegacyValidation
if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig {
validationScheme = model.UTF8Validation
}
sp.targetMtx.Lock()
forcedErr := sp.refreshTargetLimitErr()
@ -323,7 +328,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
client: sp.client,
timeout: timeout,
bodySizeLimit: bodySizeLimit,
acceptHeader: acceptHeader(sp.config.ScrapeProtocols),
acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme),
acceptEncodingHeader: acceptEncodingHeader(enableCompression),
}
newLoop = sp.newLoop(scrapeLoopOptions{
@ -452,6 +457,11 @@ func (sp *scrapePool) sync(targets []*Target) {
scrapeClassicHistograms = sp.config.ScrapeClassicHistograms
)
validationScheme := model.LegacyValidation
if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig {
validationScheme = model.UTF8Validation
}
sp.targetMtx.Lock()
for _, t := range targets {
hash := t.hash()
@ -467,7 +477,7 @@ func (sp *scrapePool) sync(targets []*Target) {
client: sp.client,
timeout: timeout,
bodySizeLimit: bodySizeLimit,
acceptHeader: acceptHeader(sp.config.ScrapeProtocols),
acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme),
acceptEncodingHeader: acceptEncodingHeader(enableCompression),
metrics: sp.metrics,
}
@ -714,11 +724,16 @@ var errBodySizeLimit = errors.New("body size limit exceeded")
// acceptHeader transforms the preference list from the options into specific header values as
// https://www.rfc-editor.org/rfc/rfc9110.html#name-accept defines.
// No validation is done here; we expect scrape protocols to be validated already.
func acceptHeader(sps []config.ScrapeProtocol) string {
func acceptHeader(sps []config.ScrapeProtocol, scheme model.ValidationScheme) string {
var vals []string
weight := len(config.ScrapeProtocolsHeaders) + 1
for _, sp := range sps {
vals = append(vals, fmt.Sprintf("%s;q=0.%d", config.ScrapeProtocolsHeaders[sp], weight))
val := config.ScrapeProtocolsHeaders[sp]
if scheme == model.UTF8Validation {
val += ";" + config.UTF8NamesHeader
}
val += fmt.Sprintf(";q=0.%d", weight)
vals = append(vals, val)
weight--
}
// Default match anything.
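// Illustration (hypothetical, not from the source; the concrete media types
// and order come from config.ScrapeProtocolsHeaders and the configured
// protocols): with UTF-8 validation enabled, the header built here might
// look like
//
//	application/openmetrics-text;version=1.0.0;escaping=allow-utf-8;q=0.3,
//	text/plain;version=0.0.4;escaping=allow-utf-8;q=0.2,*/*;q=0.1
//
// The tests below only assert that "escaping=allow-utf-8" is present.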

View file

@ -2339,11 +2339,15 @@ func TestTargetScraperScrapeOK(t *testing.T) {
)
var protobufParsing bool
var allowUTF8 bool
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
accept := r.Header.Get("Accept")
if allowUTF8 {
require.Truef(t, strings.Contains(accept, "escaping=allow-utf-8"), "Expected Accept header to allow utf8, got %q", accept)
}
if protobufParsing {
accept := r.Header.Get("Accept")
require.True(t, strings.HasPrefix(accept, "application/vnd.google.protobuf;"),
"Expected Accept header to prefer application/vnd.google.protobuf.")
}
@ -2351,7 +2355,11 @@ func TestTargetScraperScrapeOK(t *testing.T) {
timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds")
require.Equal(t, expectedTimeout, timeout, "Expected scrape timeout header.")
w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
if allowUTF8 {
w.Header().Set("Content-Type", `text/plain; version=1.0.0; escaping=allow-utf-8`)
} else {
w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
}
w.Write([]byte("metric_a 1\nmetric_b 2\n"))
}),
)
@ -2380,13 +2388,22 @@ func TestTargetScraperScrapeOK(t *testing.T) {
require.NoError(t, err)
contentType, err := ts.readResponse(context.Background(), resp, &buf)
require.NoError(t, err)
require.Equal(t, "text/plain; version=0.0.4", contentType)
if allowUTF8 {
require.Equal(t, "text/plain; version=1.0.0; escaping=allow-utf-8", contentType)
} else {
require.Equal(t, "text/plain; version=0.0.4", contentType)
}
require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
}
runTest(acceptHeader(config.DefaultScrapeProtocols))
runTest(acceptHeader(config.DefaultScrapeProtocols, model.LegacyValidation))
protobufParsing = true
runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols))
runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.LegacyValidation))
protobufParsing = false
allowUTF8 = true
runTest(acceptHeader(config.DefaultScrapeProtocols, model.UTF8Validation))
protobufParsing = true
runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.UTF8Validation))
}
func TestTargetScrapeScrapeCancel(t *testing.T) {
@ -2412,7 +2429,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
),
},
client: http.DefaultClient,
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols),
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation),
}
ctx, cancel := context.WithCancel(context.Background())
@ -2467,7 +2484,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
),
},
client: http.DefaultClient,
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols),
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation),
}
resp, err := ts.scrape(context.Background())
@ -2511,7 +2528,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
},
client: http.DefaultClient,
bodySizeLimit: bodySizeLimit,
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols),
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation),
metrics: newTestScrapeMetrics(t),
}
var buf bytes.Buffer

View file

@ -1,8 +1,8 @@
-----BEGIN CERTIFICATE-----
MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV
BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4
MDQxNDA5MjFaFw0yNTA4MDExNDA5MjFaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH
Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0yNDA4
MjAxMTUxMjNaFw00NDEyMDUxMTUxMjNaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH
DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ
BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq
@ -12,11 +12,11 @@ yB9M1ypWomzBz1UFXZp1oiNO5o7/dgXW4MgLUfC2obJ9j5xqpc6GkhWMW4ZFwEr/
VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV
w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1
BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL
rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI2iA3w3TK5J15Pu
e4fPFB4jxQqsbUwuyXbCCv/jKLeFNCD4BjM181WZEYjPMumeTBVzU3aF45LWQIG1
0DJcrCL4mjMz9qgAoGqA7aDDXiJGbukMgYYsn7vrnVmrZH8T3E8ySlltr7+W578k
pJ5FxnbCroQwn0zLyVB3sFbS8E3vpBr3L8oy8PwPHhIScexcNVc3V6/m4vTZsXTH
U+vUm1XhDgpDcFMTg2QQiJbfpOYUkwIgnRDAT7t282t2KQWtnlqc3zwPQ1F/6Cpx
j19JeNsaF1DArkD7YlyKj/GhZLtHwFHG5cxznH0mLDJTW7bQvqqh2iQTeXmBk1lU
mM5lH/s=
rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEqhsLzIh098lmLl
CSmuOi5o0NLFaO3qgzaxhvO56RkrtnMQb99/u/F2gQPBoVrubES4fBDRWtfBkmRZ
NabgqghBN27nyLa9DEtHeOzEtBWjYnZKOY5uGf/wwIp+HM2H5QBs8c4nJv+46ev3
L73CS5zWV950dLNPA5iatQgtFsp/tsh2YoYbfPI+bHjMLJWau3cl6ID/m+j4moU7
hbcXTnehz0250CXoXYzmfPHZUjA97Cs3kbzi6Dkxbz3pmHCAfEHdGRMFIZR7Fs/Y
7k44NF5q/82FrI+Umt1OdwUTprSAUrKXZHaI9N1CClAcgP1LbqliEKrvLsEvvg7C
LrUoX4M=
-----END CERTIFICATE-----

View file

@ -26,14 +26,14 @@ jobs:
- name: Checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Install Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: 1.22.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
with:
args: --verbose
version: v1.59.1
version: v1.60.1

View file

@ -96,10 +96,10 @@ func TestSampleRingMixed(t *testing.T) {
// With ValNone as the preferred type, nothing should be initialized.
r := newSampleRing(10, 2, chunkenc.ValNone)
require.Zero(t, len(r.fBuf))
require.Zero(t, len(r.hBuf))
require.Zero(t, len(r.fhBuf))
require.Zero(t, len(r.iBuf))
require.Empty(t, r.fBuf)
require.Empty(t, r.hBuf)
require.Empty(t, r.fhBuf)
require.Empty(t, r.iBuf)
// But then mixed adds should work as expected.
r.addF(fSample{t: 1, f: 3.14})
@ -146,10 +146,10 @@ func TestSampleRingAtFloatHistogram(t *testing.T) {
// With ValNone as the preferred type, nothing should be initialized.
r := newSampleRing(10, 2, chunkenc.ValNone)
require.Zero(t, len(r.fBuf))
require.Zero(t, len(r.hBuf))
require.Zero(t, len(r.fhBuf))
require.Zero(t, len(r.iBuf))
require.Empty(t, r.fBuf)
require.Empty(t, r.hBuf)
require.Empty(t, r.fhBuf)
require.Empty(t, r.iBuf)
var (
h *histogram.Histogram

View file

@ -16,9 +16,10 @@ package storage
import "fmt"
type errDuplicateSampleForTimestamp struct {
timestamp int64
existing float64
newValue float64
timestamp int64
existing float64
existingIsHistogram bool
newValue float64
}
func NewDuplicateFloatErr(t int64, existing, newValue float64) error {
@ -29,13 +30,26 @@ func NewDuplicateFloatErr(t int64, existing, newValue float64) error {
}
}
// NewDuplicateHistogramToFloatErr describes an error where a new float sample is sent for the same timestamp as an existing histogram sample.
func NewDuplicateHistogramToFloatErr(t int64, newValue float64) error {
return errDuplicateSampleForTimestamp{
timestamp: t,
existingIsHistogram: true,
newValue: newValue,
}
}
func (e errDuplicateSampleForTimestamp) Error() string {
if e.timestamp == 0 {
return "duplicate sample for timestamp"
}
if e.existingIsHistogram {
return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing is a histogram, new value %g", e.timestamp, e.newValue)
}
return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing %g, new value %g", e.timestamp, e.existing, e.newValue)
}
// Is implements the anonymous interface checked by errors.Is.
// Every errDuplicateSampleForTimestamp compares equal to the global ErrDuplicateSampleForTimestamp.
func (e errDuplicateSampleForTimestamp) Is(t error) bool {
if t == ErrDuplicateSampleForTimestamp {

storage/errors_test.go (new file, 38 lines)
View file

@ -0,0 +1,38 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestErrDuplicateSampleForTimestamp(t *testing.T) {
// All errDuplicateSampleForTimestamp are ErrDuplicateSampleForTimestamp
require.ErrorIs(t, ErrDuplicateSampleForTimestamp, errDuplicateSampleForTimestamp{})
// The same type compares equal only if it has the same properties.
err := NewDuplicateFloatErr(1_000, 10, 20)
sameErr := NewDuplicateFloatErr(1_000, 10, 20)
differentErr := NewDuplicateFloatErr(1_001, 30, 40)
require.ErrorIs(t, err, sameErr)
require.NotErrorIs(t, err, differentErr)
// Also works when err is wrapped.
require.ErrorIs(t, fmt.Errorf("failed: %w", err), sameErr)
require.NotErrorIs(t, fmt.Errorf("failed: %w", err), differentErr)
}
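
For illustration, a minimal caller-side sketch of the matching behavior the test above exercises (assuming the storage package as shown in this diff):

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

func main() {
	err := storage.NewDuplicateHistogramToFloatErr(1_000, 3.14)
	// Every duplicate-sample error matches the exported sentinel...
	fmt.Println(errors.Is(err, storage.ErrDuplicateSampleForTimestamp)) // true
	// ...and wrapping preserves the match.
	wrapped := fmt.Errorf("commit failed: %w", err)
	fmt.Println(errors.Is(wrapped, storage.ErrDuplicateSampleForTimestamp)) // true
}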

View file

@ -287,7 +287,6 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo
// we can continue handling.
rs, _ := ParseWriteResponseStats(httpResp)
//nolint:usestdlibvars
if httpResp.StatusCode/100 == 2 {
return rs, nil
}
@ -297,7 +296,6 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo
body, _ := io.ReadAll(io.LimitReader(httpResp.Body, maxErrMsgLen))
err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, body)
//nolint:usestdlibvars
if httpResp.StatusCode/100 == 5 ||
(c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) {
return rs, RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
@ -382,7 +380,6 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err)
}
//nolint:usestdlibvars
if httpResp.StatusCode/100 != 2 {
return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed)))
}

View file

@ -21,15 +21,14 @@ import (
"unicode"
)
// Normalizes the specified label to follow Prometheus label names standard
// Normalizes the specified label to follow Prometheus label names standard.
//
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels.
//
// Labels that start with non-letter rune will be prefixed with "key_"
// Labels that start with non-letter rune will be prefixed with "key_".
//
// Exception is made for double-underscores which are allowed
// An exception is made for double-underscores which are allowed.
func NormalizeLabel(label string) string {
// Trivial case
if len(label) == 0 {
return label
@ -48,7 +47,7 @@ func NormalizeLabel(label string) string {
return label
}
// Return '_' for anything non-alphanumeric
// Return '_' for anything non-alphanumeric.
func sanitizeRune(r rune) rune {
if unicode.IsLetter(r) || unicode.IsDigit(r) {
return r
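
A quick sketch of the documented behavior (hypothetical inputs; the import path is an assumption based on where this file lives in the tree):

package main

import (
	"fmt"

	prom "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)

func main() {
	fmt.Println(prom.NormalizeLabel("http.method")) // "http_method": '.' is non-alphanumeric
	fmt.Println(prom.NormalizeLabel("0count"))      // "key_0count": leading non-letter gets the "key_" prefix
	fmt.Println(prom.NormalizeLabel("__name__"))    // "__name__": double underscores are exempt
}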

View file

@ -76,14 +76,15 @@ var perUnitMap = map[string]string{
"y": "year",
}
// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric
// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric.
//
// Metric name is prefixed with specified namespace and underscore (if any).
// Namespace is not cleaned up. Make sure specified namespace follows Prometheus
// naming convention.
//
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
// and https://prometheus.io/docs/practices/naming/#metric-and-label-naming
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels,
// https://prometheus.io/docs/practices/naming/#metric-and-label-naming
// and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus.
func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string {
var metricName string
@ -110,7 +111,7 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix
// Build a normalized name for the specified metric
func normalizeName(metric pmetric.Metric, namespace string) string {
// Split metric name in "tokens" (remove all non-alphanumeric)
// Split metric name into "tokens" (remove all non-alphanumerics)
nameTokens := strings.FieldsFunc(
metric.Name(),
func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) },
@ -122,9 +123,9 @@ func normalizeName(metric pmetric.Metric, namespace string) string {
// Main unit
// Append if not blank, doesn't contain '{}', and is not present in metric name already
if len(unitTokens) > 0 {
mainUnitOtel := strings.TrimSpace(unitTokens[0])
if mainUnitOtel != "" && !strings.ContainsAny(mainUnitOtel, "{}") {
mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOtel))
mainUnitOTel := strings.TrimSpace(unitTokens[0])
if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") {
mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOTel))
if mainUnitProm != "" && !contains(nameTokens, mainUnitProm) {
nameTokens = append(nameTokens, mainUnitProm)
}
@ -133,11 +134,11 @@ func normalizeName(metric pmetric.Metric, namespace string) string {
// Per unit
// Append if not blank, doesn't contain '{}', and is not present in metric name already
if len(unitTokens) > 1 && unitTokens[1] != "" {
perUnitOtel := strings.TrimSpace(unitTokens[1])
if perUnitOtel != "" && !strings.ContainsAny(perUnitOtel, "{}") {
perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOtel))
perUnitOTel := strings.TrimSpace(unitTokens[1])
if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") {
perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOTel))
if perUnitProm != "" && !contains(nameTokens, perUnitProm) {
nameTokens = append(append(nameTokens, "per"), perUnitProm)
nameTokens = append(nameTokens, "per", perUnitProm)
}
}
}
@ -150,7 +151,7 @@ func normalizeName(metric pmetric.Metric, namespace string) string {
}
// Append _ratio for metrics with unit "1"
// Some Otel receivers improperly use unit "1" for counters of objects
// Some OTel receivers improperly use unit "1" for counters of objects
// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions
// Until these issues have been fixed, we're appending `_ratio` for gauges ONLY
// Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons)

View file

@ -0,0 +1,205 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name_test.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
package prometheus
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pmetric"
)
func TestByte(t *testing.T) {
require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), ""))
}
func TestByteCounter(t *testing.T) {
require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), ""))
require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), ""))
}
func TestWhiteSpaces(t *testing.T) {
require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), ""))
}
func TestNonStandardUnit(t *testing.T) {
require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), ""))
}
func TestNonStandardUnitCounter(t *testing.T) {
require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), ""))
}
func TestBrokenUnit(t *testing.T) {
require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), ""))
require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), ""))
require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), ""))
}
func TestBrokenUnitCounter(t *testing.T) {
require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), ""))
require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), ""))
require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), ""))
}
func TestRatio(t *testing.T) {
require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), ""))
require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), ""))
require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), ""))
}
func TestHertz(t *testing.T) {
require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), ""))
}
func TestPer(t *testing.T) {
require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), ""))
require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), ""))
}
func TestPercent(t *testing.T) {
require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), ""))
require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), ""))
}
func TestEmpty(t *testing.T) {
require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), ""))
require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), ""))
}
func TestUnsupportedRunes(t *testing.T) {
require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), ""))
require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), ""))
require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), ""))
}
func TestOTelReceivers(t *testing.T) {
require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), ""))
require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), ""))
require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), ""))
require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), ""))
require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), ""))
require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), ""))
require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), ""))
require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), ""))
require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), ""))
require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), ""))
require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), ""))
require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), ""))
require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), ""))
require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), ""))
require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), ""))
require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), ""))
require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), ""))
require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), ""))
require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), ""))
require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), ""))
require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), ""))
require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), ""))
}
func TestTrimPromSuffixes(t *testing.T) {
assert.Equal(t, "active_directory_ds_replication_network_io", TrimPromSuffixes("active_directory_ds_replication_network_io_bytes_total", pmetric.MetricTypeSum, "bytes"))
assert.Equal(t, "active_directory_ds_name_cache_hit_rate", TrimPromSuffixes("active_directory_ds_name_cache_hit_rate_percent", pmetric.MetricTypeGauge, "percent"))
assert.Equal(t, "active_directory_ds_ldap_bind_last_successful_time", TrimPromSuffixes("active_directory_ds_ldap_bind_last_successful_time_milliseconds", pmetric.MetricTypeGauge, "milliseconds"))
assert.Equal(t, "apache_requests", TrimPromSuffixes("apache_requests_total", pmetric.MetricTypeSum, "1"))
assert.Equal(t, "system_cpu_utilization", TrimPromSuffixes("system_cpu_utilization_ratio", pmetric.MetricTypeGauge, "ratio"))
assert.Equal(t, "mongodbatlas_process_journaling_data_files", TrimPromSuffixes("mongodbatlas_process_journaling_data_files_mebibytes", pmetric.MetricTypeGauge, "mebibytes"))
assert.Equal(t, "mongodbatlas_process_network_io", TrimPromSuffixes("mongodbatlas_process_network_io_bytes_per_second", pmetric.MetricTypeGauge, "bytes_per_second"))
assert.Equal(t, "mongodbatlas_process_oplog_rate", TrimPromSuffixes("mongodbatlas_process_oplog_rate_gibibytes_per_hour", pmetric.MetricTypeGauge, "gibibytes_per_hour"))
assert.Equal(t, "nsxt_node_memory_usage", TrimPromSuffixes("nsxt_node_memory_usage_kilobytes", pmetric.MetricTypeGauge, "kilobytes"))
assert.Equal(t, "redis_latest_fork", TrimPromSuffixes("redis_latest_fork_microseconds", pmetric.MetricTypeGauge, "microseconds"))
assert.Equal(t, "up", TrimPromSuffixes("up", pmetric.MetricTypeGauge, ""))
// These are not necessarily valid OM units, only tested for the sake of completeness.
assert.Equal(t, "active_directory_ds_replication_sync_object_pending", TrimPromSuffixes("active_directory_ds_replication_sync_object_pending_total", pmetric.MetricTypeSum, "{objects}"))
assert.Equal(t, "apache_current", TrimPromSuffixes("apache_current_connections", pmetric.MetricTypeGauge, "connections"))
assert.Equal(t, "bigip_virtual_server_request_count", TrimPromSuffixes("bigip_virtual_server_request_count_total", pmetric.MetricTypeSum, "{requests}"))
assert.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", TrimPromSuffixes("mongodbatlas_process_db_query_targeting_scanned_per_returned", pmetric.MetricTypeGauge, "{scanned}/{returned}"))
assert.Equal(t, "nginx_connections_accepted", TrimPromSuffixes("nginx_connections_accepted", pmetric.MetricTypeGauge, "connections"))
assert.Equal(t, "apache_workers", TrimPromSuffixes("apache_workers_connections", pmetric.MetricTypeGauge, "connections"))
assert.Equal(t, "nginx", TrimPromSuffixes("nginx_requests", pmetric.MetricTypeGauge, "requests"))
// Units shouldn't be trimmed if the unit is not a direct match with the suffix, i.e., a suffix "_seconds" shouldn't be removed if the unit is "sec" or "s".
assert.Equal(t, "system_cpu_load_average_15m_ratio", TrimPromSuffixes("system_cpu_load_average_15m_ratio", pmetric.MetricTypeGauge, "1"))
assert.Equal(t, "mongodbatlas_process_asserts_per_second", TrimPromSuffixes("mongodbatlas_process_asserts_per_second", pmetric.MetricTypeGauge, "{assertions}/s"))
assert.Equal(t, "memcached_operation_hit_ratio_percent", TrimPromSuffixes("memcached_operation_hit_ratio_percent", pmetric.MetricTypeGauge, "%"))
assert.Equal(t, "active_directory_ds_replication_object_rate_per_second", TrimPromSuffixes("active_directory_ds_replication_object_rate_per_second", pmetric.MetricTypeGauge, "{objects}/s"))
assert.Equal(t, "system_disk_operation_time_seconds", TrimPromSuffixes("system_disk_operation_time_seconds_total", pmetric.MetricTypeSum, "s"))
}
func TestNamespace(t *testing.T) {
require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space"))
require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space"))
}
func TestCleanUpString(t *testing.T) {
require.Equal(t, "", CleanUpString(""))
require.Equal(t, "a_b", CleanUpString("a b"))
require.Equal(t, "hello_world", CleanUpString("hello, world!"))
require.Equal(t, "hello_you_2", CleanUpString("hello you 2"))
require.Equal(t, "1000", CleanUpString("$1000"))
require.Equal(t, "", CleanUpString("*+$^=)"))
}
func TestUnitMapGetOrDefault(t *testing.T) {
require.Equal(t, "", unitMapGetOrDefault(""))
require.Equal(t, "seconds", unitMapGetOrDefault("s"))
require.Equal(t, "invalid", unitMapGetOrDefault("invalid"))
}
func TestPerUnitMapGetOrDefault(t *testing.T) {
require.Equal(t, "", perUnitMapGetOrDefault(""))
require.Equal(t, "second", perUnitMapGetOrDefault("s"))
require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid"))
}
func TestRemoveItem(t *testing.T) {
require.Equal(t, []string{}, removeItem([]string{}, "test"))
require.Equal(t, []string{}, removeItem([]string{}, ""))
require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d"))
require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, ""))
require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c"))
require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b"))
require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a"))
}
func TestBuildCompliantNameWithNormalize(t *testing.T) {
require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true))
require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true))
require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true))
require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true))
require.Equal(t, "foo_bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true))
require.Equal(t, "foo_bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true))
// Gauges with unit 1 are considered ratios.
require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true))
// Slashes in units are converted.
require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true))
}
func TestBuildCompliantNameWithoutSuffixes(t *testing.T) {
require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false))
require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false))
require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false))
require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false))
require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false))
require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false))
require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false))
require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false))
require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false))
}

View file

@ -0,0 +1,49 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/testutils_test.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
package prometheus
import (
"go.opentelemetry.io/collector/pdata/pmetric"
)
var ilm pmetric.ScopeMetrics
func init() {
metrics := pmetric.NewMetrics()
resourceMetrics := metrics.ResourceMetrics().AppendEmpty()
ilm = resourceMetrics.ScopeMetrics().AppendEmpty()
}
// Returns a new Metric of type "Gauge" with the specified name and unit.
func createGauge(name string, unit string) pmetric.Metric {
gauge := ilm.Metrics().AppendEmpty()
gauge.SetName(name)
gauge.SetUnit(unit)
gauge.SetEmptyGauge()
return gauge
}
// Returns a new Metric of type Monotonic Sum with the specified name and unit.
func createCounter(name string, unit string) pmetric.Metric {
counter := ilm.Metrics().AppendEmpty()
counter.SetEmptySum().SetIsMonotonic(true)
counter.SetName(name)
counter.SetUnit(unit)
return counter
}

View file

@ -1522,7 +1522,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
// Send batches of at most MaxSamplesPerSend samples to the remote storage.
// If we have fewer samples than that, flush them out after a deadline anyways.
var (
max = s.qm.cfg.MaxSamplesPerSend
maxCount = s.qm.cfg.MaxSamplesPerSend
pBuf = proto.NewBuffer(nil)
pBufRaw []byte
@ -1530,19 +1530,19 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
)
// TODO(@tpaschalis) Should we also raise the max if we have WAL metadata?
if s.qm.sendExemplars {
max += int(float64(max) * 0.1)
maxCount += int(float64(maxCount) * 0.1)
}
// TODO: Dry all of this, we should make an interface/generic for the timeseries type.
batchQueue := queue.Chan()
pendingData := make([]prompb.TimeSeries, max)
pendingData := make([]prompb.TimeSeries, maxCount)
for i := range pendingData {
pendingData[i].Samples = []prompb.Sample{{}}
if s.qm.sendExemplars {
pendingData[i].Exemplars = []prompb.Exemplar{{}}
}
}
pendingDataV2 := make([]writev2.TimeSeries, max)
pendingDataV2 := make([]writev2.TimeSeries, maxCount)
for i := range pendingDataV2 {
pendingDataV2[i].Samples = []writev2.Sample{{}}
}
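
The max-to-maxCount rename sidesteps the predeclared max function introduced in Go 1.21, which a local variable of the same name would shadow for the rest of its scope. A minimal sketch of the pitfall (not from this codebase):

package main

import "fmt"

func main() {
	// With the old name, this scope would contain:
	//
	//   max := 10
	//   _ = max(max, 3) // compile error: max is an int here, not a function
	//
	// Renaming the variable keeps the builtin usable:
	maxCount := 10
	fmt.Println(max(maxCount, 3)) // 10
}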

View file

@ -453,10 +453,10 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenHistogramsHeader))
expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader))
require.Empty(t, len(appendable.samples))
require.Empty(t, len(appendable.histograms))
require.Empty(t, len(appendable.exemplars))
require.Empty(t, len(appendable.metadata))
require.Empty(t, appendable.samples)
require.Empty(t, appendable.histograms)
require.Empty(t, appendable.exemplars)
require.Empty(t, appendable.metadata)
return
}

View file

@ -72,7 +72,7 @@ func TestListSeriesIterator(t *testing.T) {
require.Equal(t, chunkenc.ValNone, it.Seek(2))
}
// TestSeriesSetToChunkSet test the property of SeriesSet that says
// TestChunkSeriesSetToSeriesSet test the property of SeriesSet that says
// returned series should be iterable even after Next is called.
func TestChunkSeriesSetToSeriesSet(t *testing.T) {
series := []struct {

View file

@ -23,7 +23,6 @@ import (
"net"
"net/url"
"sort"
"strconv"
"strings"
text_template "text/template"
"time"
@ -106,25 +105,6 @@ func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (quer
return result, nil
}
func convertToFloat(i interface{}) (float64, error) {
switch v := i.(type) {
case float64:
return v, nil
case string:
return strconv.ParseFloat(v, 64)
case int:
return float64(v), nil
case uint:
return float64(v), nil
case int64:
return float64(v), nil
case uint64:
return float64(v), nil
default:
return 0, fmt.Errorf("can't convert %T to float", v)
}
}
// Expander executes templates in text or HTML mode with a common set of Prometheus template functions.
type Expander struct {
text string
@ -186,7 +166,7 @@ func NewTemplateExpander(
return html_template.HTML(text)
},
"match": regexp.MatchString,
"title": strings.Title, //nolint:staticcheck
"title": strings.Title,
"toUpper": strings.ToUpper,
"toLower": strings.ToLower,
"graphLink": strutil.GraphLinkForExpression,
@ -219,7 +199,7 @@ func NewTemplateExpander(
return host
},
"humanize": func(i interface{}) (string, error) {
v, err := convertToFloat(i)
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return "", err
}
@ -248,7 +228,7 @@ func NewTemplateExpander(
return fmt.Sprintf("%.4g%s", v, prefix), nil
},
"humanize1024": func(i interface{}) (string, error) {
v, err := convertToFloat(i)
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return "", err
}
@ -267,30 +247,15 @@ func NewTemplateExpander(
},
"humanizeDuration": common_templates.HumanizeDuration,
"humanizePercentage": func(i interface{}) (string, error) {
v, err := convertToFloat(i)
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return "", err
}
return fmt.Sprintf("%.4g%%", v*100), nil
},
"humanizeTimestamp": func(i interface{}) (string, error) {
v, err := convertToFloat(i)
if err != nil {
return "", err
}
tm, err := floatToTime(v)
switch {
case errors.Is(err, errNaNOrInf):
return fmt.Sprintf("%.4g", v), nil
case err != nil:
return "", err
}
return fmt.Sprint(tm), nil
},
"humanizeTimestamp": common_templates.HumanizeTimestamp,
"toTime": func(i interface{}) (*time.Time, error) {
v, err := convertToFloat(i)
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return nil, err
}
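
The humanize helpers now delegate to prometheus/common instead of a local convertToFloat. A minimal sketch of the relocated function (assuming the common_templates alias maps to github.com/prometheus/common/helpers/templates):

package main

import (
	"fmt"

	common_templates "github.com/prometheus/common/helpers/templates"
)

func main() {
	// ConvertToFloat accepts the same scalar types the removed local function did.
	v, err := common_templates.ConvertToFloat("3.14")
	fmt.Println(v, err) // 3.14 <nil>
}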

View file

@ -1,8 +1,8 @@
-----BEGIN CERTIFICATE-----
MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV
BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4
MDQxNDA5MjFaFw0yNTA4MDExNDA5MjFaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH
Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0yNDA4
MjAxMTUxMjNaFw00NDEyMDUxMTUxMjNaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH
DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ
BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq
@ -12,11 +12,11 @@ yB9M1ypWomzBz1UFXZp1oiNO5o7/dgXW4MgLUfC2obJ9j5xqpc6GkhWMW4ZFwEr/
VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV
w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1
BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL
rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI2iA3w3TK5J15Pu
e4fPFB4jxQqsbUwuyXbCCv/jKLeFNCD4BjM181WZEYjPMumeTBVzU3aF45LWQIG1
0DJcrCL4mjMz9qgAoGqA7aDDXiJGbukMgYYsn7vrnVmrZH8T3E8ySlltr7+W578k
pJ5FxnbCroQwn0zLyVB3sFbS8E3vpBr3L8oy8PwPHhIScexcNVc3V6/m4vTZsXTH
U+vUm1XhDgpDcFMTg2QQiJbfpOYUkwIgnRDAT7t282t2KQWtnlqc3zwPQ1F/6Cpx
j19JeNsaF1DArkD7YlyKj/GhZLtHwFHG5cxznH0mLDJTW7bQvqqh2iQTeXmBk1lU
mM5lH/s=
rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEqhsLzIh098lmLl
CSmuOi5o0NLFaO3qgzaxhvO56RkrtnMQb99/u/F2gQPBoVrubES4fBDRWtfBkmRZ
NabgqghBN27nyLa9DEtHeOzEtBWjYnZKOY5uGf/wwIp+HM2H5QBs8c4nJv+46ev3
L73CS5zWV950dLNPA5iatQgtFsp/tsh2YoYbfPI+bHjMLJWau3cl6ID/m+j4moU7
hbcXTnehz0250CXoXYzmfPHZUjA97Cs3kbzi6Dkxbz3pmHCAfEHdGRMFIZR7Fs/Y
7k44NF5q/82FrI+Umt1OdwUTprSAUrKXZHaI9N1CClAcgP1LbqliEKrvLsEvvg7C
LrUoX4M=
-----END CERTIFICATE-----

View file

@ -419,6 +419,7 @@ loop:
// fill in the bucket in b and advance a.
if aCount == 0 {
bInter.num++ // Mark that we need to insert a bucket in b.
bInter.bucketIdx = aIdx
// Advance a
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
@ -436,6 +437,7 @@ loop:
return nil, nil, false
case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare.
aInter.num++
bInter.bucketIdx = bIdx
// Advance b
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
@ -453,6 +455,7 @@ loop:
// fill in the bucket in b and advance a.
if aCount == 0 {
bInter.num++
bInter.bucketIdx = aIdx
// Advance a
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
@ -471,6 +474,7 @@ loop:
return nil, nil, false
case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
aInter.num++
bInter.bucketIdx = bIdx
// Advance b
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
@ -773,6 +777,23 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
happ.appendFloatHistogram(t, h)
return newChunk, false, app, nil
}
if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
// The histogram needs to be expanded to have the extra empty buckets
// of the chunk.
if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 {
// No new buckets from the histogram, so the spans of the appender can accommodate the new buckets.
// However, we need to make a copy in case the input is sharing spans from an iterator.
h.PositiveSpans = make([]histogram.Span, len(a.pSpans))
copy(h.PositiveSpans, a.pSpans)
h.NegativeSpans = make([]histogram.Span, len(a.nSpans))
copy(h.NegativeSpans, a.nSpans)
} else {
// Spans need pre-adjusting to accommodate the new buckets.
h.PositiveSpans = adjustForInserts(h.PositiveSpans, pBackwardInserts)
h.NegativeSpans = adjustForInserts(h.NegativeSpans, nBackwardInserts)
}
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("float histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
@ -784,13 +805,6 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
app.(*FloatHistogramAppender).appendFloatHistogram(t, h)
return chk, true, app, nil
}
if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
// The histogram needs to be expanded to have the extra empty buckets
// of the chunk.
h.PositiveSpans = a.pSpans
h.NegativeSpans = a.nSpans
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
a.appendFloatHistogram(t, h)
return nil, false, a, nil
}

View file

@ -411,6 +411,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
{Offset: 3, Length: 2},
{Offset: 5, Length: 1},
}
savedH2Spans := h2.PositiveSpans
h2.PositiveBuckets = []float64{7, 4, 3, 5, 2}
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
@ -426,6 +427,43 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
// Check that h2 was recoded.
require.Equal(t, []float64{7, 0, 4, 3, 5, 0, 2}, h2.PositiveBuckets)
require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans)
require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy")
}
{ // New histogram that has new buckets AND buckets missing but the buckets missing were empty.
emptyBucketH := eh.Copy()
emptyBucketH.PositiveBuckets = []float64{6, 0, 3, 2, 4, 0, 1}
c, hApp, ts, h1 := setup(emptyBucketH)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{
{Offset: 0, Length: 1},
{Offset: 3, Length: 1},
{Offset: 3, Length: 2},
{Offset: 5, Length: 2},
}
savedH2Spans := h2.PositiveSpans
h2.PositiveBuckets = []float64{7, 4, 3, 5, 2, 3}
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.False(t, cr)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
// Check that h2 was recoded.
require.Equal(t, []float64{7, 0, 4, 3, 5, 0, 2, 3}, h2.PositiveBuckets)
require.Equal(t, []histogram.Span{
{Offset: 0, Length: 2}, // Added empty bucket.
{Offset: 2, Length: 1}, // Existing - offset adjusted.
{Offset: 3, Length: 2}, // Existing.
{Offset: 3, Length: 1}, // Added empty bucket.
{Offset: 1, Length: 2}, // Existing + the extra bucket.
}, h2.PositiveSpans)
require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy")
}
{ // New histogram that has a counter reset while buckets are same.

View file

@ -437,6 +437,7 @@ loop:
// fill in the bucket in b and advance a.
if aCount == 0 {
bInter.num++ // Mark that we need to insert a bucket in b.
bInter.bucketIdx = aIdx
// Advance a
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
@ -454,6 +455,7 @@ loop:
return nil, nil, false
case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare.
aInter.num++
aInter.bucketIdx = bIdx
// Advance b
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
@ -471,6 +473,7 @@ loop:
// fill in the bucket in b and advance a.
if aCount == 0 {
bInter.num++
bInter.bucketIdx = aIdx
// Advance a
if aInter.num > 0 {
aInserts = append(aInserts, aInter)
@ -489,6 +492,7 @@ loop:
return nil, nil, false
case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
aInter.num++
aInter.bucketIdx = bIdx
// Advance b
if bInter.num > 0 {
bInserts = append(bInserts, bInter)
@ -807,6 +811,23 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
happ.appendHistogram(t, h)
return newChunk, false, app, nil
}
if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
// The histogram needs to be expanded to have the extra empty buckets
// of the chunk.
if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 {
// No new buckets from the histogram, so the spans of the appender can accommodate the new buckets.
// However, we need to make a copy in case the input is sharing spans from an iterator.
h.PositiveSpans = make([]histogram.Span, len(a.pSpans))
copy(h.PositiveSpans, a.pSpans)
h.NegativeSpans = make([]histogram.Span, len(a.nSpans))
copy(h.NegativeSpans, a.nSpans)
} else {
// Spans need pre-adjusting to accommodate the new buckets.
h.PositiveSpans = adjustForInserts(h.PositiveSpans, pBackwardInserts)
h.NegativeSpans = adjustForInserts(h.NegativeSpans, nBackwardInserts)
}
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
if appendOnly {
return nil, false, a, fmt.Errorf("histogram layout change with %d positive and %d negative forwards inserts", len(pForwardInserts), len(nForwardInserts))
@ -818,13 +839,6 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
app.(*HistogramAppender).appendHistogram(t, h)
return chk, true, app, nil
}
if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
// The histogram needs to be expanded to have the extra empty buckets
// of the chunk.
h.PositiveSpans = a.pSpans
h.NegativeSpans = a.nSpans
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
a.appendHistogram(t, h)
return nil, false, a, nil
}

View file

@ -278,6 +278,10 @@ func (b *bucketIterator) Next() (int, bool) {
type Insert struct {
pos int
num int
// Optional: bucketIdx is the index of the bucket that is inserted.
// Can be used to adjust spans.
bucketIdx int
}
// Deprecated: expandSpansForward, use expandIntSpansAndBuckets or
@ -577,3 +581,65 @@ func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterR
return histogram.UnknownCounterReset
}
}
// adjustForInserts adjusts the spans for the given inserts.
func adjustForInserts(spans []histogram.Span, inserts []Insert) (mergedSpans []histogram.Span) {
if len(inserts) == 0 {
return spans
}
it := newBucketIterator(spans)
var (
lastBucket int
i int
insertIdx = inserts[i].bucketIdx
insertNum = inserts[i].num
)
addBucket := func(b int) {
offset := b - lastBucket - 1
if offset == 0 && len(mergedSpans) > 0 {
mergedSpans[len(mergedSpans)-1].Length++
} else {
if len(mergedSpans) == 0 {
offset++
}
mergedSpans = append(mergedSpans, histogram.Span{
Offset: int32(offset),
Length: 1,
})
}
lastBucket = b
}
consumeInsert := func() {
// Consume the insert.
insertNum--
if insertNum == 0 {
i++
if i < len(inserts) {
insertIdx = inserts[i].bucketIdx
insertNum = inserts[i].num
}
} else {
insertIdx++
}
}
bucket, ok := it.Next()
for ok {
if i < len(inserts) && insertIdx < bucket {
addBucket(insertIdx)
consumeInsert()
} else {
addBucket(bucket)
bucket, ok = it.Next()
}
}
for i < len(inserts) {
addBucket(inserts[i].bucketIdx)
consumeInsert()
}
return
}
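
To make the span arithmetic concrete, here is a hand-worked instance (a sketch only; adjustForInserts is unexported, and the expected result below is traced by hand from the code above):

package main

import "fmt"

// Span mirrors histogram.Span: Offset is relative to the end of the
// previous span; Length counts consecutive buckets.
type Span struct {
	Offset int32
	Length uint32
}

func main() {
	// Buckets at absolute indexes 0 and 1, plus one insert at index 3.
	before := []Span{{Offset: 0, Length: 2}}
	// Tracing adjustForInserts: indexes 0 and 1 extend the first span,
	// then the inserted bucket at index 3 opens a new span at offset 1.
	after := []Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 1}}
	fmt.Println(before, after) // [{0 2}] [{0 2} {1 1}]
}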

View file

@ -428,6 +428,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
{Offset: 4, Length: 1},
{Offset: 1, Length: 1},
}
savedH2Spans := h2.PositiveSpans
h2.PositiveBuckets = []int64{7, -5, 1, 0, 1} // counts: 7, 2, 3, 3, 4 (total 18)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
@ -443,6 +444,44 @@ func TestHistogramChunkAppendable(t *testing.T) {
// Check that h2 was recoded.
require.Equal(t, []int64{7, -7, 2, 1, -3, 3, 1}, h2.PositiveBuckets) // counts: 7, 0, 2, 3 , 0, 3, 4 (total 18)
require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans)
require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy")
}
{ // New histogram that has new buckets AND buckets missing but the buckets missing were empty.
emptyBucketH := eh.Copy()
emptyBucketH.PositiveBuckets = []int64{6, -6, 1, 1, -2, 1, 1} // counts: 6, 0, 1, 2, 0, 1, 2 (total 12)
c, hApp, ts, h1 := setup(emptyBucketH)
h2 := h1.Copy()
h2.PositiveSpans = []histogram.Span{ // Missing buckets at offset 1 and 9.
{Offset: 0, Length: 1},
{Offset: 3, Length: 1},
{Offset: 3, Length: 1},
{Offset: 4, Length: 1},
{Offset: 1, Length: 2},
}
savedH2Spans := h2.PositiveSpans
h2.PositiveBuckets = []int64{7, -5, 1, 0, 1, 1} // counts: 7, 2, 3, 3, 4, 5 (total 23)
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.NotEmpty(t, backwardPositiveInserts)
require.Empty(t, backwardNegativeInserts)
require.True(t, ok)
require.False(t, cr)
assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
// Check that h2 was recoded.
require.Equal(t, []int64{7, -7, 2, 1, -3, 3, 1, 1}, h2.PositiveBuckets) // counts: 7, 0, 2, 3 , 0, 3, 5 (total 23)
require.Equal(t, []histogram.Span{
{Offset: 0, Length: 2}, // Added empty bucket.
{Offset: 2, Length: 1}, // Existing - offset adjusted.
{Offset: 3, Length: 2}, // Added empty bucket.
{Offset: 3, Length: 1}, // Existing - offset adjusted.
{Offset: 1, Length: 2}, // Existing.
}, h2.PositiveSpans)
require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy")
}
{ // New histogram that has a counter reset while buckets are same.

View file

@ -69,16 +69,16 @@ func TestQueuePushPopSingleGoroutine(t *testing.T) {
const maxSize = 500
const maxIters = 50
for max := 1; max < maxSize; max++ {
queue := newWriteJobQueue(max, 1+(r.Int()%max))
for maxCount := 1; maxCount < maxSize; maxCount++ {
queue := newWriteJobQueue(maxCount, 1+(r.Int()%maxCount))
elements := 0 // total elements in the queue
lastWriteID := 0
lastReadID := 0
for iter := 0; iter < maxIters; iter++ {
if elements < max {
toWrite := r.Int() % (max - elements)
if elements < maxCount {
toWrite := r.Int() % (maxCount - elements)
if toWrite == 0 {
toWrite = 1
}

View file

@ -22,6 +22,7 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"sync"
"testing"
@ -1925,3 +1926,229 @@ func TestCompactEmptyResultBlockWithTombstone(t *testing.T) {
require.Nil(t, ulids)
require.NoError(t, block.Close())
}
func TestDelayedCompaction(t *testing.T) {
// The delay is chosen in such a way as to not slow down the tests, but also to make
// the effective compaction duration negligible compared to it, so that the duration comparisons make sense.
delay := 1000 * time.Millisecond
waitUntilCompactedAndCheck := func(db *DB) {
t.Helper()
start := time.Now()
for db.head.compactable() {
// This simulates what happens at the end of commits; for a less busy DB, a compaction
// is triggered every minute. This is to speed up the test.
select {
case db.compactc <- struct{}{}:
default:
}
time.Sleep(time.Millisecond)
}
duration := time.Since(start)
// Only one delay period should have elapsed: delay < duration < 2*delay.
require.Greater(t, duration, db.opts.CompactionDelay)
require.Less(t, duration, 2*db.opts.CompactionDelay)
}
compactAndCheck := func(db *DB) {
t.Helper()
start := time.Now()
db.Compact(context.Background())
for db.head.compactable() {
time.Sleep(time.Millisecond)
}
if runtime.GOOS == "windows" {
// TODO: enable on windows once ms resolution timers are better supported.
return
}
duration := time.Since(start)
require.Less(t, duration, delay)
}
cases := []struct {
name string
// The delays are chosen in such a way as to not slow down the tests, but also in a way to make the
// effective compaction duration negligible compared to them, so that the duration comparisons make sense.
compactionDelay time.Duration
}{
{
"delayed compaction not enabled",
0,
},
{
"delayed compaction enabled",
delay,
},
}
for _, c := range cases {
c := c
t.Run(c.name, func(t *testing.T) {
t.Parallel()
var options *Options
if c.compactionDelay > 0 {
options = &Options{CompactionDelay: c.compactionDelay}
}
db := openTestDB(t, options, []int64{10})
defer func() {
require.NoError(t, db.Close())
}()
label := labels.FromStrings("foo", "bar")
// The first compaction is expected to result in 1 block.
db.DisableCompactions()
app := db.Appender(context.Background())
_, err := app.Append(0, label, 0, 0)
require.NoError(t, err)
_, err = app.Append(0, label, 11, 0)
require.NoError(t, err)
_, err = app.Append(0, label, 21, 0)
require.NoError(t, err)
require.NoError(t, app.Commit())
if c.compactionDelay == 0 {
// When delay is not enabled, compaction should run on the first trigger.
compactAndCheck(db)
} else {
db.EnableCompactions()
waitUntilCompactedAndCheck(db)
// The db.compactc signals have been processed multiple times since a compaction is triggered every 1ms by waitUntilCompactedAndCheck.
// This implies that the compaction delay doesn't block or wait on the initial trigger.
// 3 is an arbitrary value because it's difficult to determine the precise value.
require.GreaterOrEqual(t, prom_testutil.ToFloat64(db.metrics.compactionsTriggered)-prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 3.0)
// The delay doesn't change the head blocks alignment.
require.Eventually(t, func() bool {
return db.head.MinTime() == db.compactor.(*LeveledCompactor).ranges[0]+1
}, 500*time.Millisecond, 10*time.Millisecond)
// One compaction was run and one block was produced.
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran))
}
// The second compaction is expected to result in 2 blocks.
// This ensures that the compaction-delay logic doesn't only work for the first compaction, but also takes future compactions into account.
// This also ensures that no delay happens between consecutive compactions.
db.DisableCompactions()
app = db.Appender(context.Background())
_, err = app.Append(0, label, 31, 0)
require.NoError(t, err)
_, err = app.Append(0, label, 41, 0)
require.NoError(t, err)
require.NoError(t, app.Commit())
if c.compactionDelay == 0 {
// Compaction should still run on the first trigger.
compactAndCheck(db)
} else {
db.EnableCompactions()
waitUntilCompactedAndCheck(db)
}
// Two other compactions were run.
require.Eventually(t, func() bool {
return prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran) == 3.0
}, 500*time.Millisecond, 10*time.Millisecond)
if c.compactionDelay == 0 {
return
}
// This test covers a special case. If auto compaction is in a delay period and a manual compaction is triggered,
// auto compaction should stop waiting for the delay if the head is no longer compactable.
// Of course, if the head is still compactable after the manual compaction, auto compaction will continue waiting for the same delay.
getTimeWhenCompactionDelayStarted := func() time.Time {
t.Helper()
db.cmtx.Lock()
defer db.cmtx.Unlock()
return db.timeWhenCompactionDelayStarted
}
db.DisableCompactions()
app = db.Appender(context.Background())
_, err = app.Append(0, label, 51, 0)
require.NoError(t, err)
require.NoError(t, app.Commit())
require.True(t, db.head.compactable())
db.EnableCompactions()
// Trigger an auto compaction.
db.compactc <- struct{}{}
// That made auto compaction start waiting for the delay.
require.Eventually(t, func() bool {
return !getTimeWhenCompactionDelayStarted().IsZero()
}, 100*time.Millisecond, 10*time.Millisecond)
// Trigger a manual compaction.
require.NoError(t, db.CompactHead(NewRangeHead(db.Head(), 0, 50.0)))
require.Equal(t, 4.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran))
// Re-trigger an auto compaction.
db.compactc <- struct{}{}
// That made auto compaction stop waiting for the delay.
require.Eventually(t, func() bool {
return getTimeWhenCompactionDelayStarted().IsZero()
}, 100*time.Millisecond, 10*time.Millisecond)
})
}
}
// TestDelayedCompactionDoesNotBlockUnrelatedOps makes sure that when delayed compaction is enabled,
// operations that don't directly derive from the Head compaction are not delayed; here we consider disk block compaction.
func TestDelayedCompactionDoesNotBlockUnrelatedOps(t *testing.T) {
cases := []struct {
name string
whenCompactable bool
}{
{
"Head is compactable",
true,
},
{
"Head is not compactable",
false,
},
}
for _, c := range cases {
c := c
t.Run(c.name, func(t *testing.T) {
t.Parallel()
tmpdir := t.TempDir()
// Some blocks that need compaction are present.
createBlock(t, tmpdir, genSeries(1, 1, 0, 100))
createBlock(t, tmpdir, genSeries(1, 1, 100, 200))
createBlock(t, tmpdir, genSeries(1, 1, 200, 300))
options := DefaultOptions()
// This will make the test time out if compaction really waits for it.
options.CompactionDelay = time.Hour
db, err := open(tmpdir, log.NewNopLogger(), nil, options, []int64{10, 200}, nil)
require.NoError(t, err)
defer func() {
require.NoError(t, db.Close())
}()
db.DisableCompactions()
require.Len(t, db.Blocks(), 3)
if c.whenCompactable {
label := labels.FromStrings("foo", "bar")
app := db.Appender(context.Background())
_, err := app.Append(0, label, 301, 0)
require.NoError(t, err)
_, err = app.Append(0, label, 317, 0)
require.NoError(t, err)
require.NoError(t, app.Commit())
// The Head is compactable now and will still be so at the end.
require.True(t, db.head.compactable())
defer func() {
require.True(t, db.head.compactable())
}()
}
// The blocks were compacted.
db.Compact(context.Background())
require.Len(t, db.Blocks(), 2)
})
}
}

View file

@ -21,6 +21,7 @@ import (
"io"
"io/fs"
"math"
"math/rand"
"os"
"path/filepath"
"slices"
@ -84,6 +85,8 @@ func DefaultOptions() *Options {
OutOfOrderCapMax: DefaultOutOfOrderCapMax,
EnableOverlappingCompaction: true,
EnableSharding: false,
EnableDelayedCompaction: false,
CompactionDelay: time.Duration(0),
}
}
@ -184,12 +187,18 @@ type Options struct {
// The reason why this flag exists is because there are various users of the TSDB
// that do not want vertical compaction happening on ingest time. Instead,
// they'd rather keep overlapping blocks and let another component do the overlapping compaction later.
// For Prometheus, this will always be true.
EnableOverlappingCompaction bool
// EnableSharding enables query sharding support in TSDB.
EnableSharding bool
// EnableDelayedCompaction, when set to true, assigns a random value to CompactionDelay during DB opening.
// When set to false, delayed compaction is disabled, unless CompactionDelay is set directly.
EnableDelayedCompaction bool
// CompactionDelay delays the start time of auto compactions.
// It can be increased by up to one minute if the DB does not commit too often.
CompactionDelay time.Duration
// NewCompactorFunc is a function that returns a TSDB compactor.
NewCompactorFunc NewCompactorFunc
@ -246,6 +255,9 @@ type DB struct {
// Cancel a running compaction when a shutdown is initiated.
compactCancel context.CancelFunc
// timeWhenCompactionDelayStarted helps delay the compactions start time.
timeWhenCompactionDelayStarted time.Time
// oooWasEnabled is true if out of order support was enabled at least one time
// during the time TSDB was up. In which case we need to keep supporting
// out-of-order compaction and vertical queries.
@ -681,7 +693,7 @@ func (db *DBReadOnly) LastBlockID() (string, error) {
return "", err
}
max := uint64(0)
maxT := uint64(0)
lastBlockID := ""
@ -693,8 +705,8 @@ func (db *DBReadOnly) LastBlockID() (string, error) {
continue // Not a block dir.
}
timestamp := ulidObj.Time()
if timestamp > max {
max = timestamp
if timestamp > maxT {
maxT = timestamp
lastBlockID = dirName
}
}
@ -998,6 +1010,10 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
db.oooWasEnabled.Store(true)
}
if opts.EnableDelayedCompaction {
opts.CompactionDelay = db.generateCompactionDelay()
}
go db.run(ctx)
return db, nil
@ -1186,6 +1202,12 @@ func (a dbAppender) Commit() error {
return err
}
// waitingForCompactionDelay returns true if the DB is waiting for the Head compaction delay.
// This doesn't guarantee that the Head is really compactable.
func (db *DB) waitingForCompactionDelay() bool {
return time.Since(db.timeWhenCompactionDelayStarted) < db.opts.CompactionDelay
}
// Compact data if possible. After successful compaction blocks are reloaded
// which will also delete the blocks that fall out of the retention window.
// Old blocks are only deleted on reloadBlocks based on the new block's parent information.
@ -1219,7 +1241,21 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) {
return nil
default:
}
if !db.head.compactable() {
// Reset the counter once the head compactions are done.
// This would also reset it if a manual compaction was triggered while the auto compaction was in its delay period.
if !db.timeWhenCompactionDelayStarted.IsZero() {
db.timeWhenCompactionDelayStarted = time.Time{}
}
break
}
if db.timeWhenCompactionDelayStarted.IsZero() {
// Start counting for the delay.
db.timeWhenCompactionDelayStarted = time.Now()
}
if db.waitingForCompactionDelay() {
break
}
mint := db.head.MinTime()
@ -1295,6 +1331,9 @@ func (db *DB) CompactOOOHead(ctx context.Context) error {
return db.compactOOOHead(ctx)
}
// Callback for testing.
var compactOOOHeadTestingCallback func()
func (db *DB) compactOOOHead(ctx context.Context) error {
if !db.oooWasEnabled.Load() {
return nil
@ -1304,6 +1343,11 @@ func (db *DB) compactOOOHead(ctx context.Context) error {
return fmt.Errorf("get ooo compaction head: %w", err)
}
if compactOOOHeadTestingCallback != nil {
compactOOOHeadTestingCallback()
compactOOOHeadTestingCallback = nil
}
ulids, err := db.compactOOO(db.dir, oooHead)
if err != nil {
return fmt.Errorf("compact ooo head: %w", err)
@ -1421,7 +1465,7 @@ func (db *DB) compactBlocks() (err error) {
// If we have a lot of blocks to compact the whole process might take
// long enough that we end up with a HEAD block that needs to be written.
// Check if that's the case and stop compactions early.
if db.head.compactable() {
if db.head.compactable() && !db.waitingForCompactionDelay() {
level.Warn(db.logger).Log("msg", "aborting block compactions to persist the head block")
return nil
}
@ -1924,6 +1968,11 @@ func (db *DB) EnableCompactions() {
level.Info(db.logger).Log("msg", "Compactions enabled")
}
func (db *DB) generateCompactionDelay() time.Duration {
// Up to 10% of the head's chunkRange.
return time.Duration(rand.Int63n(db.head.chunkRange.Load()/10)) * time.Millisecond
}
// ForceHeadMMap is intended for use only in tests and benchmarks.
func (db *DB) ForceHeadMMap() {
db.head.mmapHeadChunks()
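
A minimal sketch of opting in to delayed compaction (assuming the exported tsdb.Open wrapper around the open function shown above, with the go-kit logger these tests use):

package main

import (
	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	opts := tsdb.DefaultOptions()
	// With EnableDelayedCompaction, open() assigns a random CompactionDelay
	// of up to 10% of the head chunk range; alternatively, leave it false
	// and set opts.CompactionDelay to a fixed duration directly.
	opts.EnableDelayedCompaction = true

	db, err := tsdb.Open("data", log.NewNopLogger(), nil, opts, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()
}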
@ -1980,7 +2029,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
}
}
blockQueriers := make([]storage.Querier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers
blockQueriers := make([]storage.Querier, 0, len(blocks)+1) // +1 to allow for possible head querier.
defer func() {
if err != nil {
@ -1992,10 +2041,12 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
}
}()
if maxt >= db.head.MinTime() {
overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime())
var headQuerier storage.Querier
if maxt >= db.head.MinTime() || overlapsOOO {
rh := NewRangeHead(db.head, mint, maxt)
var err error
inOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
headQuerier, err = db.blockQuerierFunc(rh, mint, maxt)
if err != nil {
return nil, fmt.Errorf("open block querier for head %s: %w", rh, err)
}
@ -2005,36 +2056,28 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
// won't run into a race later since any truncation that comes after will wait on this querier if it overlaps.
shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt)
if shouldClose {
if err := inOrderHeadQuerier.Close(); err != nil {
if err := headQuerier.Close(); err != nil {
return nil, fmt.Errorf("closing head block querier %s: %w", rh, err)
}
inOrderHeadQuerier = nil
headQuerier = nil
}
if getNew {
rh := NewRangeHead(db.head, newMint, maxt)
inOrderHeadQuerier, err = db.blockQuerierFunc(rh, newMint, maxt)
headQuerier, err = db.blockQuerierFunc(rh, newMint, maxt)
if err != nil {
return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err)
}
}
if inOrderHeadQuerier != nil {
blockQueriers = append(blockQueriers, inOrderHeadQuerier)
}
}
if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
var err error
outOfOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
if err != nil {
// If BlockQuerierFunc() failed, make sure to clean up the pending read created by NewOOORangeHead.
rh.isoState.Close()
if overlapsOOO {
// We need to fetch from in-order and out-of-order chunks: wrap the headQuerier.
isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef)
headQuerier = NewHeadAndOOOQuerier(mint, maxt, db.head, isoState, headQuerier)
}
return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err)
}
blockQueriers = append(blockQueriers, outOfOrderHeadQuerier)
if headQuerier != nil {
blockQueriers = append(blockQueriers, headQuerier)
}
for _, b := range blocks {
@ -2062,7 +2105,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
}
}
blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers
blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+1) // +1 to allow for possible head querier.
defer func() {
if err != nil {
@ -2074,9 +2117,11 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
}
}()
if maxt >= db.head.MinTime() {
overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime())
var headQuerier storage.ChunkQuerier
if maxt >= db.head.MinTime() || overlapsOOO {
rh := NewRangeHead(db.head, mint, maxt)
inOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt)
headQuerier, err = db.blockChunkQuerierFunc(rh, mint, maxt)
if err != nil {
return nil, fmt.Errorf("open querier for head %s: %w", rh, err)
}
@ -2086,35 +2131,28 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
// won't run into a race later since any truncation that comes after will wait on this querier if it overlaps.
shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt)
if shouldClose {
if err := inOrderHeadQuerier.Close(); err != nil {
if err := headQuerier.Close(); err != nil {
return nil, fmt.Errorf("closing head querier %s: %w", rh, err)
}
inOrderHeadQuerier = nil
headQuerier = nil
}
if getNew {
rh := NewRangeHead(db.head, newMint, maxt)
inOrderHeadQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt)
headQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt)
if err != nil {
return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err)
}
}
if inOrderHeadQuerier != nil {
blockQueriers = append(blockQueriers, inOrderHeadQuerier)
}
}
if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
outOfOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt)
if err != nil {
// If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead.
rh.isoState.Close()
if overlapsOOO {
// We need to fetch from in-order and out-of-order chunks: wrap the headQuerier.
isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef)
headQuerier = NewHeadAndOOOChunkQuerier(mint, maxt, db.head, isoState, headQuerier)
}
return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err)
}
blockQueriers = append(blockQueriers, outOfOrderHeadQuerier)
if headQuerier != nil {
blockQueriers = append(blockQueriers, headQuerier)
}
for _, b := range blocks {
@ -2278,13 +2316,13 @@ func blockDirs(dir string) ([]string, error) {
return dirs, nil
}
func exponential(d, min, max time.Duration) time.Duration {
func exponential(d, minD, maxD time.Duration) time.Duration {
d *= 2
if d < min {
d = min
if d < minD {
d = minD
}
if d > max {
d = max
if d > maxD {
d = maxD
}
return d
}
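
As a quick usage note (a sketch, not part of the change): the renamed exponential helper doubles the duration on every call and clamps it to [minD, maxD], the usual retry-backoff pattern. Runnable in isolation:

package main

import (
	"fmt"
	"time"
)

// exponential doubles d and clamps it to [minD, maxD], as above.
func exponential(d, minD, maxD time.Duration) time.Duration {
	d *= 2
	if d < minD {
		d = minD
	}
	if d > maxD {
		d = maxD
	}
	return d
}

func main() {
	d := time.Second
	for i := 0; i < 5; i++ {
		d = exponential(d, time.Second, 10*time.Second)
		fmt.Println(d) // 2s, 4s, 8s, 10s, 10s
	}
}
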

@ -2690,8 +2690,9 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) {
require.NoError(t, db.Close())
// Simulate a corrupted chunk: without a header.
_, err := os.Create(path.Join(mmappedChunksDir(db.dir), "000001"))
chunk, err := os.Create(path.Join(mmappedChunksDir(db.dir), "000001"))
require.NoError(t, err)
require.NoError(t, chunk.Close())
spinUpQuerierAndCheck(db.dir, t.TempDir(), 1)
@ -4500,12 +4501,15 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
func TestOOOCompaction(t *testing.T) {
for name, scenario := range sampleTypeScenarios {
t.Run(name, func(t *testing.T) {
testOOOCompaction(t, scenario)
testOOOCompaction(t, scenario, false)
})
t.Run(name+"+extra", func(t *testing.T) {
testOOOCompaction(t, scenario, true)
})
}
}
func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSamples bool) {
dir := t.TempDir()
ctx := context.Background()
@ -4525,8 +4529,8 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
addSample := func(fromMins, toMins int64) {
app := db.Appender(context.Background())
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, _, err := scenario.appendFunc(app, series1, ts, ts)
require.NoError(t, err)
_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
@ -4536,7 +4540,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
}
// Add in-order samples.
addSample(250, 350)
addSample(250, 300)
// Verify that the in-memory ooo chunk is empty.
checkEmptyOOOChunk := func(lbls labels.Labels) {
@ -4550,18 +4554,20 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
// Add ooo samples that create multiple chunks.
// 90 to 300 spans across 3 block ranges: [0, 120), [120, 240), [240, 360)
addSample(90, 310)
addSample(90, 300)
// Adding the same samples to create overlapping chunks.
// Since the active chunk won't start at 90 again, all the new
// chunks will have different time ranges than the previous chunks.
addSample(90, 310)
addSample(90, 300)
var highest int64 = 300
verifyDBSamples := func() {
var series1Samples, series2Samples []chunks.Sample
for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, 350}} {
for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, highest}} {
fromMins, toMins := r[0], r[1]
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
}
@ -4586,7 +4592,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
require.NoError(t, err)
require.False(t, created)
require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
require.Len(t, ms.ooo.oooMmappedChunks, 14) // 7 original, 7 duplicate.
require.Len(t, ms.ooo.oooMmappedChunks, 13) // 7 original, 6 duplicate.
}
checkNonEmptyOOOChunk(series1)
checkNonEmptyOOOChunk(series2)
@ -4604,6 +4610,15 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
require.NoError(t, err)
require.Greater(t, f.Size(), int64(100))
if addExtraSamples {
compactOOOHeadTestingCallback = func() {
addSample(90, 120) // Back in time, to generate a new OOO chunk.
addSample(300, 330) // Now some samples after the previous highest timestamp.
addSample(300, 330) // Repeat to generate an OOO chunk at these timestamps.
}
highest = 330
}
// OOO compaction happens here.
require.NoError(t, db.CompactOOOHead(ctx))
@ -4619,17 +4634,19 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
require.Equal(t, "00000001", files[0].Name())
f, err = files[0].Info()
require.NoError(t, err)
require.Equal(t, int64(0), f.Size())
// OOO stuff should not be present in the Head now.
checkEmptyOOOChunk(series1)
checkEmptyOOOChunk(series2)
if !addExtraSamples {
require.Equal(t, int64(0), f.Size())
// OOO stuff should not be present in the Head now.
checkEmptyOOOChunk(series1)
checkEmptyOOOChunk(series2)
}
verifySamples := func(block *Block, fromMins, toMins int64) {
series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
}
@ -4648,7 +4665,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
// Checking for expected data in the blocks.
verifySamples(db.Blocks()[0], 90, 119)
verifySamples(db.Blocks()[1], 120, 239)
verifySamples(db.Blocks()[2], 240, 310)
verifySamples(db.Blocks()[2], 240, 299)
// There should be a single m-map file.
mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
@ -4661,7 +4678,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds()))
require.NoError(t, err)
require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351)
verifySamples(db.Blocks()[3], 250, 350)
verifySamples(db.Blocks()[3], 250, highest)
verifyDBSamples() // Blocks created out of normal and OOO head now. But not merged.
@ -4678,7 +4695,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
require.Len(t, db.Blocks(), 3) // [0, 120), [120, 240), [240, 360)
verifySamples(db.Blocks()[0], 90, 119)
verifySamples(db.Blocks()[1], 120, 239)
verifySamples(db.Blocks()[2], 240, 350) // Merged block.
verifySamples(db.Blocks()[2], 240, highest) // Merged block.
verifyDBSamples() // Final state. Blocks from normal and OOO head are merged.
}
@ -4713,8 +4730,8 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen
addSamples := func(fromMins, toMins int64) {
app := db.Appender(context.Background())
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, _, err := scenario.appendFunc(app, series1, ts, ts)
require.NoError(t, err)
_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
@ -4768,8 +4785,8 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen
verifySamples := func(block *Block, fromMins, toMins int64) {
series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
}
@ -4822,8 +4839,8 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen
addSamples := func(fromMins, toMins int64) {
app := db.Appender(context.Background())
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, _, err := scenario.appendFunc(app, series1, ts, ts)
require.NoError(t, err)
_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
@ -4877,8 +4894,8 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen
verifySamples := func(block *Block, fromMins, toMins int64) {
series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
}
@ -4931,8 +4948,8 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa
addSamples := func(fromMins, toMins int64) {
app := db.Appender(context.Background())
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, _, err := scenario.appendFunc(app, series1, ts, ts)
require.NoError(t, err)
_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
@ -4979,8 +4996,8 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa
verifySamples := func(fromMins, toMins int64) {
series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
series2Samples = append(series2Samples, scenario.sampleFunc(ts, ts*2))
}
@ -5028,10 +5045,10 @@ func Test_Querier_OOOQuery(t *testing.T) {
addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample) ([]chunks.Sample, int) {
app := db.Appender(context.Background())
totalAppended := 0
for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() {
_, err := app.Append(0, series1, min, float64(min))
if min >= queryMinT && min <= queryMaxT {
expSamples = append(expSamples, sample{t: min, f: float64(min)})
for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() {
_, err := app.Append(0, series1, m, float64(m))
if m >= queryMinT && m <= queryMaxT {
expSamples = append(expSamples, sample{t: m, f: float64(m)})
}
require.NoError(t, err)
totalAppended++
@ -5112,10 +5129,10 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample) ([]chunks.Sample, int) {
app := db.Appender(context.Background())
totalAppended := 0
for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() {
_, err := app.Append(0, series1, min, float64(min))
if min >= queryMinT && min <= queryMaxT {
expSamples = append(expSamples, sample{t: min, f: float64(min)})
for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() {
_, err := app.Append(0, series1, m, float64(m))
if m >= queryMinT && m <= queryMaxT {
expSamples = append(expSamples, sample{t: m, f: float64(m)})
}
require.NoError(t, err)
totalAppended++
@ -5222,9 +5239,9 @@ func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) {
app := db.Appender(context.Background())
key := lbls.String()
from, to := minutes(fromMins), minutes(toMins)
for min := from; min <= to; min += time.Minute.Milliseconds() {
for m := from; m <= to; m += time.Minute.Milliseconds() {
val := rand.Intn(1000)
_, s, err := scenario.appendFunc(app, lbls, min, int64(val))
_, s, err := scenario.appendFunc(app, lbls, m, int64(val))
if faceError {
require.Error(t, err)
} else {
@ -5353,14 +5370,14 @@ func testOOODisabled(t *testing.T, scenario sampleTypeScenario) {
app := db.Appender(context.Background())
key := lbls.String()
from, to := minutes(fromMins), minutes(toMins)
for min := from; min <= to; min += time.Minute.Milliseconds() {
_, _, err := scenario.appendFunc(app, lbls, min, min)
for m := from; m <= to; m += time.Minute.Milliseconds() {
_, _, err := scenario.appendFunc(app, lbls, m, m)
if faceError {
require.Error(t, err)
failedSamples++
} else {
require.NoError(t, err)
expSamples[key] = append(expSamples[key], scenario.sampleFunc(min, min))
expSamples[key] = append(expSamples[key], scenario.sampleFunc(m, m))
totalSamples++
}
}
@ -5427,9 +5444,9 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
app := db.Appender(context.Background())
key := lbls.String()
from, to := minutes(fromMins), minutes(toMins)
for min := from; min <= to; min += time.Minute.Milliseconds() {
for m := from; m <= to; m += time.Minute.Milliseconds() {
val := rand.Intn(1000)
_, s, err := scenario.appendFunc(app, lbls, min, int64(val))
_, s, err := scenario.appendFunc(app, lbls, m, int64(val))
require.NoError(t, err)
expSamples[key] = append(expSamples[key], s)
totalSamples++
@ -5618,8 +5635,8 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) {
addSample := func(fromMins, toMins int64) {
app := db.Appender(context.Background())
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, _, err := scenario.appendFunc(app, series1, ts, ts)
require.NoError(t, err)
}
@ -5706,8 +5723,8 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) {
verifySamples := func(block *Block, fromMins, toMins int64) {
series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
}
expRes := map[string][]chunks.Sample{
@ -5755,8 +5772,8 @@ func TestWBLCorruption(t *testing.T) {
var allSamples, expAfterRestart []chunks.Sample
addSamples := func(fromMins, toMins int64, afterRestart bool) {
app := db.Appender(context.Background())
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, err := app.Append(0, series1, ts, float64(ts))
require.NoError(t, err)
allSamples = append(allSamples, sample{t: ts, f: float64(ts)})
@ -5909,8 +5926,8 @@ func testOOOMmapCorruption(t *testing.T, scenario sampleTypeScenario) {
var allSamples, expInMmapChunks []chunks.Sample
addSamples := func(fromMins, toMins int64, inMmapAfterCorruption bool) {
app := db.Appender(context.Background())
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, s, err := scenario.appendFunc(app, series1, ts, ts)
require.NoError(t, err)
allSamples = append(allSamples, s)
@ -6054,8 +6071,8 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) {
series1 := labels.FromStrings("foo", "bar1")
addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool, allSamples []chunks.Sample) []chunks.Sample {
app := db.Appender(context.Background())
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, s, err := scenario.appendFunc(app, series1, ts, ts)
if success {
require.NoError(t, err)
@ -6088,7 +6105,7 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) {
// WBL is not empty.
size, err := db.head.wbl.Size()
require.NoError(t, err)
require.Greater(t, size, int64(0))
require.Positive(t, size)
require.Empty(t, db.Blocks())
require.NoError(t, db.compactOOOHead(ctx))
@ -6265,8 +6282,8 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {
series1 := labels.FromStrings("foo", "bar1")
addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool) {
app := db.Appender(context.Background())
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, _, err := scenario.appendFunc(app, series1, ts, ts)
if success {
require.NoError(t, err)
@ -6279,8 +6296,8 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {
verifySamples := func(t *testing.T, db *DB, fromMins, toMins int64) {
var expSamples []chunks.Sample
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
expSamples = append(expSamples, scenario.sampleFunc(ts, ts))
}
@ -6393,8 +6410,8 @@ func testWblReplayAfterOOODisableAndRestart(t *testing.T, scenario sampleTypeSce
var allSamples []chunks.Sample
addSamples := func(fromMins, toMins int64) {
app := db.Appender(context.Background())
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, s, err := scenario.appendFunc(app, series1, ts, ts)
require.NoError(t, err)
allSamples = append(allSamples, s)
@ -6460,8 +6477,8 @@ func testPanicOnApplyConfig(t *testing.T, scenario sampleTypeScenario) {
var allSamples []chunks.Sample
addSamples := func(fromMins, toMins int64) {
app := db.Appender(context.Background())
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, s, err := scenario.appendFunc(app, series1, ts, ts)
require.NoError(t, err)
allSamples = append(allSamples, s)
@ -6517,8 +6534,8 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari
var allSamples []chunks.Sample
addSamples := func(fromMins, toMins int64) {
app := db.Appender(context.Background())
for min := fromMins; min <= toMins; min++ {
ts := min * time.Minute.Milliseconds()
for m := fromMins; m <= toMins; m++ {
ts := m * time.Minute.Milliseconds()
_, s, err := scenario.appendFunc(app, series1, ts, ts)
require.NoError(t, err)
allSamples = append(allSamples, s)
@ -7341,3 +7358,25 @@ func TestBlockQuerierAndBlockChunkQuerier(t *testing.T) {
// Make sure only block-1 is queried.
require.Equal(t, "block-1", lbls.Get("block"))
}
func TestGenerateCompactionDelay(t *testing.T) {
assertDelay := func(delay time.Duration) {
t.Helper()
require.GreaterOrEqual(t, delay, time.Duration(0))
// At most 10% of the chunkRange.
require.LessOrEqual(t, delay, 6000*time.Millisecond)
}
opts := DefaultOptions()
opts.EnableDelayedCompaction = true
db := openTestDB(t, opts, []int64{60000})
defer func() {
require.NoError(t, db.Close())
}()
// The offset is generated and changed while opening.
assertDelay(db.opts.CompactionDelay)
for i := 0; i < 1000; i++ {
assertDelay(db.generateCompactionDelay())
}
}
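
The test only pins down the contract: a non-negative delay of at most 10% of the chunk range (6s for the 60s chunk range used here). generateCompactionDelay's body is not shown in this diff; a hypothetical implementation satisfying the asserted bounds might look like the following, where the use of math/rand and the millisecond unit are assumptions.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// generateCompactionDelay is a hypothetical sketch: a random delay in
// [0, chunkRange/10) milliseconds, within the bounds the test asserts.
func generateCompactionDelay(chunkRange int64) time.Duration {
	return time.Duration(rand.Int63n(chunkRange/10)) * time.Millisecond
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(generateCompactionDelay(60_000)) // always below 6s
	}
}
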

@ -19,6 +19,7 @@ import (
"fmt"
"math"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/prometheus/model/exemplar"
@ -466,6 +467,9 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi
// like federation and erroring out at that time would be extremely noisy.
// This only checks against the latest in-order sample.
// The OOO headchunk has its own method to detect these duplicates.
if s.lastHistogramValue != nil || s.lastFloatHistogramValue != nil {
return false, 0, storage.NewDuplicateHistogramToFloatErr(t, v)
}
if math.Float64bits(s.lastValue) != math.Float64bits(v) {
return false, 0, storage.NewDuplicateFloatErr(t, s.lastValue, v)
}
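
The ordering in this hunk matters: if the series' last sample was a histogram, a float arriving at the same timestamp is rejected as a histogram-to-float duplicate before the bit-level float comparison runs. A standalone sketch of that decision, with a simplified struct standing in for the relevant memSeries fields:

package main

import (
	"errors"
	"fmt"
	"math"
)

type lastSample struct {
	value        float64
	wasHistogram bool // lastHistogramValue or lastFloatHistogramValue was set
}

// duplicateErr mimics the checks above for a float sample that arrives
// with the same timestamp as the series' last sample.
func duplicateErr(last lastSample, v float64) error {
	if last.wasHistogram {
		return errors.New("duplicate sample: last value was a histogram, new value is a float")
	}
	if math.Float64bits(last.value) != math.Float64bits(v) {
		return fmt.Errorf("duplicate sample: conflicting values %g vs %g", last.value, v)
	}
	return nil // Exact duplicate of the last float: accepted silently.
}

func main() {
	fmt.Println(duplicateErr(lastSample{wasHistogram: true}, 10.0))
	fmt.Println(duplicateErr(lastSample{value: 10.0}, 11.0))
	fmt.Println(duplicateErr(lastSample{value: 10.0}, 10.0))
}

Comparing Float64bits rather than using == means a NaN is treated as equal to an identical NaN instead of always conflicting.
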
@ -837,7 +841,7 @@ func (a *headAppender) Commit() (err error) {
floatsAppended = len(a.samples)
histogramsAppended = len(a.histograms) + len(a.floatHistograms)
// number of samples out of order but accepted: with ooo enabled and within time window
floatOOOAccepted int
oooFloatsAccepted int
// number of samples rejected due to: out of order but OOO support disabled.
floatOOORejected int
histoOOORejected int
@ -933,7 +937,7 @@ func (a *headAppender) Commit() (err error) {
// Sample is OOO and OOO handling is enabled
// and the delta is within the OOO tolerance.
var mmapRefs []chunks.ChunkDiskMapperRef
ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax)
ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger)
if chunkCreated {
r, ok := oooMmapMarkers[series.ref]
if !ok || r != nil {
@ -966,7 +970,7 @@ func (a *headAppender) Commit() (err error) {
if s.T > oooMaxT {
oooMaxT = s.T
}
floatOOOAccepted++
oooFloatsAccepted++
} else {
// Sample is an exact duplicate of the last sample.
// NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk,
@ -1062,7 +1066,7 @@ func (a *headAppender) Commit() (err error) {
a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatTooOldRejected))
a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatsAppended))
a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended))
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOOAccepted))
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(oooFloatsAccepted))
a.head.updateMinMaxTime(inOrderMint, inOrderMaxt)
a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT)
@ -1080,18 +1084,18 @@ func (a *headAppender) Commit() (err error) {
}
// insert is like append, except it inserts. Used for OOO samples.
func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) {
func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger log.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) {
if s.ooo == nil {
s.ooo = &memSeriesOOOFields{}
}
c := s.ooo.oooHeadChunk
if c == nil || c.chunk.NumSamples() == int(oooCapMax) {
// Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks.
c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper, logger)
chunkCreated = true
}
ok := c.chunk.Insert(t, v)
ok := c.chunk.Insert(t, v, h, fh)
if ok {
if chunkCreated || t < c.minTime {
c.minTime = t
@ -1399,12 +1403,12 @@ func (s *memSeries) histogramsAppendPreprocessor(t int64, e chunkenc.Encoding, o
// It assumes that the time range is 1/ratioToFull full.
// Assuming that the samples will keep arriving at the same rate, it will make the
// remaining n chunks within this chunk range (before maxT) equally sized.
func computeChunkEndTime(start, cur, max int64, ratioToFull float64) int64 {
n := float64(max-start) / (float64(cur-start+1) * ratioToFull)
func computeChunkEndTime(start, cur, maxT int64, ratioToFull float64) int64 {
n := float64(maxT-start) / (float64(cur-start+1) * ratioToFull)
if n <= 1 {
return max
return maxT
}
return int64(float64(start) + float64(max-start)/math.Floor(n))
return int64(float64(start) + float64(maxT-start)/math.Floor(n))
}
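
To make the arithmetic concrete: with start=0, maxT=1000, cur=250 and ratioToFull=1, n = 1000/251 ≈ 3.98, which floors to 3, so the chunk is cut at 0 + 1000/3 ≈ 333. A self-contained check of the renamed function:

package main

import (
	"fmt"
	"math"
)

// computeChunkEndTime, exactly as in the hunk above.
func computeChunkEndTime(start, cur, maxT int64, ratioToFull float64) int64 {
	n := float64(maxT-start) / (float64(cur-start+1) * ratioToFull)
	if n <= 1 {
		return maxT
	}
	return int64(float64(start) + float64(maxT-start)/math.Floor(n))
}

func main() {
	fmt.Println(computeChunkEndTime(0, 250, 1000, 1.0)) // 333
	fmt.Println(computeChunkEndTime(0, 600, 1000, 1.0)) // 1000: n ≈ 1.66 floors to 1
}
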
func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange int64) *memChunk {
@ -1441,9 +1445,9 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange
}
// cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) {
ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper)
// The caller must ensure that s is locked and s.ooo is not nil.
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) {
ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper, logger)
s.ooo.oooHeadChunk = &oooHeadChunk{
chunk: NewOOOChunk(),
@ -1454,7 +1458,8 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk
return s.ooo.oooHeadChunk, ref
}
func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) []chunks.ChunkDiskMapperRef {
// s must be locked when calling.
func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) []chunks.ChunkDiskMapperRef {
if s.ooo == nil || s.ooo.oooHeadChunk == nil {
// OOO is not enabled or there is no head chunk, so nothing to m-map here.
return nil
@ -1466,6 +1471,10 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap
}
chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, 1)
for _, memchunk := range chks {
if len(s.ooo.oooMmappedChunks) >= (oooChunkIDMask - 1) {
level.Error(logger).Log("msg", "Too many OOO chunks, dropping data", "series", s.lset.String())
break
}
chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, memchunk.chunk, true, handleChunkWriteError)
chunkRefs = append(chunkRefs, chunkRef)
s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{

@ -199,13 +199,18 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
defer s.Unlock()
*chks = (*chks)[:0]
*chks = appendSeriesChunks(s, h.mint, h.maxt, *chks)
return nil
}
func appendSeriesChunks(s *memSeries, mint, maxt int64, chks []chunks.Meta) []chunks.Meta {
for i, c := range s.mmappedChunks {
// Do not expose chunks that are outside of the specified range.
if !c.OverlapsClosedInterval(h.mint, h.maxt) {
if !c.OverlapsClosedInterval(mint, maxt) {
continue
}
*chks = append(*chks, chunks.Meta{
chks = append(chks, chunks.Meta{
MinTime: c.minTime,
MaxTime: c.maxTime,
Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(i))),
@ -223,8 +228,8 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
} else {
maxTime = chk.maxTime
}
if chk.OverlapsClosedInterval(h.mint, h.maxt) {
*chks = append(*chks, chunks.Meta{
if chk.OverlapsClosedInterval(mint, maxt) {
chks = append(chks, chunks.Meta{
MinTime: chk.minTime,
MaxTime: maxTime,
Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(len(s.mmappedChunks)+j))),
@ -233,8 +238,7 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
j++
}
}
return nil
return chks
}
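
Extracting appendSeriesChunks into a plain function over (mint, maxt) lets both the in-order and the combined head/OOO index readers reuse it; the core of the filter is the closed-interval overlap test. A minimal sketch of that predicate (mirroring the overlapsClosedInterval helper used earlier in this diff):

package main

import "fmt"

// overlapsClosedInterval reports whether [mint1, maxt1] and [mint2, maxt2]
// share at least one point — the per-chunk filter applied above.
func overlapsClosedInterval(mint1, maxt1, mint2, maxt2 int64) bool {
	return mint1 <= maxt2 && mint2 <= maxt1
}

func main() {
	fmt.Println(overlapsClosedInterval(0, 10, 10, 20)) // true: closed intervals touch at 10
	fmt.Println(overlapsClosedInterval(0, 10, 11, 20)) // false
}
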
// headChunkID returns the HeadChunkID referred to by the given position.
@ -244,12 +248,20 @@ func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID {
return chunks.HeadChunkID(pos) + s.firstChunkID
}
const oooChunkIDMask = 1 << 23
// oooHeadChunkID returns the HeadChunkID referred to by the given position.
// Only the bottom 24 bits are used. Bit 23 is always 1 for an OOO chunk; for the rest:
// * 0 <= pos < len(s.oooMmappedChunks) refer to s.oooMmappedChunks[pos]
// * pos == len(s.oooMmappedChunks) refers to s.oooHeadChunk
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID {
return chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID
return (chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID) | oooChunkIDMask
}
func unpackHeadChunkRef(ref chunks.ChunkRef) (seriesID chunks.HeadSeriesRef, chunkID chunks.HeadChunkID, isOOO bool) {
sid, cid := chunks.HeadChunkRef(ref).Unpack()
return sid, (cid & (oooChunkIDMask - 1)), (cid & oooChunkIDMask) != 0
}
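
A standalone illustration of the chunk-ID scheme above — bit 23 flags an OOO chunk and the bottom 23 bits carry the position — using plain integers rather than the real chunks types:

package main

import "fmt"

const oooChunkIDMask = 1 << 23

// packOOOChunkID sets the OOO flag bit, as oooHeadChunkID does above.
func packOOOChunkID(pos uint64) uint64 {
	return pos | oooChunkIDMask
}

// unpackChunkID strips the flag and reports whether it was set, as
// unpackHeadChunkRef does above.
func unpackChunkID(cid uint64) (pos uint64, isOOO bool) {
	return cid & (oooChunkIDMask - 1), cid&oooChunkIDMask != 0
}

func main() {
	cid := packOOOChunkID(5)
	pos, isOOO := unpackChunkID(cid)
	fmt.Printf("cid=%#x pos=%d isOOO=%v\n", cid, pos, isOOO) // cid=0x800005 pos=5 isOOO=true
	pos, isOOO = unpackChunkID(7)                            // in-order IDs never have the flag bit set
	fmt.Printf("pos=%d isOOO=%v\n", pos, isOOO)
}
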
// LabelValueFor returns label value for the given label name in the series referred to by ID.
@ -339,10 +351,15 @@ func (h *headChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chu
return chk, nil, err
}
// ChunkWithCopy returns the chunk for the reference number.
// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk.
func (h *headChunkReader) ChunkWithCopy(meta chunks.Meta) (chunkenc.Chunk, int64, error) {
return h.chunk(meta, true)
type ChunkReaderWithCopy interface {
ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error)
}
// ChunkOrIterableWithCopy returns the chunk for the reference number.
// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk, plus the max time of the chunk.
func (h *headChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
chk, maxTime, err := h.chunk(meta, true)
return chk, nil, maxTime, err
}
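
ChunkReaderWithCopy is an optional capability interface, so callers would presumably discover it with a type assertion. A hedged sketch of that call-site pattern with simplified stand-in interfaces (the caller shown is illustrative, not taken from this diff):

package main

import "fmt"

type chunkReader interface {
	ChunkOrIterable(ref uint64) (string, error)
}

type chunkReaderWithCopy interface {
	ChunkOrIterableWithCopy(ref uint64) (string, int64, error)
}

type headReader struct{}

func (headReader) ChunkOrIterable(uint64) (string, error) { return "chunk", nil }
func (headReader) ChunkOrIterableWithCopy(uint64) (string, int64, error) {
	return "copied chunk", 42, nil // 42 stands in for the chunk's max time.
}

// read prefers the copying variant when the reader supports it.
func read(r chunkReader, ref uint64) {
	if rc, ok := r.(chunkReaderWithCopy); ok {
		chk, maxt, _ := rc.ChunkOrIterableWithCopy(ref)
		fmt.Println(chk, maxt)
		return
	}
	chk, _ := r.ChunkOrIterable(ref)
	fmt.Println(chk)
}

func main() {
	read(headReader{}, 1)
}
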
// chunk returns the chunk for the reference number.
@ -358,9 +375,14 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
}
s.Lock()
defer s.Unlock()
return h.chunkFromSeries(s, cid, copyLastChunk)
}
// Call with s locked.
func (h *headChunkReader) chunkFromSeries(s *memSeries, cid chunks.HeadChunkID, copyLastChunk bool) (chunkenc.Chunk, int64, error) {
c, headChunk, isOpen, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool)
if err != nil {
s.Unlock()
return nil, 0, err
}
defer func() {
@ -374,7 +396,6 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
// This means that the chunk is outside the specified range.
if !c.OverlapsClosedInterval(h.mint, h.maxt) {
s.Unlock()
return nil, 0, storage.ErrNotFound
}
@ -391,7 +412,6 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
return nil, 0, err
}
}
s.Unlock()
return &safeHeadChunk{
Chunk: chk,
@ -461,14 +481,15 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi
return elem, true, offset == 0, nil
}
// oooMergedChunks return an iterable over one or more OOO chunks for the given
// mergedChunks returns an iterable over one or more OOO chunks for the given
// chunks.Meta reference from memory or by m-mapping it from the disk. The
// returned iterable will be a merge of all the overlapping chunks, if any,
// amongst all the chunks in the OOOHead.
// If hr is non-nil then in-order chunks are included.
// This function is not thread safe unless the caller holds a lock.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (*mergedOOOChunks, error) {
_, cid := chunks.HeadChunkRef(meta.Ref).Unpack()
func (s *memSeries) mergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, hr *headChunkReader, mint, maxt int64, maxMmapRef chunks.ChunkDiskMapperRef) (chunkenc.Iterable, error) {
_, cid, _ := unpackHeadChunkRef(meta.Ref)
// ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are
// incremented by 1 when new chunk is created, hence (meta - firstChunkID) gives the slice index.
@ -490,6 +511,9 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks)+1)
for i, c := range s.ooo.oooMmappedChunks {
if maxMmapRef != 0 && c.ref > maxMmapRef {
break
}
if c.OverlapsClosedInterval(mint, maxt) {
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: chunks.Meta{
@ -506,6 +530,16 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{meta: meta})
}
if hr != nil { // Include in-order chunks.
metas := appendSeriesChunks(s, max(meta.MinTime, mint), min(meta.MaxTime, maxt), nil)
for _, m := range metas {
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: m,
ref: 0, // This tells the loop below it's an in-order head chunk.
})
}
}
// Next we want to sort all the collected chunks by min time so we can find
// those that overlap and stop when we know the rest don't.
slices.SortFunc(tmpChks, refLessByMinTimeAndMinRef)
@ -517,9 +551,17 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
continue
}
var iterable chunkenc.Iterable
if c.meta.Chunk != nil {
switch {
case c.meta.Chunk != nil:
iterable = c.meta.Chunk
} else {
case c.ref == 0: // This is an in-order head chunk.
_, cid := chunks.HeadChunkRef(c.meta.Ref).Unpack()
var err error
iterable, _, err = hr.chunkFromSeries(s, cid, false)
if err != nil {
return nil, fmt.Errorf("invalid head chunk: %w", err)
}
default:
chk, err := cdm.Chunk(c.ref)
if err != nil {
var cerr *chunks.CorruptionErr

View file

@ -2757,7 +2757,7 @@ func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario) {
require.Equal(t, int64(math.MinInt64), db.head.minValidTime.Load())
require.NoError(t, db.Compact(ctx))
require.Greater(t, db.head.minValidTime.Load(), int64(0))
require.Positive(t, db.head.minValidTime.Load())
app = db.Appender(ctx)
_, err = appendSample(app, db.head.minValidTime.Load()-2)
@ -3677,7 +3677,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
require.Len(t, ms.mmappedChunks, 25)
expMmapChunks := make([]*mmappedChunk, 0, 20)
for _, mmap := range ms.mmappedChunks {
require.Greater(t, mmap.numSamples, uint16(0))
require.Positive(t, mmap.numSamples)
cpy := *mmap
expMmapChunks = append(expMmapChunks, &cpy)
}
@ -5695,7 +5695,7 @@ func TestCuttingNewHeadChunks(t *testing.T) {
}
}
// TestHeadDetectsDuplcateSampleAtSizeLimit tests a regression where a duplicate sample
// TestHeadDetectsDuplicateSampleAtSizeLimit tests a regression where a duplicate sample
// is appended to the head, right when the head chunk is at the size limit.
// The test adds all samples as duplicates, thus expecting that the result has
// exactly half of the samples.
@ -5919,6 +5919,35 @@ func TestPostingsCardinalityStats(t *testing.T) {
require.Equal(t, statsForSomeLabel1, head.PostingsCardinalityStats("n", 1))
}
func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing.T) {
head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false)
t.Cleanup(func() { head.Close() })
ls := labels.FromStrings(labels.MetricName, "test")
{
// Append a float 10.0 @ 1_000
app := head.Appender(context.Background())
_, err := app.Append(0, ls, 1_000, 10.0)
require.NoError(t, err)
require.NoError(t, app.Commit())
}
{
// Append a float histogram @ 2_000
app := head.Appender(context.Background())
h := tsdbutil.GenerateTestHistogram(1)
_, err := app.AppendHistogram(0, ls, 2_000, h, nil)
require.NoError(t, err)
require.NoError(t, app.Commit())
}
app := head.Appender(context.Background())
_, err := app.Append(0, ls, 2_000, 10.0)
require.Error(t, err)
require.ErrorIs(t, err, storage.NewDuplicateHistogramToFloatErr(2_000, 10.0))
}
func TestHeadAppender_AppendCTZeroSample(t *testing.T) {
type appendableSamples struct {
ts int64

@ -890,7 +890,7 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) {
unknownRefs++
continue
}
ok, chunkCreated, _ := ms.insert(s.T, s.V, h.chunkDiskMapper, oooCapMax)
ok, chunkCreated, _ := ms.insert(s.T, s.V, nil, nil, h.chunkDiskMapper, oooCapMax, h.logger)
if chunkCreated {
h.metrics.chunksCreated.Inc()
h.metrics.chunks.Inc()

@ -20,10 +20,10 @@ import (
func TestPostingsStats(t *testing.T) {
stats := &maxHeap{}
max := 3000000
heapLength := 10
const maxCount = 3000000
const heapLength = 10
stats.init(heapLength)
for i := 0; i < max; i++ {
for i := 0; i < maxCount; i++ {
item := Stat{
Name: "Label-da",
Count: uint64(i),
@ -35,13 +35,13 @@ func TestPostingsStats(t *testing.T) {
data := stats.get()
require.Len(t, data, 10)
for i := 0; i < heapLength; i++ {
require.Equal(t, uint64(max-i), data[i].Count)
require.Equal(t, uint64(maxCount-i), data[i].Count)
}
}
func TestPostingsStats2(t *testing.T) {
stats := &maxHeap{}
heapLength := 10
const heapLength = 10
stats.init(heapLength)
stats.push(Stat{Name: "Stuff", Count: 10})
@ -57,12 +57,12 @@ func TestPostingsStats2(t *testing.T) {
func BenchmarkPostingStatsMaxHep(b *testing.B) {
stats := &maxHeap{}
max := 9000000
heapLength := 10
const maxCount = 9000000
const heapLength = 10
b.ResetTimer()
for n := 0; n < b.N; n++ {
stats.init(heapLength)
for i := 0; i < max; i++ {
for i := 0; i < maxCount; i++ {
item := Stat{
Name: "Label-da",
Count: uint64(i),

@ -14,15 +14,10 @@
package tsdb
import (
"fmt"
"sort"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/oklog/ulid"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/tombstones"
)
// OOOChunk maintains samples in time-ascending order.
@ -39,13 +34,13 @@ func NewOOOChunk() *OOOChunk {
// Insert inserts the sample such that order is maintained.
// Returns false if insert was not possible due to the same timestamp already existing.
func (o *OOOChunk) Insert(t int64, v float64) bool {
func (o *OOOChunk) Insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) bool {
// Although out-of-order samples can be out-of-order amongst themselves, we
// are opinionated and expect them to be usually in-order, meaning we could
// try to append at the end first if the new timestamp is higher than the
// last known timestamp.
if len(o.samples) == 0 || t > o.samples[len(o.samples)-1].t {
o.samples = append(o.samples, sample{t, v, nil, nil})
o.samples = append(o.samples, sample{t, v, h, fh})
return true
}
@ -54,7 +49,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool {
if i >= len(o.samples) {
// None found, append it at the end.
o.samples = append(o.samples, sample{t, v, nil, nil})
o.samples = append(o.samples, sample{t, v, h, fh})
return true
}
@ -66,7 +61,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool {
// Expand length by 1 to make room. Use a zero sample; we will overwrite it anyway.
o.samples = append(o.samples, sample{})
copy(o.samples[i+1:], o.samples[i:])
o.samples[i] = sample{t, v, nil, nil}
o.samples[i] = sample{t, v, h, fh}
return true
}
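
A minimal sketch of the insert strategy above — fast-path append when the timestamp is the newest seen, otherwise binary-search for the slot and shift the tail — using plain float samples without the histogram plumbing:

package main

import (
	"fmt"
	"sort"
)

type sample struct {
	t int64
	v float64
}

// insert keeps samples time-ascending and rejects duplicate timestamps,
// mirroring OOOChunk.Insert above.
func insert(samples []sample, t int64, v float64) ([]sample, bool) {
	// Fast path: OOO samples usually still arrive roughly in order.
	if len(samples) == 0 || t > samples[len(samples)-1].t {
		return append(samples, sample{t, v}), true
	}
	i := sort.Search(len(samples), func(i int) bool { return samples[i].t >= t })
	if samples[i].t == t {
		return samples, false // Duplicate timestamp: reject.
	}
	// Expand by one and shift the tail right to make room.
	samples = append(samples, sample{})
	copy(samples[i+1:], samples[i:])
	samples[i] = sample{t, v}
	return samples, true
}

func main() {
	s, _ := insert(nil, 10, 1)
	s, _ = insert(s, 30, 3)
	s, _ = insert(s, 20, 2) // lands between 10 and 30
	_, ok := insert(s, 20, 9)
	fmt.Println(s, ok) // [{10 1} {20 2} {30 3}] false
}
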
@ -142,9 +137,9 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error
if newChunk != nil { // A new chunk was allocated.
if !recoded {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
cmint = s.t
}
chunk = newChunk
cmint = s.t
}
case chunkenc.EncFloatHistogram:
// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
@ -157,9 +152,9 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error
if newChunk != nil { // A new chunk was allocated.
if !recoded {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
cmint = s.t
}
chunk = newChunk
cmint = s.t
}
}
cmaxt = s.t
@ -170,75 +165,3 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error
}
return chks, nil
}
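
The moved cmint assignment is the substance of the two hunks above: when the appender allocates a chunk because it recoded the current one in place (carrying the existing samples over), the chunk's start time must stay put; only a genuinely new chunk resets cmint to the current sample. A simplified, self-contained sketch of that bookkeeping (the event/flush names are illustrative):

package main

import "fmt"

type memChunk struct{ mint, maxt int64 }

type event struct {
	t        int64
	newChunk bool // the appender allocated a chunk...
	recoded  bool // ...because it recoded the existing one in place
}

// flush mirrors the corrected logic: the finished chunk is recorded and
// cmint reset only when a chunk was allocated and it was not a recode.
func flush(events []event) []memChunk {
	var chks []memChunk
	cmint, cmaxt := events[0].t, events[0].t
	for _, e := range events[1:] {
		if e.newChunk && !e.recoded {
			chks = append(chks, memChunk{cmint, cmaxt})
			cmint = e.t
		}
		cmaxt = e.t
	}
	return append(chks, memChunk{cmint, cmaxt})
}

func main() {
	events := []event{{t: 10}, {t: 20, newChunk: true, recoded: true}, {t: 30, newChunk: true}}
	fmt.Println(flush(events)) // [{10 20} {30 30}]: the recode at t=20 kept mint=10
}
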
var _ BlockReader = &OOORangeHead{}
// OOORangeHead allows querying Head out of order samples via BlockReader
// interface implementation.
type OOORangeHead struct {
head *Head
// mint and maxt are tracked because when a query is handled we only want
// the timerange of the query and having preexisting pointers to the first
// and last timestamp help with that.
mint, maxt int64
isoState *oooIsolationState
}
func NewOOORangeHead(head *Head, mint, maxt int64, minRef chunks.ChunkDiskMapperRef) *OOORangeHead {
isoState := head.oooIso.TrackReadAfter(minRef)
return &OOORangeHead{
head: head,
mint: mint,
maxt: maxt,
isoState: isoState,
}
}
func (oh *OOORangeHead) Index() (IndexReader, error) {
return NewOOOHeadIndexReader(oh.head, oh.mint, oh.maxt, oh.isoState.minRef), nil
}
func (oh *OOORangeHead) Chunks() (ChunkReader, error) {
return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState), nil
}
func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) {
// As stated in the design doc https://docs.google.com/document/d/1Kppm7qL9C-BJB1j6yb6-9ObG3AbdZnFUBYPNNWwDBYM/edit?usp=sharing
// Tombstones are not supported for out of order metrics.
return tombstones.NewMemTombstones(), nil
}
var oooRangeHeadULID = ulid.MustParse("0000000000XXXX000RANGEHEAD")
func (oh *OOORangeHead) Meta() BlockMeta {
return BlockMeta{
MinTime: oh.mint,
MaxTime: oh.maxt,
ULID: oooRangeHeadULID,
Stats: BlockStats{
NumSeries: oh.head.NumSeries(),
},
}
}
// Size returns the size taken by the Head block.
func (oh *OOORangeHead) Size() int64 {
return oh.head.Size()
}
// String returns an human readable representation of the out of order range
// head. It's important to keep this function in order to avoid the struct dump
// when the head is stringified in errors or logs.
func (oh *OOORangeHead) String() string {
return fmt.Sprintf("ooo range head (mint: %d, maxt: %d)", oh.MinTime(), oh.MaxTime())
}
func (oh *OOORangeHead) MinTime() int64 {
return oh.mint
}
func (oh *OOORangeHead) MaxTime() int64 {
return oh.maxt
}

@ -27,17 +27,12 @@ import (
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/util/annotations"
)
var _ IndexReader = &OOOHeadIndexReader{}
var _ IndexReader = &HeadAndOOOIndexReader{}
// OOOHeadIndexReader implements IndexReader so ooo samples in the head can be
// accessed.
// It also has a reference to headIndexReader so we can leverage on its
// IndexReader implementation for all the methods that remain the same. We
// decided to do this to avoid code duplication.
// The only methods that change are the ones about getting Series and Postings.
type OOOHeadIndexReader struct {
type HeadAndOOOIndexReader struct {
*headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible.
lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef
}
@ -53,25 +48,16 @@ func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator
return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables)
}
func NewOOOHeadIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *OOOHeadIndexReader {
func NewHeadAndOOOIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader {
hr := &headIndexReader{
head: head,
mint: mint,
maxt: maxt,
}
return &OOOHeadIndexReader{hr, lastGarbageCollectedMmapRef}
return &HeadAndOOOIndexReader{hr, lastGarbageCollectedMmapRef}
}
func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
return oh.series(ref, builder, chks, oh.lastGarbageCollectedMmapRef, 0)
}
// lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so
// any chunk at or before this ref will not be considered. 0 disables this check.
//
// maxMmapRef tells up to which m-map chunk we can consider. If it is non-0, then
// the oooHeadChunk will not be considered.
func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef) error {
func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
s := oh.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
@ -88,10 +74,19 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
defer s.Unlock()
*chks = (*chks)[:0]
if s.ooo == nil {
return nil
if s.ooo != nil {
return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks)
}
*chks = appendSeriesChunks(s, oh.mint, oh.maxt, *chks)
return nil
}
// lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so
// any chunk at or before this ref will not be considered. 0 disables this check.
//
// maxMmapRef tells up to which m-map chunk we can consider. If it is non-0, then
// the oooHeadChunk will not be considered.
func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, chks *[]chunks.Meta) error {
tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks))
addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) {
@ -106,7 +101,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
// Collect all chunks that overlap the query range.
if s.ooo.oooHeadChunk != nil {
c := s.ooo.oooHeadChunk
if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 {
if c.OverlapsClosedInterval(mint, maxt) && maxMmapRef == 0 {
ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
if len(c.chunk.samples) > 0 { // Empty samples happen in tests, at least.
chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime)
@ -125,12 +120,16 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
}
for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
c := s.ooo.oooMmappedChunks[i]
if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) {
if c.OverlapsClosedInterval(mint, maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) {
ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
addChunk(c.minTime, c.maxTime, ref, nil)
}
}
if includeInOrder {
tmpChks = appendSeriesChunks(s, mint, maxt, tmpChks)
}
// There is nothing to do if we did not collect any chunk.
if len(tmpChks) == 0 {
return nil
@ -167,11 +166,10 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
return nil
}
// LabelValues needs to be overridden from the headIndexReader implementation due
// to the check that happens at the beginning where we make sure that the query
// interval overlaps with the head minooot and maxooot.
func (oh *OOOHeadIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
if oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxOOOTime() {
// LabelValues needs to be overridden from the headIndexReader implementation
// so we can return labels within either in-order range or ooo range.
func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
if oh.maxt < oh.head.MinTime() && oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxTime() && oh.mint > oh.head.MaxOOOTime() {
return []string{}, nil
}
@ -223,39 +221,30 @@ func lessByMinTimeAndMinRef(a, b chunks.Meta) int {
}
}
func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
switch len(values) {
case 0:
return index.EmptyPostings(), nil
case 1:
return oh.head.postings.Get(name, values[0]), nil // TODO(ganesh) Also call GetOOOPostings
default:
// TODO(ganesh) We want to only return postings for out of order series.
res := make([]index.Postings, 0, len(values))
for _, value := range values {
res = append(res, oh.head.postings.Get(name, value)) // TODO(ganesh) Also call GetOOOPostings
}
return index.Merge(ctx, res...), nil
type HeadAndOOOChunkReader struct {
head *Head
mint, maxt int64
cr *headChunkReader // If nil, only read OOO chunks.
maxMmapRef chunks.ChunkDiskMapperRef
oooIsoState *oooIsolationState
}
func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, cr *headChunkReader, oooIsoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOChunkReader {
return &HeadAndOOOChunkReader{
head: head,
mint: mint,
maxt: maxt,
cr: cr,
maxMmapRef: maxMmapRef,
oooIsoState: oooIsoState,
}
}
type OOOHeadChunkReader struct {
head *Head
mint, maxt int64
isoState *oooIsolationState
}
func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState) *OOOHeadChunkReader {
return &OOOHeadChunkReader{
head: head,
mint: mint,
maxt: maxt,
isoState: isoState,
func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
sid, _, isOOO := unpackHeadChunkRef(meta.Ref)
if !isOOO {
return cr.cr.ChunkOrIterable(meta)
}
}
func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
sid, _ := chunks.HeadChunkRef(meta.Ref).Unpack()
s := cr.head.series.getByID(sid)
// This means that the series has been garbage collected.
@ -264,34 +253,35 @@ func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk,
}
s.Lock()
if s.ooo == nil {
// There is no OOO data for this series.
s.Unlock()
return nil, nil, storage.ErrNotFound
}
mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt)
mc, err := s.mergedChunks(meta, cr.head.chunkDiskMapper, cr.cr, cr.mint, cr.maxt, cr.maxMmapRef)
s.Unlock()
if err != nil {
return nil, nil, err
}
// This means that the query range did not overlap with the requested chunk.
if len(mc.chunkIterables) == 0 {
return nil, nil, storage.ErrNotFound
}
return nil, mc, nil
return nil, mc, err
}
func (cr OOOHeadChunkReader) Close() error {
if cr.isoState != nil {
cr.isoState.Close()
// ChunkOrIterableWithCopy: implements ChunkReaderWithCopy. The special Copy behaviour
// is only implemented for the in-order head chunk.
func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
_, _, isOOO := unpackHeadChunkRef(meta.Ref)
if !isOOO {
return cr.cr.ChunkOrIterableWithCopy(meta)
}
chk, iter, err := cr.ChunkOrIterable(meta)
return chk, iter, 0, err
}
func (cr *HeadAndOOOChunkReader) Close() error {
if cr.cr != nil && cr.cr.isoState != nil {
cr.cr.isoState.Close()
}
if cr.oooIsoState != nil {
cr.oooIsoState.Close()
}
return nil
}
type OOOCompactionHead struct {
oooIR *OOOHeadIndexReader
head *Head
lastMmapRef chunks.ChunkDiskMapperRef
lastWBLFile int
postings []storage.SeriesRef
@ -308,6 +298,7 @@ type OOOCompactionHead struct {
// on the sample append latency. So call NewOOOCompactionHead only right before compaction.
func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, error) {
ch := &OOOCompactionHead{
head: head,
chunkRange: head.chunkRange.Load(),
mint: math.MaxInt64,
maxt: math.MinInt64,
@ -321,15 +312,14 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
ch.lastWBLFile = lastWBLFile
}
ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64, 0)
hr := headIndexReader{head: head, mint: ch.mint, maxt: ch.maxt}
n, v := index.AllPostingsKey()
// TODO: verify this gets only ooo samples.
p, err := ch.oooIR.Postings(ctx, n, v)
// TODO: filter to series with OOO samples, before sorting.
p, err := hr.Postings(ctx, n, v)
if err != nil {
return nil, err
}
p = ch.oooIR.SortedPostings(p)
p = hr.SortedPostings(p)
var lastSeq, lastOff int
for p.Next() {
@ -350,7 +340,7 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
}
var lastMmapRef chunks.ChunkDiskMapperRef
mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper, head.logger)
if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref}
@ -386,7 +376,7 @@ func (ch *OOOCompactionHead) Index() (IndexReader, error) {
}
func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) {
return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil), nil
return NewHeadAndOOOChunkReader(ch.head, ch.mint, ch.maxt, nil, nil, ch.lastMmapRef), nil
}
func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) {
@ -412,12 +402,12 @@ func (ch *OOOCompactionHead) Meta() BlockMeta {
// Only the method of BlockReader interface are valid for the cloned OOOCompactionHead.
func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionHead {
return &OOOCompactionHead{
oooIR: NewOOOHeadIndexReader(ch.oooIR.head, mint, maxt, 0),
head: ch.head,
lastMmapRef: ch.lastMmapRef,
postings: ch.postings,
chunkRange: ch.chunkRange,
mint: ch.mint,
maxt: ch.maxt,
mint: mint,
maxt: maxt,
}
}
@ -437,7 +427,8 @@ func NewOOOCompactionHeadIndexReader(ch *OOOCompactionHead) IndexReader {
}
func (ir *OOOCompactionHeadIndexReader) Symbols() index.StringIter {
return ir.ch.oooIR.Symbols()
hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt}
return hr.Symbols()
}
func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string, values ...string) (index.Postings, error) {
@ -458,11 +449,28 @@ func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.P
}
func (ir *OOOCompactionHeadIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings {
return ir.ch.oooIR.ShardedPostings(p, shardIndex, shardCount)
hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt}
return hr.ShardedPostings(p, shardIndex, shardCount)
}
func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
return ir.ch.oooIR.series(ref, builder, chks, 0, ir.ch.lastMmapRef)
s := ir.ch.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
ir.ch.head.metrics.seriesNotFound.Inc()
return storage.ErrNotFound
}
builder.Assign(s.labels())
s.Lock()
defer s.Unlock()
*chks = (*chks)[:0]
if s.ooo == nil {
return nil
}
return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, chks)
}
func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
@ -490,5 +498,91 @@ func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, posti
}
func (ir *OOOCompactionHeadIndexReader) Close() error {
return ir.ch.oooIR.Close()
return nil
}
// HeadAndOOOQuerier queries both the head and the out-of-order head.
type HeadAndOOOQuerier struct {
mint, maxt int64
head *Head
index IndexReader
chunkr ChunkReader
querier storage.Querier
}
func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier {
cr := &headChunkReader{
head: head,
mint: mint,
maxt: maxt,
isoState: head.iso.State(mint, maxt),
}
return &HeadAndOOOQuerier{
mint: mint,
maxt: maxt,
head: head,
index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef),
chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
querier: querier,
}
}
func (q *HeadAndOOOQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return q.querier.LabelValues(ctx, name, hints, matchers...)
}
func (q *HeadAndOOOQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return q.querier.LabelNames(ctx, hints, matchers...)
}
func (q *HeadAndOOOQuerier) Close() error {
q.chunkr.Close()
return q.querier.Close()
}
func (q *HeadAndOOOQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
return selectSeriesSet(ctx, sortSeries, hints, matchers, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt)
}
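
The wrapper's shape is worth noting: label lookups delegate to the wrapped in-order querier, while Select goes through the combined head+OOO index and chunk readers. A minimal sketch of that delegate-most, override-Select pattern with stand-in interfaces (not the real tsdb/storage types):

package main

import "fmt"

type querier interface {
	LabelNames() []string
	Select(matcher string) []string
}

type innerQuerier struct{}

func (innerQuerier) LabelNames() []string   { return []string{"instance", "job"} }
func (innerQuerier) Select(string) []string { return []string{"in-order series"} }

// combinedQuerier delegates everything except Select to the wrapped querier.
type combinedQuerier struct {
	inner querier
}

func (q combinedQuerier) LabelNames() []string { return q.inner.LabelNames() }

// Select reads via the combined in-order + OOO readers instead of q.inner.
func (q combinedQuerier) Select(matcher string) []string {
	return []string{"in-order + ooo series for " + matcher}
}

func main() {
	q := combinedQuerier{inner: innerQuerier{}}
	fmt.Println(q.LabelNames(), q.Select("up"))
}
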
// HeadAndOOOChunkQuerier queries both the head and the out-of-order head.
type HeadAndOOOChunkQuerier struct {
mint, maxt int64
head *Head
index IndexReader
chunkr ChunkReader
querier storage.ChunkQuerier
}
func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier {
cr := &headChunkReader{
head: head,
mint: mint,
maxt: maxt,
isoState: head.iso.State(mint, maxt),
}
return &HeadAndOOOChunkQuerier{
mint: mint,
maxt: maxt,
head: head,
index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef),
chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
querier: querier,
}
}
func (q *HeadAndOOOChunkQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return q.querier.LabelValues(ctx, name, hints, matchers...)
}
func (q *HeadAndOOOChunkQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return q.querier.LabelNames(ctx, hints, matchers...)
}
func (q *HeadAndOOOChunkQuerier) Close() error {
q.chunkr.Close()
return q.querier.Close()
}
func (q *HeadAndOOOChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
return selectChunkSeriesSet(ctx, sortSeries, hints, matchers, rangeHeadULID, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt)
}
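Both querier types above delegate LabelValues and LabelNames to the wrapped querier and route Select through the combined head+OOO index and chunk readers. A minimal wiring sketch, mirroring the BenchmarkQueries change further down in this diff (ctx, head, mint and maxt are assumed in scope; error handling elided):

qHead, _ := NewBlockQuerier(NewRangeHead(head, mint, maxt), mint, maxt)
isoState := head.oooIso.TrackReadAfter(0) // isolate from concurrent OOO appends
q := NewHeadAndOOOQuerier(mint, maxt, head, isoState, qHead)
defer q.Close() // closes the chunk reader, then the wrapped querier

matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")
ss := q.Select(ctx, false, nil, matcher)
for ss.Next() {
	// in-order and out-of-order samples arrive in one series set
}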

View file

@ -316,7 +316,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
// Set Ref to whatever Ref the chunk has, which we refer to by ID
for ref, c := range intervals {
if c.ID == e.ID {
meta.Ref = chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), chunks.HeadChunkID(ref)))
meta.Ref = chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), s1.oooHeadChunkID(ref)))
break
}
}
@ -341,7 +341,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
})
}
ir := NewOOOHeadIndexReader(h, tc.queryMinT, tc.queryMaxT, 0)
ir := NewHeadAndOOOIndexReader(h, tc.queryMinT, tc.queryMaxT, 0)
var chks []chunks.Meta
var b labels.ScratchBuilder
@ -421,17 +421,17 @@ func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenari
name: "LabelValues calls with ooo head query range not overlapping out-of-order data",
queryMinT: 100,
queryMaxT: 100,
expValues1: []string{},
expValues2: []string{},
expValues3: []string{},
expValues4: []string{},
expValues1: []string{"bar1"},
expValues2: nil,
expValues3: []string{"bar1", "bar2"},
expValues4: []string{"bar1", "bar2"},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
// We first want to test using a head index reader that covers the biggest query interval
oh := NewOOOHeadIndexReader(head, tc.queryMinT, tc.queryMaxT, 0)
oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMaxT, 0)
matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")}
values, err := oh.LabelValues(ctx, "foo", matchers...)
sort.Strings(values)
@ -481,10 +481,10 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
t.Run("Getting a non existing chunk fails with not found error", func(t *testing.T) {
db := newTestDBWithOpts(t, opts)
cr := NewOOOHeadChunkReader(db.head, 0, 1000, nil)
cr := NewHeadAndOOOChunkReader(db.head, 0, 1000, nil, nil, 0)
defer cr.Close()
c, iterable, err := cr.ChunkOrIterable(chunks.Meta{
Ref: 0x1000000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300,
Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300,
})
require.Nil(t, iterable)
require.Equal(t, err, fmt.Errorf("not found"))
@ -832,14 +832,14 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
// The Series method populates the chunk metas, taking a copy of the
// head OOO chunk if necessary. These are then used by the ChunkReader.
ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
var chks []chunks.Meta
var b labels.ScratchBuilder
err = ir.Series(s1Ref, &b, &chks)
require.NoError(t, err)
require.Equal(t, len(tc.expChunksSamples), len(chks))
cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil)
cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0)
defer cr.Close()
for i := 0; i < len(chks); i++ {
c, iterable, err := cr.ChunkOrIterable(chks[i])
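// The two calls above are the assumed two-step read protocol: the index
// reader's Series() fills the []chunks.Meta slice (copying the in-memory
// OOO head chunk when needed), and the ChunkReader then resolves each meta:
//
//	err := ir.Series(s1Ref, &b, &chks)      // 1. populate chunk metas
//	for _, m := range chks {
//	    c, it, err := cr.ChunkOrIterable(m) // 2. resolve chunk or iterable
//	    _, _, _ = c, it, err
//	}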
@ -997,7 +997,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
// The Series method populates the chunk metas, taking a copy of the
// head OOO chunk if necessary. These are then used by the ChunkReader.
ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
var chks []chunks.Meta
var b labels.ScratchBuilder
err = ir.Series(s1Ref, &b, &chks)
@ -1013,7 +1013,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
}
require.NoError(t, app.Commit())
cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil)
cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0)
defer cr.Close()
for i := 0; i < len(chks); i++ {
c, iterable, err := cr.ChunkOrIterable(chks[i])

View file

@ -14,8 +14,14 @@
package tsdb
import (
"math"
"testing"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/stretchr/testify/require"
)
@ -52,7 +58,7 @@ func TestOOOInsert(t *testing.T) {
chunk := NewOOOChunk()
chunk.samples = makeEvenSampleSlice(numPreExisting)
newSample := samplify(valOdd(insertPos))
chunk.Insert(newSample.t, newSample.f)
chunk.Insert(newSample.t, newSample.f, nil, nil)
var expSamples []sample
// Our expected new samples slice, will be first the original samples.
@ -83,7 +89,7 @@ func TestOOOInsertDuplicate(t *testing.T) {
dupSample := chunk.samples[dupPos]
dupSample.f = 0.123
ok := chunk.Insert(dupSample.t, dupSample.f)
ok := chunk.Insert(dupSample.t, dupSample.f, nil, nil)
expSamples := makeEvenSampleSlice(num) // We expect no change.
require.False(t, ok)
@ -91,3 +97,136 @@ func TestOOOInsertDuplicate(t *testing.T) {
}
}
}
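The extra nil, nil arguments above indicate Insert now accepts native histograms alongside floats. The shape below is inferred from the call sites in this diff, not copied from the implementation:

// Inferred: exactly one of v, h, fh carries the sample's value, and the
// returned bool reports whether the sample was inserted (false on duplicates):
//
//	func (o *OOOChunk) Insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) bool
//
// Float sample:           chunk.Insert(ts, 42.0, nil, nil)
// Histogram sample:       chunk.Insert(ts, 0, h.Copy(), nil)
// Float histogram sample: chunk.Insert(ts, 0, nil, fh.Copy())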
type chunkVerify struct {
encoding chunkenc.Encoding
minTime int64
maxTime int64
}
func TestOOOChunks_ToEncodedChunks(t *testing.T) {
h1 := tsdbutil.GenerateTestHistogram(1)
// Make h2 appendable but with more buckets, to trigger recoding.
h2 := h1.Copy()
h2.PositiveSpans = append(h2.PositiveSpans, histogram.Span{Offset: 1, Length: 1})
h2.PositiveBuckets = append(h2.PositiveBuckets, 12)
testCases := map[string]struct {
samples []sample
expectedCounterResets []histogram.CounterResetHint
expectedChunks []chunkVerify
}{
"empty": {
samples: []sample{},
},
"has floats": {
samples: []sample{
{t: 1000, f: 43.0},
{t: 1100, f: 42.0},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncXOR, minTime: 1000, maxTime: 1100},
},
},
"mix of floats and histograms": {
samples: []sample{
{t: 1000, f: 43.0},
{t: 1100, h: h1},
{t: 1200, f: 42.0},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset, histogram.UnknownCounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncXOR, minTime: 1000, maxTime: 1000},
{encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100},
{encoding: chunkenc.EncXOR, minTime: 1200, maxTime: 1200},
},
},
"has a counter reset": {
samples: []sample{
{t: 1000, h: h2},
{t: 1100, h: h1},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.CounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncHistogram, minTime: 1000, maxTime: 1000},
{encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100},
},
},
"has a recoded histogram": { // Regression test for wrong minT, maxT in histogram recoding.
samples: []sample{
{t: 0, h: h1},
{t: 1, h: h2},
},
expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset},
expectedChunks: []chunkVerify{
{encoding: chunkenc.EncHistogram, minTime: 0, maxTime: 1},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// Sanity check.
require.Equal(t, len(tc.samples), len(tc.expectedCounterResets), "number of samples and counter resets")
oooChunk := OOOChunk{}
for _, s := range tc.samples {
switch s.Type() {
case chunkenc.ValFloat:
oooChunk.Insert(s.t, s.f, nil, nil)
case chunkenc.ValHistogram:
oooChunk.Insert(s.t, 0, s.h.Copy(), nil)
case chunkenc.ValFloatHistogram:
oooChunk.Insert(s.t, 0, nil, s.fh.Copy())
default:
t.Fatalf("unexpected sample type %d", s.Type())
}
}
chunks, err := oooChunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
require.NoError(t, err)
require.Equal(t, len(tc.expectedChunks), len(chunks), "number of chunks")
sampleIndex := 0
for i, c := range chunks {
require.Equal(t, tc.expectedChunks[i].encoding, c.chunk.Encoding(), "chunk %d encoding", i)
require.Equal(t, tc.expectedChunks[i].minTime, c.minTime, "chunk %d minTime", i)
require.Equal(t, tc.expectedChunks[i].maxTime, c.maxTime, "chunk %d maxTime", i)
samples, err := storage.ExpandSamples(c.chunk.Iterator(nil), newSample)
require.GreaterOrEqual(t, len(tc.samples)-sampleIndex, len(samples), "too many samples in chunk %d, expected at most %d", i, len(tc.samples)-sampleIndex)
require.NoError(t, err)
if len(samples) == 0 {
// Ignore empty chunks.
continue
}
switch c.chunk.Encoding() {
case chunkenc.EncXOR:
for j, s := range samples {
require.Equal(t, chunkenc.ValFloat, s.Type())
// XOR chunks don't have counter reset hints, so we shouldn't expect anything other than UnknownCounterReset.
require.Equal(t, histogram.UnknownCounterReset, tc.expectedCounterResets[sampleIndex+j], "sample reset hint %d", sampleIndex+j)
require.Equal(t, tc.samples[sampleIndex+j].f, s.F(), "sample %d", sampleIndex+j)
}
case chunkenc.EncHistogram:
for j, s := range samples {
require.Equal(t, chunkenc.ValHistogram, s.Type())
require.Equal(t, tc.expectedCounterResets[sampleIndex+j], s.H().CounterResetHint, "sample reset hint %d", sampleIndex+j)
compareTo := tc.samples[sampleIndex+j].h.Copy()
compareTo.CounterResetHint = tc.expectedCounterResets[sampleIndex+j]
require.Equal(t, compareTo, s.H().Compact(0), "sample %d", sampleIndex+j)
}
case chunkenc.EncFloatHistogram:
for j, s := range samples {
require.Equal(t, chunkenc.ValFloatHistogram, s.Type())
require.Equal(t, tc.expectedCounterResets[sampleIndex+j], s.FH().CounterResetHint, "sample reset hint %d", sampleIndex+j)
compareTo := tc.samples[sampleIndex+j].fh.Copy()
compareTo.CounterResetHint = tc.expectedCounterResets[sampleIndex+j]
require.Equal(t, compareTo, s.FH().Compact(0), "sample %d", sampleIndex+j)
}
}
sampleIndex += len(samples)
}
require.Equal(t, len(tc.samples), sampleIndex, "number of samples")
})
}
}
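Taken together, the expectations above assume ToEncodedChunks cuts a new chunk whenever the sample type switches or a counter reset is detected, returning per-encoding chunks with tight time bounds. A usage sketch under those assumptions, reusing h1 from the test setup:

oooChunk := OOOChunk{}
oooChunk.Insert(1000, 43.0, nil, nil)     // float
oooChunk.Insert(1100, 0, h1.Copy(), nil)  // histogram: forces a chunk cut
oooChunk.Insert(1200, 42.0, nil, nil)     // float again: another cut

cks, err := oooChunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
// Expect err == nil and three chunks (EncXOR, EncHistogram, EncXOR), each
// with minTime == maxTime == the timestamp of its single sample.
_, _ = cks, err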

View file

@ -115,20 +115,24 @@ func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) {
}
func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet {
mint := q.mint
maxt := q.maxt
return selectSeriesSet(ctx, sortSeries, hints, ms, q.index, q.chunks, q.tombstones, q.mint, q.maxt)
}
func selectSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher,
index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64,
) storage.SeriesSet {
disableTrimming := false
sharded := hints != nil && hints.ShardCount > 0
p, err := PostingsForMatchers(ctx, q.index, ms...)
p, err := PostingsForMatchers(ctx, index, ms...)
if err != nil {
return storage.ErrSeriesSet(err)
}
if sharded {
p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
}
if sortSeries {
p = q.index.SortedPostings(p)
p = index.SortedPostings(p)
}
if hints != nil {
@ -137,11 +141,11 @@ func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *stora
disableTrimming = hints.DisableTrimming
if hints.Func == "series" {
// When you're only looking up metadata (for example series API), you don't need to load any chunks.
return newBlockSeriesSet(q.index, newNopChunkReader(), q.tombstones, p, mint, maxt, disableTrimming)
return newBlockSeriesSet(index, newNopChunkReader(), tombstones, p, mint, maxt, disableTrimming)
}
}
return newBlockSeriesSet(q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming)
return newBlockSeriesSet(index, chunks, tombstones, p, mint, maxt, disableTrimming)
}
// blockChunkQuerier provides chunk querying access to a single block database.
@ -159,8 +163,12 @@ func NewBlockChunkQuerier(b BlockReader, mint, maxt int64) (storage.ChunkQuerier
}
func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet {
mint := q.mint
maxt := q.maxt
return selectChunkSeriesSet(ctx, sortSeries, hints, ms, q.blockID, q.index, q.chunks, q.tombstones, q.mint, q.maxt)
}
func selectChunkSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher,
blockID ulid.ULID, index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64,
) storage.ChunkSeriesSet {
disableTrimming := false
sharded := hints != nil && hints.ShardCount > 0
@ -169,17 +177,17 @@ func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *
maxt = hints.End
disableTrimming = hints.DisableTrimming
}
p, err := PostingsForMatchers(ctx, q.index, ms...)
p, err := PostingsForMatchers(ctx, index, ms...)
if err != nil {
return storage.ErrChunkSeriesSet(err)
}
if sharded {
p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
}
if sortSeries {
p = q.index.SortedPostings(p)
p = index.SortedPostings(p)
}
return NewBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming)
return NewBlockChunkSeriesSet(blockID, index, chunks, tombstones, p, mint, maxt, disableTrimming)
}
// PostingsForMatchers assembles a single postings iterator against the index reader
@ -633,14 +641,16 @@ func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool {
}
}
hcr, ok := p.cr.(*headChunkReader)
hcr, ok := p.cr.(ChunkReaderWithCopy)
var iterable chunkenc.Iterable
if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 {
// ChunkWithCopy will copy the head chunk.
// ChunkOrIterableWithCopy will copy the head chunk, if it can.
var maxt int64
p.currMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currMeta)
// For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here.
p.currMeta.MaxTime = maxt
p.currMeta.Chunk, iterable, maxt, p.err = hcr.ChunkOrIterableWithCopy(p.currMeta)
if p.currMeta.Chunk != nil {
// For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here.
p.currMeta.MaxTime = maxt
}
} else {
p.currMeta.Chunk, iterable, p.err = p.cr.ChunkOrIterable(p.currMeta)
}
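The type assertion above swaps the concrete *headChunkReader for an interface, so any reader that can hand out a safe copy of an in-memory chunk may opt in. The shape below is inferred from this call site; the real definition may differ:

type ChunkReaderWithCopy interface {
	ChunkReader
	// ChunkOrIterableWithCopy returns either a copied chunk together with
	// its true maxt (the index reader sets MaxInt64 for the open head
	// chunk), or an iterable when a plain copy is not possible.
	ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error)
}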

View file

@ -20,6 +20,7 @@ import (
"testing"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/stretchr/testify/require"
@ -254,56 +255,98 @@ func BenchmarkMergedStringIter(b *testing.B) {
b.ReportAllocs()
}
func BenchmarkQuerierSelect(b *testing.B) {
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = b.TempDir()
h, err := NewHead(nil, nil, nil, nil, opts, nil)
func createHeadForBenchmarkSelect(b *testing.B, numSeries int, addSeries func(app storage.Appender, i int)) (*Head, *DB) {
dir := b.TempDir()
opts := DefaultOptions()
opts.OutOfOrderCapMax = 255
opts.OutOfOrderTimeWindow = 1000
db, err := Open(dir, nil, nil, opts, nil)
require.NoError(b, err)
defer h.Close()
b.Cleanup(func() {
require.NoError(b, db.Close())
})
h := db.Head()
app := h.Appender(context.Background())
numSeries := 1000000
for i := 0; i < numSeries; i++ {
app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0)
addSeries(app, i)
}
require.NoError(b, app.Commit())
return h, db
}
bench := func(b *testing.B, br BlockReader, sorted bool) {
matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")
for s := 1; s <= numSeries; s *= 10 {
b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) {
q, err := NewBlockQuerier(br, 0, int64(s-1))
require.NoError(b, err)
func benchmarkSelect(b *testing.B, queryable storage.Queryable, numSeries int, sorted bool) {
matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")
b.ResetTimer()
for s := 1; s <= numSeries; s *= 10 {
b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) {
q, err := queryable.Querier(0, int64(s-1))
require.NoError(b, err)
b.ResetTimer()
for i := 0; i < b.N; i++ {
ss := q.Select(context.Background(), sorted, nil, matcher)
for ss.Next() {
}
require.NoError(b, ss.Err())
b.ResetTimer()
for i := 0; i < b.N; i++ {
ss := q.Select(context.Background(), sorted, nil, matcher)
for ss.Next() {
}
q.Close()
})
}
require.NoError(b, ss.Err())
}
q.Close()
})
}
}
func BenchmarkQuerierSelect(b *testing.B) {
numSeries := 1000000
h, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) {
_, err := app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0)
if err != nil {
b.Fatal(err)
}
})
b.Run("Head", func(b *testing.B) {
bench(b, h, false)
benchmarkSelect(b, db, numSeries, false)
})
b.Run("SortedHead", func(b *testing.B) {
bench(b, h, true)
benchmarkSelect(b, db, numSeries, true)
})
tmpdir := b.TempDir()
blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil)
require.NoError(b, err)
defer func() {
require.NoError(b, block.Close())
}()
b.Run("Block", func(b *testing.B) {
bench(b, block, false)
tmpdir := b.TempDir()
blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil)
require.NoError(b, err)
defer func() {
require.NoError(b, block.Close())
}()
benchmarkSelect(b, (*queryableBlock)(block), numSeries, false)
})
}
// Type wrapper to let a Block be a Queryable in benchmarkSelect().
type queryableBlock Block
func (pb *queryableBlock) Querier(mint, maxt int64) (storage.Querier, error) {
return NewBlockQuerier((*Block)(pb), mint, maxt)
}
func BenchmarkQuerierSelectWithOutOfOrder(b *testing.B) {
numSeries := 1000000
_, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) {
l := labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix))
ref, err := app.Append(0, l, int64(i+1), 0)
if err != nil {
b.Fatal(err)
}
_, err = app.Append(ref, l, int64(i), 1) // Out of order sample
if err != nil {
b.Fatal(err)
}
})
b.Run("Head", func(b *testing.B) {
benchmarkSelect(b, db, numSeries, false)
})
}

View file

@ -3169,12 +3169,11 @@ func BenchmarkQueries(b *testing.B) {
qHead, err := NewBlockQuerier(NewRangeHead(head, 1, nSamples), 1, nSamples)
require.NoError(b, err)
qOOOHead, err := NewBlockQuerier(NewOOORangeHead(head, 1, nSamples, 0), 1, nSamples)
require.NoError(b, err)
isoState := head.oooIso.TrackReadAfter(0)
qOOOHead := NewHeadAndOOOQuerier(1, nSamples, head, isoState, qHead)
queryTypes = append(queryTypes, qt{
fmt.Sprintf("_Head_oooPercent:%d", oooPercentage),
storage.NewMergeQuerier([]storage.Querier{qHead, qOOOHead}, nil, storage.ChainedSeriesMerge),
fmt.Sprintf("_Head_oooPercent:%d", oooPercentage), qOOOHead,
})
}

View file

@ -20,7 +20,6 @@ import (
"math"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"time"
@ -265,9 +264,9 @@ func (w *Watcher) loop() {
// Run the watcher, which will tail the WAL until the quit channel is closed
// or an error case is hit.
func (w *Watcher) Run() error {
_, lastSegment, err := w.firstAndLast()
_, lastSegment, err := Segments(w.walDir)
if err != nil {
return fmt.Errorf("wal.Segments: %w", err)
return fmt.Errorf("Segments: %w", err)
}
// We want to ensure this is false across iterations since
@ -318,57 +317,20 @@ func (w *Watcher) Run() error {
// findSegmentForIndex finds the first segment greater than or equal to index.
func (w *Watcher) findSegmentForIndex(index int) (int, error) {
refs, err := w.segments(w.walDir)
refs, err := listSegments(w.walDir)
if err != nil {
return -1, err
}
for _, r := range refs {
if r >= index {
return r, nil
if r.index >= index {
return r.index, nil
}
}
return -1, errors.New("failed to find segment for index")
}
func (w *Watcher) firstAndLast() (int, int, error) {
refs, err := w.segments(w.walDir)
if err != nil {
return -1, -1, err
}
if len(refs) == 0 {
return -1, -1, nil
}
return refs[0], refs[len(refs)-1], nil
}
// Copied from tsdb/wlog/wlog.go so we do not have to open a WAL.
// Plan is to move WAL watcher to TSDB and dedupe these implementations.
func (w *Watcher) segments(dir string) ([]int, error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var refs []int
for _, f := range files {
k, err := strconv.Atoi(f.Name())
if err != nil {
continue
}
refs = append(refs, k)
}
slices.Sort(refs)
for i := 0; i < len(refs)-1; i++ {
if refs[i]+1 != refs[i+1] {
return nil, errors.New("segments are not sequential")
}
}
return refs, nil
}
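With the private copy deleted, the watcher reuses the wlog package's own helpers. Their shapes below are inferred from the call sites in this diff; only the index field is exercised here, and the real struct may carry more (e.g. the file name):

// Inferred from usage above:
//
//	func Segments(dir string) (first, last int, err error) // Run and the segment ticker
//	func listSegments(dir string) ([]segmentRef, error)    // one ref per segment, sorted by index
//
//	type segmentRef struct{ index int /* ... */ }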
func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, size int64) error {
err := w.readSegment(r, segmentNum, tail)
@ -447,35 +409,17 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
// Currently doing a garbage collect, try again later.
}
// if a newer segment is produced, read the current one until the end and move on.
case <-segmentTicker.C:
_, last, err := w.firstAndLast()
_, last, err := Segments(w.walDir)
if err != nil {
return fmt.Errorf("segments: %w", err)
return fmt.Errorf("Segments: %w", err)
}
// Check if new segments exist.
if last <= segmentNum {
continue
if last > segmentNum {
return w.readAndHandleError(reader, segmentNum, tail, size)
}
err = w.readSegment(reader, segmentNum, tail)
// Ignore errors reading to end of segment whilst replaying the WAL.
if !tail {
switch {
case err != nil && !errors.Is(err, io.EOF):
level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err)
case reader.Offset() != size:
level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
}
return nil
}
// Otherwise, when we are tailing, non-EOFs are fatal.
if err != nil && !errors.Is(err, io.EOF) {
return err
}
return nil
continue
// we haven't read due to a notification in quite some time, try reading anyway
case <-readTicker.C:
@ -484,7 +428,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
if err != nil {
return err
}
// still want to reset the ticker so we don't read too often
// reset the ticker so we don't read too often
readTicker.Reset(readTimeout)
case <-w.readNotify:
@ -492,7 +436,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
if err != nil {
return err
}
// still want to reset the ticker so we don't read too often
// reset the ticker so we don't read too often
readTicker.Reset(readTimeout)
}
}
@ -628,6 +572,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
w.writer.AppendHistograms(histogramsToSend)
histogramsToSend = histogramsToSend[:0]
}
case record.FloatHistogramSamples:
// Skip if experimental "histograms over remote write" is not enabled.
if !w.sendHistograms {
@ -666,11 +611,13 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
return err
}
w.writer.StoreMetadata(meta)
case record.Tombstones:
default:
case record.Unknown:
// Could be corruption, or reading from a WAL from a newer Prometheus.
w.recordDecodeFailsMetric.Inc()
default:
// We're not interested in other types of records.
}
}
if err := r.Err(); err != nil {
@ -699,14 +646,12 @@ func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error
}
w.writer.UpdateSeriesSegment(series, segmentNum)
// Ignore these; we're only interested in series.
case record.Samples:
case record.Exemplars:
case record.Tombstones:
default:
case record.Unknown:
// Could be corruption, or reading from a WAL from a newer Prometheus.
w.recordDecodeFailsMetric.Inc()
default:
// We're only interested in series.
}
}
if err := r.Err(); err != nil {
@ -731,29 +676,30 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err
}
// Ensure we read the whole contents of every segment in the checkpoint dir.
segs, err := w.segments(checkpointDir)
segs, err := listSegments(checkpointDir)
if err != nil {
return fmt.Errorf("Unable to get segments checkpoint dir: %w", err)
}
for _, seg := range segs {
size, err := getSegmentSize(checkpointDir, seg)
for _, segRef := range segs {
size, err := getSegmentSize(checkpointDir, segRef.index)
if err != nil {
return fmt.Errorf("getSegmentSize: %w", err)
}
sr, err := OpenReadSegment(SegmentName(checkpointDir, seg))
sr, err := OpenReadSegment(SegmentName(checkpointDir, segRef.index))
if err != nil {
return fmt.Errorf("unable to open segment: %w", err)
}
defer sr.Close()
r := NewLiveReader(w.logger, w.readerMetrics, sr)
if err := readFn(w, r, index, false); err != nil && !errors.Is(err, io.EOF) {
err = readFn(w, r, index, false)
sr.Close()
if err != nil && !errors.Is(err, io.EOF) {
return fmt.Errorf("readSegment: %w", err)
}
if r.Offset() != size {
return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, seg, size, r.Offset())
return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, segRef.index, size, r.Offset())
}
}
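The Close change above (and the matching one in TestTailSamples below) fixes a classic Go pitfall: defer inside a loop keeps every segment open until the function returns, which adds up when a checkpoint directory holds many segments. A standalone illustration, with process as a hypothetical stand-in for the read step:

// Before: every file stays open until the surrounding function returns.
for _, name := range names {
	f, err := os.Open(name)
	if err != nil {
		return err
	}
	defer f.Close() // runs at function exit, not at loop-iteration exit
	process(f)
}

// After: each file is closed as soon as it has been read.
for _, name := range names {
	f, err := os.Open(name)
	if err != nil {
		return err
	}
	err = process(f)
	f.Close()
	if err != nil {
		return err
	}
}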

View file

@ -230,11 +230,11 @@ func TestTailSamples(t *testing.T) {
for i := first; i <= last; i++ {
segment, err := OpenReadSegment(SegmentName(watcher.walDir, i))
require.NoError(t, err)
defer segment.Close()
reader := NewLiveReader(nil, NewLiveReaderMetrics(nil), segment)
// Use tail true so we can ensure we got the right number of samples.
watcher.readSegment(reader, i, true)
require.NoError(t, segment.Close())
}
expectedSeries := seriesCount

View file

@ -612,16 +612,16 @@ func (w *WL) setSegment(segment *Segment) error {
// flushPage writes the new contents of the page to disk. If no more records will fit into
// the page, the remaining bytes will be set to zero and a new page will be started.
// If clear is true, this is enforced regardless of how many bytes are left in the page.
func (w *WL) flushPage(clear bool) error {
// If forceClear is true, this is enforced regardless of how many bytes are left in the page.
func (w *WL) flushPage(forceClear bool) error {
w.metrics.pageFlushes.Inc()
p := w.page
clear = clear || p.full()
shouldClear := forceClear || p.full()
// No more data will fit into the page or an implicit clear.
// Enqueue and clear it.
if clear {
if shouldClear {
p.alloc = pageSize // Write till end of page.
}
@ -633,7 +633,7 @@ func (w *WL) flushPage(clear bool) error {
p.flushed += n
// We flushed an entire page, prepare a new one.
if clear {
if shouldClear {
p.reset()
w.donePages++
w.metrics.pageCompletions.Inc()
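Beyond readability, the rename may also sidestep shadowing: Go 1.21 made clear a predeclared builtin, and a parameter named clear hides it for the whole function body. A minimal sketch (hypothetical function, not the WAL code):

func flush(clear bool) {
	m := map[int]int{1: 1}
	// clear(m) would not compile here: "cannot call non-function clear
	// (variable of type bool)" - the parameter shadows the builtin.
	_, _ = m, clear
}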

View file

@ -155,7 +155,7 @@ func DirHash(t *testing.T, path string) []byte {
modTime, err := info.ModTime().GobEncode()
require.NoError(t, err)
_, err = io.WriteString(hash, string(modTime))
_, err = hash.Write(modTime)
require.NoError(t, err)
return nil
})
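hash.Hash embeds io.Writer, so the gob-encoded ModTime bytes can be written to the hash directly; converting to string first only forced an extra copy of binary data. A standalone sketch of the same idea (assuming the usual crypto/sha256, fmt and time imports):

h := sha256.New()
b, _ := time.Now().GobEncode() // binary payload, not text
h.Write(b)                     // hash.Hash is an io.Writer; no string round-trip
fmt.Printf("%x\n", h.Sum(nil))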

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
"version": "0.53.1",
"version": "0.54.0-rc.1",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@ -29,7 +29,7 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.53.1",
"@prometheus-io/lezer-promql": "0.54.0-rc.1",
"lru-cache": "^7.18.3"
},
"devDependencies": {
@ -37,10 +37,10 @@
"@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.1",
"@codemirror/state": "^6.3.3",
"@codemirror/view": "^6.28.3",
"@codemirror/view": "^6.29.1",
"@lezer/common": "^1.2.1",
"@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.1",
"@lezer/lr": "^1.4.2",
"isomorphic-fetch": "^3.0.0",
"nock": "^13.5.4"
},

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
"version": "0.53.1",
"version": "0.54.0-rc.1",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",
@ -32,7 +32,7 @@
"devDependencies": {
"@lezer/generator": "^1.7.1",
"@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.1"
"@lezer/lr": "^1.4.2"
},
"peerDependencies": {
"@lezer/highlight": "^1.1.2",

web/ui/package-lock.json (generated)
View file

@ -1,12 +1,12 @@
{
"name": "prometheus-io",
"version": "0.53.1",
"version": "0.54.0-rc.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "prometheus-io",
"version": "0.53.1",
"version": "0.54.0-rc.1",
"workspaces": [
"react-app",
"module/*"
@ -30,10 +30,10 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
"version": "0.53.1",
"version": "0.54.0-rc.1",
"license": "Apache-2.0",
"dependencies": {
"@prometheus-io/lezer-promql": "0.53.1",
"@prometheus-io/lezer-promql": "0.54.0-rc.1",
"lru-cache": "^7.18.3"
},
"devDependencies": {
@ -41,10 +41,10 @@
"@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.1",
"@codemirror/state": "^6.3.3",
"@codemirror/view": "^6.28.3",
"@codemirror/view": "^6.29.1",
"@lezer/common": "^1.2.1",
"@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.1",
"@lezer/lr": "^1.4.2",
"isomorphic-fetch": "^3.0.0",
"nock": "^13.5.4"
},
@ -69,12 +69,12 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
"version": "0.53.1",
"version": "0.54.0-rc.1",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.7.1",
"@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.1"
"@lezer/lr": "^1.4.2"
},
"peerDependencies": {
"@lezer/highlight": "^1.1.2",
@ -2093,9 +2093,9 @@
"integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A=="
},
"node_modules/@codemirror/view": {
"version": "6.28.3",
"resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.28.3.tgz",
"integrity": "sha512-QVqP+ko078/h9yrW+u5grX3rQhC+BkGKADRrlDaJznfPngJOv5zObiVf0+SgAWhL/Yt0nvZ+10rO3L+gU5IbFw==",
"version": "6.29.1",
"resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.29.1.tgz",
"integrity": "sha512-7r+DlO/QFwPqKp73uq5mmrS4TuLPUVotbNOKYzN3OLP5ScrOVXcm4g13/48b6ZXGhdmzMinzFYqH0vo+qihIkQ==",
"dependencies": {
"@codemirror/state": "^6.4.0",
"style-mod": "^4.1.0",
@ -3391,9 +3391,9 @@
}
},
"node_modules/@lezer/lr": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.1.tgz",
"integrity": "sha512-CHsKq8DMKBf9b3yXPDIU4DbH+ZJd/sJdYOW2llbW/HudP5u0VS6Bfq1hLYfgU7uAYGFIyGGQIsSOXGPEErZiJw==",
"version": "1.4.2",
"resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.2.tgz",
"integrity": "sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA==",
"dependencies": {
"@lezer/common": "^1.0.0"
}
@ -5541,20 +5541,21 @@
"license": "MIT"
},
"node_modules/body-parser": {
"version": "1.20.0",
"version": "1.20.2",
"resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz",
"integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==",
"dev": true,
"license": "MIT",
"dependencies": {
"bytes": "3.1.2",
"content-type": "~1.0.4",
"content-type": "~1.0.5",
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"on-finished": "2.4.1",
"qs": "6.10.3",
"raw-body": "2.5.1",
"qs": "6.11.0",
"raw-body": "2.5.2",
"type-is": "~1.6.18",
"unpipe": "1.0.0"
},
@ -5565,24 +5566,27 @@
},
"node_modules/body-parser/node_modules/bytes": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
"integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/body-parser/node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dev": true,
"license": "MIT",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/body-parser/node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"dev": true,
"license": "MIT",
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3"
},
@ -5592,8 +5596,9 @@
},
"node_modules/body-parser/node_modules/ms": {
"version": "2.0.0",
"dev": true,
"license": "MIT"
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
"dev": true
},
"node_modules/bonjour-service": {
"version": "1.0.14",
@ -5639,10 +5644,11 @@
}
},
"node_modules/braces": {
"version": "3.0.2",
"license": "MIT",
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
"dependencies": {
"fill-range": "^7.0.1"
"fill-range": "^7.1.1"
},
"engines": {
"node": ">=8"
@ -6214,9 +6220,10 @@
"license": "MIT"
},
"node_modules/content-type": {
"version": "1.0.4",
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
@ -6230,9 +6237,10 @@
}
},
"node_modules/cookie": {
"version": "0.5.0",
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz",
"integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
@ -7112,9 +7120,10 @@
"license": "MIT"
},
"node_modules/ejs": {
"version": "3.1.8",
"version": "3.1.10",
"resolved": "https://registry.npmjs.org/ejs/-/ejs-3.1.10.tgz",
"integrity": "sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"jake": "^10.8.5"
},
@ -8147,16 +8156,17 @@
"license": "MIT"
},
"node_modules/express": {
"version": "4.18.1",
"version": "4.19.2",
"resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz",
"integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"accepts": "~1.3.8",
"array-flatten": "1.1.1",
"body-parser": "1.20.0",
"body-parser": "1.20.2",
"content-disposition": "0.5.4",
"content-type": "~1.0.4",
"cookie": "0.5.0",
"cookie": "0.6.0",
"cookie-signature": "1.0.6",
"debug": "2.6.9",
"depd": "2.0.0",
@ -8172,7 +8182,7 @@
"parseurl": "~1.3.3",
"path-to-regexp": "0.1.7",
"proxy-addr": "~2.0.7",
"qs": "6.10.3",
"qs": "6.11.0",
"range-parser": "~1.2.1",
"safe-buffer": "5.2.1",
"send": "0.18.0",
@ -8368,8 +8378,9 @@
}
},
"node_modules/fill-range": {
"version": "7.0.1",
"license": "MIT",
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
"dependencies": {
"to-regex-range": "^5.0.1"
},
@ -8456,14 +8467,15 @@
"license": "ISC"
},
"node_modules/follow-redirects": {
"version": "1.15.2",
"version": "1.15.6",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz",
"integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==",
"funding": [
{
"type": "individual",
"url": "https://github.com/sponsors/RubenVerborgh"
}
],
"license": "MIT",
"engines": {
"node": ">=4.0"
},
@ -9548,7 +9560,8 @@
},
"node_modules/is-number": {
"version": "7.0.0",
"license": "MIT",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"engines": {
"node": ">=0.12.0"
}
@ -12359,8 +12372,9 @@
},
"node_modules/media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
"integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
@ -14784,9 +14798,10 @@
}
},
"node_modules/qs": {
"version": "6.10.3",
"version": "6.11.0",
"resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz",
"integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"side-channel": "^1.0.4"
},
@ -14874,9 +14889,10 @@
}
},
"node_modules/raw-body": {
"version": "2.5.1",
"version": "2.5.2",
"resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz",
"integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
"dev": true,
"license": "MIT",
"dependencies": {
"bytes": "3.1.2",
"http-errors": "2.0.0",
@ -14889,16 +14905,18 @@
},
"node_modules/raw-body/node_modules/bytes": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
"integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/raw-body/node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"dev": true,
"license": "MIT",
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3"
},
@ -17969,7 +17987,8 @@
},
"node_modules/to-regex-range": {
"version": "5.0.1",
"license": "MIT",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dependencies": {
"is-number": "^7.0.0"
},
@ -18191,8 +18210,9 @@
},
"node_modules/type-is": {
"version": "1.6.18",
"resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
"integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
"dev": true,
"license": "MIT",
"dependencies": {
"media-typer": "0.3.0",
"mime-types": "~2.1.24"
@ -19332,7 +19352,7 @@
},
"react-app": {
"name": "@prometheus-io/app",
"version": "0.53.1",
"version": "0.54.0-rc.1",
"dependencies": {
"@codemirror/autocomplete": "^6.17.0",
"@codemirror/commands": "^6.6.0",
@ -19340,17 +19360,17 @@
"@codemirror/lint": "^6.8.1",
"@codemirror/search": "^6.5.6",
"@codemirror/state": "^6.3.3",
"@codemirror/view": "^6.28.3",
"@codemirror/view": "^6.29.1",
"@forevolve/bootstrap-dark": "^4.0.2",
"@fortawesome/fontawesome-svg-core": "6.5.2",
"@fortawesome/free-solid-svg-icons": "6.5.2",
"@fortawesome/react-fontawesome": "0.2.0",
"@lezer/common": "^1.2.1",
"@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.1",
"@lezer/lr": "^1.4.2",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.53.1",
"@prometheus-io/codemirror-promql": "0.54.0-rc.1",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^9.0.6",

View file

@ -28,5 +28,5 @@
"ts-jest": "^29.2.2",
"typescript": "^4.9.5"
},
"version": "0.53.1"
"version": "0.54.0-rc.1"
}

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/app",
"version": "0.53.1",
"version": "0.54.0-rc.1",
"private": true,
"dependencies": {
"@codemirror/autocomplete": "^6.17.0",
@ -9,17 +9,17 @@
"@codemirror/lint": "^6.8.1",
"@codemirror/search": "^6.5.6",
"@codemirror/state": "^6.3.3",
"@codemirror/view": "^6.28.3",
"@codemirror/view": "^6.29.1",
"@forevolve/bootstrap-dark": "^4.0.2",
"@fortawesome/fontawesome-svg-core": "6.5.2",
"@fortawesome/free-solid-svg-icons": "6.5.2",
"@fortawesome/react-fontawesome": "0.2.0",
"@lezer/common": "^1.2.1",
"@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.1",
"@lezer/lr": "^1.4.2",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.53.1",
"@prometheus-io/codemirror-promql": "0.54.0-rc.1",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^9.0.6",

View file

@ -481,14 +481,14 @@ func New(logger log.Logger, o *Options) *Handler {
router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, o.AppName+" is Healthy.\n")
fmt.Fprintf(w, "%s is Healthy.\n", o.AppName)
})
router.Head("/-/healthy", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
})
router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, o.AppName+" is Ready.\n")
fmt.Fprintf(w, "%s is Ready.\n", o.AppName)
}))
router.Head("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
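The Fprintf changes in this hunk close a small format-string hazard: with the old concatenation, a % in AppName would be parsed as a formatting verb (and go vet flags non-constant format strings). A standalone illustration:

name := "demo-100%"
fmt.Printf(name+" is Healthy.\n")    // the % in name is misparsed as a verb, corrupting output
fmt.Printf("%s is Healthy.\n", name) // prints the name verbatim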