Mirror of https://github.com/prometheus/prometheus.git (synced 2025-03-05 20:59:13 -08:00)
Merge pull request #14676 from prometheus/krajo/update-pr-14546-from-main
Some checks failed
CI / Go tests (push) Has been cancelled
CI / More Go tests (push) Has been cancelled
CI / Go tests with previous Go version (push) Has been cancelled
CI / UI tests (push) Has been cancelled
CI / Go tests on Windows (push) Has been cancelled
CI / Mixins tests (push) Has been cancelled
CI / Build Prometheus for common architectures (0) (push) Has been cancelled
CI / Build Prometheus for common architectures (1) (push) Has been cancelled
CI / Build Prometheus for common architectures (2) (push) Has been cancelled
CI / Build Prometheus for all architectures (0) (push) Has been cancelled
CI / Build Prometheus for all architectures (1) (push) Has been cancelled
CI / Build Prometheus for all architectures (10) (push) Has been cancelled
CI / Build Prometheus for all architectures (11) (push) Has been cancelled
CI / Build Prometheus for all architectures (2) (push) Has been cancelled
CI / Build Prometheus for all architectures (3) (push) Has been cancelled
CI / Build Prometheus for all architectures (4) (push) Has been cancelled
CI / Build Prometheus for all architectures (5) (push) Has been cancelled
CI / Build Prometheus for all architectures (6) (push) Has been cancelled
CI / Build Prometheus for all architectures (7) (push) Has been cancelled
CI / Build Prometheus for all architectures (8) (push) Has been cancelled
CI / Build Prometheus for all architectures (9) (push) Has been cancelled
CI / Check generated parser (push) Has been cancelled
CI / golangci-lint (push) Has been cancelled
CI / fuzzing (push) Has been cancelled
CI / codeql (push) Has been cancelled
CI / Report status of build Prometheus for all architectures (push) Has been cancelled
CI / Publish main branch artifacts (push) Has been cancelled
CI / Publish release artefacts (push) Has been cancelled
CI / Publish UI on npm Registry (push) Has been cancelled
update pr 14546 from main
Commit ce36c05266

.github/workflows/ci.yml (vendored, 2 changes)
@@ -186,7 +186,7 @@ jobs:
       with:
         args: --verbose
         # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-        version: v1.59.1
+        version: v1.60.2
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'
@@ -25,15 +25,34 @@ linters:
   - loggercheck

issues:
  max-issues-per-linter: 0
  max-same-issues: 0
  # The default exclusions are too aggressive. For one, they
  # essentially disable any linting on doc comments. We disable
  # default exclusions here and add exclusions fitting our codebase
  # further down.
  exclude-use-default: false
  exclude-files:
    # Skip autogenerated files.
    - ^.*\.(pb|y)\.go$
  exclude-dirs:
-   # Copied it from a different source
+   # Copied it from a different source.
    - storage/remote/otlptranslator/prometheusremotewrite
    - storage/remote/otlptranslator/prometheus
  exclude-rules:
    - linters:
        - errcheck
      # Taken from the default exclusions (that are otherwise disabled above).
      text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
    - linters:
        - govet
      # We use many Seek methods that do not follow the usual pattern.
      text: "stdmethods: method Seek.* should have signature Seek"
    - linters:
        - revive
      # We have stopped at some point to write doc comments on exported symbols.
      # TODO(beorn7): Maybe we should enforce this again? There are ~500 offenders right now.
      text: exported (.+) should have comment( \(or a comment on this block\))? or be unexported
    - linters:
        - gocritic
      text: "appendAssign"
@@ -94,15 +113,14 @@ linters-settings:
    errorf: false
  revive:
    # By default, revive will enable only the linting rules that are named in the configuration file.
-   # So, it's needed to explicitly set in configuration all required rules.
-   # The following configuration enables all the rules from the defaults.toml
-   # https://github.com/mgechev/revive/blob/master/defaults.toml
+   # So, it's needed to explicitly enable all required rules here.
    rules:
+     # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
      - name: blank-imports
+     - name: comment-spacings
      - name: context-as-argument
        arguments:
-         # allow functions with test or bench signatures
+         # Allow functions with test or bench signatures.
          - allowTypesBefore: "*testing.T,testing.TB"
      - name: context-keys-type
      - name: dot-imports
@@ -118,6 +136,8 @@ linters-settings:
      - name: increment-decrement
      - name: indent-error-flow
      - name: package-comments
+       # TODO(beorn7): Currently, we have a lot of missing package doc comments. Maybe we should have them.
+       disabled: true
      - name: range
      - name: receiver-naming
      - name: redefines-builtin-id
@@ -3,6 +3,8 @@
 ## unreleased

+* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
+* [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706
 * [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042

 ## 2.54.0-rc.1 / 2024-08-05
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.1
+GOLANGCI_LINT_VERSION ?= v1.60.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -152,6 +152,7 @@ type flagConfig struct {
 	queryConcurrency    int
 	queryMaxSamples     int
 	RemoteFlushDeadline model.Duration
+	nameEscapingScheme  string

 	featureList   []string
 	memlimitRatio float64
@@ -240,6 +241,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "delayed-compaction":
 			c.tsdb.EnableDelayedCompaction = true
 			level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.")
+		case "utf8-names":
+			model.NameValidationScheme = model.UTF8Validation
+			level.Info(logger).Log("msg", "Experimental UTF-8 support enabled")
 		case "":
 			continue
 		case "promql-at-modifier", "promql-negative-offset":
@@ -484,7 +488,9 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("scrape.name-escaping-scheme", `Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots".`).Default(scrape.DefaultNameEscapingScheme.String()).StringVar(&cfg.nameEscapingScheme)
+
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)

 	promlogflag.AddFlags(a, &cfg.promlogConfig)
@@ -512,6 +518,15 @@ func main() {
 		os.Exit(1)
 	}

+	if cfg.nameEscapingScheme != "" {
+		scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme)
+			os.Exit(1)
+		}
+		model.NameEscapingScheme = scheme
+	}
+
 	if agentMode && len(serverOnlyFlags) > 0 {
 		fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags)
 		os.Exit(3)
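For context on the new block above: `model.ToEscapingScheme` maps the flag string to an escaping strategy, and assigning `model.NameEscapingScheme` makes it the process-wide default. A minimal standalone sketch of that flow; the `EscapeName` call is an assumption about the prometheus/common API, not part of this diff:

```go
package main

import (
	"fmt"
	"os"

	"github.com/prometheus/common/model"
)

func main() {
	for _, name := range []string{"values", "underscores", "dots", "bogus"} {
		scheme, err := model.ToEscapingScheme(name)
		if err != nil {
			// Mirrors the error path above: reject anything but the three valid schemes.
			fmt.Fprintf(os.Stderr, "invalid name escaping scheme %q: %v\n", name, err)
			continue
		}
		// EscapeName (assumed API) rewrites a UTF-8 name for receivers that
		// only accept the legacy character set.
		fmt.Printf("%s -> %q\n", name, model.EscapeName("http.requests.total", scheme))
	}
}
```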
@@ -471,7 +471,7 @@ func (ls lintConfig) lintDuplicateRules() bool {
 	return ls.all || ls.duplicateRules
 }

-// Check server status - healthy & ready.
+// CheckServerStatus - healthy & ready.
 func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error {
 	if serverURL.Scheme == "" {
 		serverURL.Scheme = "http"
@@ -31,7 +31,7 @@ import (
 	"github.com/prometheus/prometheus/util/fmtutil"
 )

-// Push metrics to a prometheus remote write (for testing purpose only).
+// PushMetrics to a prometheus remote write (for testing purpose only).
 func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int {
 	addressURL, err := url.Parse(url.String())
 	if err != nil {
@@ -866,16 +866,16 @@ func displayHistogram(dataType string, datas []int, total int) {
 	fmt.Println()
 }

-func generateBucket(min, max int) (start, end, step int) {
-	s := (max - min) / 10
+func generateBucket(minVal, maxVal int) (start, end, step int) {
+	s := (maxVal - minVal) / 10

 	step = 10
 	for step < s && step <= 10000 {
 		step *= 10
 	}

-	start = min - min%step
-	end = max - max%step + step
+	start = minVal - minVal%step
+	end = maxVal - maxVal%step + step

 	return
 }
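Since the hunk shows the whole helper, here is a runnable sketch of the renamed function with a small demo; the function body is copied from the diff, only `main` is new:

```go
package main

import "fmt"

// generateBucket picks aligned start/end bounds and a step size that cover
// the observed value range with roughly ten buckets.
func generateBucket(minVal, maxVal int) (start, end, step int) {
	s := (maxVal - minVal) / 10

	step = 10
	for step < s && step <= 10000 {
		step *= 10
	}

	start = minVal - minVal%step
	end = maxVal - maxVal%step + step

	return
}

func main() {
	start, end, step := generateBucket(0, 99)
	fmt.Println(start, end, step) // 0 100 10
}
```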
@@ -20,6 +20,7 @@ import (
 	"math"
 	"os"
 	"runtime"
+	"slices"
 	"strings"
 	"testing"
 	"time"
@@ -152,12 +153,18 @@ func TestTSDBDump(t *testing.T) {
 			expectedMetrics, err := os.ReadFile(tt.expectedDump)
 			require.NoError(t, err)
 			expectedMetrics = normalizeNewLine(expectedMetrics)
-			// even though in case of one matcher samples are not sorted, the order in the cases above should stay the same.
-			require.Equal(t, string(expectedMetrics), dumpedMetrics)
+			// Sort both, because Prometheus does not guarantee the output order.
+			require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
 		})
 	}
 }

+func sortLines(buf string) string {
+	lines := strings.Split(buf, "\n")
+	slices.Sort(lines)
+	return strings.Join(lines, "\n")
+}
+
 func TestTSDBDumpOpenMetrics(t *testing.T) {
 	storage := promqltest.LoadedStorage(t, `
 	load 1m
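The point of `sortLines` is that a dump produced via a regex matcher has no guaranteed series order, so the test now compares an order-independent form of both sides. A tiny sketch of the idea, with the helper copied from the diff:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

func sortLines(buf string) string {
	lines := strings.Split(buf, "\n")
	slices.Sort(lines)
	return strings.Join(lines, "\n")
}

func main() {
	got := "b 2\na 1"  // dump order is unspecified...
	want := "a 1\nb 2" // ...but the content should match
	fmt.Println(sortLines(got) == sortLines(want)) // true
}
```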
@@ -169,7 +176,7 @@ func TestTSDBDumpOpenMetrics(t *testing.T) {
 	require.NoError(t, err)
 	expectedMetrics = normalizeNewLine(expectedMetrics)
 	dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
-	require.Equal(t, string(expectedMetrics), dumpedMetrics)
+	require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
 }

 func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) {
@@ -67,6 +67,11 @@ var (
 	}
 )

+const (
+	LegacyValidationConfig = "legacy"
+	UTF8ValidationConfig   = "utf8"
+)
+
 // Load parses the YAML input s into a Config.
 func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
 	cfg := &Config{}
@@ -446,6 +451,8 @@ type GlobalConfig struct {
 	// Keep no more than this many dropped targets per job.
 	// 0 means no limit.
 	KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
+	// Allow UTF8 Metric and Label Names.
+	MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"`
 }

 // ScrapeProtocol represents supported protocol for scraping metrics.
@@ -471,6 +478,7 @@ var (
 	PrometheusText0_0_4  ScrapeProtocol = "PrometheusText0.0.4"
 	OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
 	OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
+	UTF8NamesHeader      string         = model.EscapingKey + "=" + model.AllowUTF8

 	ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
 		PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
@@ -656,6 +664,8 @@ type ScrapeConfig struct {
 	// Keep no more than this many dropped targets per job.
 	// 0 means no limit.
 	KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
+	// Allow UTF8 Metric and Label Names.
+	MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"`

 	// We cannot do proper Go type embedding below as the parser will then parse
 	// values arbitrarily into the overflow maps of further-down types.
@@ -762,6 +772,17 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
 		return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
 	}

+	switch globalConfig.MetricNameValidationScheme {
+	case "", LegacyValidationConfig:
+	case UTF8ValidationConfig:
+		if model.NameValidationScheme != model.UTF8Validation {
+			return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names")
+		}
+	default:
+		return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
+	}
+	c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme
+
 	return nil
 }
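The rule the new switch encodes: an empty or "legacy" scheme always passes, "utf8" is only accepted once the utf8-names feature flag has flipped the global validation scheme, and anything else is rejected. A distilled, standalone sketch of that decision (names simplified; not the actual config type):

```go
package main

import (
	"errors"
	"fmt"
)

// validateScheme mirrors the switch added in ScrapeConfig.Validate above.
func validateScheme(configured string, utf8FeatureEnabled bool) error {
	switch configured {
	case "", "legacy":
		return nil
	case "utf8":
		if !utf8FeatureEnabled {
			return errors.New("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names")
		}
		return nil
	default:
		return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", configured)
	}
}

func main() {
	fmt.Println(validateScheme("utf8", false)) // rejected: flag missing
	fmt.Println(validateScheme("utf8", true))  // <nil>
}
```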
@@ -13,7 +13,7 @@

 package discovery

-// Create a dummy metrics struct, because this SD doesn't have any metrics.
+// NoopDiscovererMetrics creates a dummy metrics struct, because this SD doesn't have any metrics.
 type NoopDiscovererMetrics struct{}

 var _ DiscovererMetrics = (*NoopDiscovererMetrics)(nil)
@@ -39,7 +39,7 @@ type Discoverer interface {
 	Run(ctx context.Context, up chan<- []*targetgroup.Group)
 }

-// Internal metrics of service discovery mechanisms.
+// DiscovererMetrics are internal metrics of service discovery mechanisms.
 type DiscovererMetrics interface {
 	Register() error
 	Unregister()
@@ -56,7 +56,7 @@ type DiscovererOptions struct {
 	HTTPClientOptions []config.HTTPClientOption
 }

-// Metrics used by the "refresh" package.
+// RefreshMetrics are used by the "refresh" package.
 // We define them here in the "discovery" package in order to avoid a cyclic dependency between
 // "discovery" and "refresh".
 type RefreshMetrics struct {
@@ -64,17 +64,18 @@ type RefreshMetrics struct {
 	Duration prometheus.Observer
 }

-// Instantiate the metrics used by the "refresh" package.
+// RefreshMetricsInstantiator instantiates the metrics used by the "refresh" package.
 type RefreshMetricsInstantiator interface {
 	Instantiate(mech string) *RefreshMetrics
 }

-// An interface for registering, unregistering, and instantiating metrics for the "refresh" package.
-// Refresh metrics are registered and unregistered outside of the service discovery mechanism.
-// This is so that the same metrics can be reused across different service discovery mechanisms.
-// To manage refresh metrics inside the SD mechanism, we'd need to use const labels which are
-// specific to that SD. However, doing so would also expose too many unused metrics on
-// the Prometheus /metrics endpoint.
+// RefreshMetricsManager is an interface for registering, unregistering, and
+// instantiating metrics for the "refresh" package. Refresh metrics are
+// registered and unregistered outside of the service discovery mechanism. This
+// is so that the same metrics can be reused across different service discovery
+// mechanisms. To manage refresh metrics inside the SD mechanism, we'd need to
+// use const labels which are specific to that SD. However, doing so would also
+// expose too many unused metrics on the Prometheus /metrics endpoint.
 type RefreshMetricsManager interface {
 	DiscovererMetrics
 	RefreshMetricsInstantiator
@@ -145,7 +146,8 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
 	return staticDiscoverer(c), nil
 }

-// No metrics are needed for this service discovery mechanism.
+// NewDiscovererMetrics returns NoopDiscovererMetrics because no metrics are
+// needed for this service discovery mechanism.
 func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
 	return &NoopDiscovererMetrics{}
 }
@@ -97,7 +97,6 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic
 		resp.Body.Close()
 	}()

-	//nolint:usestdlibvars
 	if resp.StatusCode/100 != 2 {
 		return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode)
 	}
@@ -87,7 +87,6 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
 		resp.Body.Close()
 	}()

-	//nolint:usestdlibvars
 	if resp.StatusCode/100 != 2 {
 		return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode)
 	}
@@ -970,7 +970,7 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) {
 	}.Run(t)
 }

-// TestEndpointsUpdatePod makes sure that Endpoints discovery detects underlying Pods changes.
+// TestEndpointsDiscoveryUpdatePod makes sure that Endpoints discovery detects underlying Pods changes.
 // See https://github.com/prometheus/prometheus/issues/11305 for more details.
 func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
 	pod := &v1.Pod{
@@ -154,7 +154,7 @@ func (d k8sDiscoveryTest) Run(t *testing.T) {

 // readResultWithTimeout reads all targetgroups from channel with timeout.
 // It merges targetgroups by source and sends the result to result channel.
-func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, max int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) {
+func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, maxGroups int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) {
 	res := make(map[string]*targetgroup.Group)
 	timeout := time.After(stopAfter)
 Loop:
@@ -167,7 +167,7 @@ Loop:
 			}
 			res[tg.Source] = tg
 		}
-		if len(res) == max {
+		if len(res) == maxGroups {
 			// Reached max target groups we may get, break fast.
 			break Loop
 		}
@@ -175,10 +175,10 @@ Loop:
 		// Because we use queue, an object that is created then
 		// deleted or updated may be processed only once.
 		// So possibly we may skip events, timed out here.
-		t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max)
+		t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), maxGroups)
 		break Loop
 	case <-ctx.Done():
-		t.Logf("stopped, got %d (max: %d) items", len(res), max)
+		t.Logf("stopped, got %d (max: %d) items", len(res), maxGroups)
 		break Loop
 	}
 }
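The renamed parameter aside, the function is a standard drain-with-deadline loop. A stripped-down sketch of the same pattern (illustrative names, not the test's API):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// collect drains ch until maxItems arrived, the deadline fired, or the
// context was cancelled, whichever comes first.
func collect(ctx context.Context, ch <-chan int, maxItems int, stopAfter time.Duration) []int {
	var res []int
	timeout := time.After(stopAfter)
	for {
		select {
		case v := <-ch:
			res = append(res, v)
			if len(res) == maxItems {
				return res // reached the max we may get, break fast
			}
		case <-timeout:
			return res // some events may have been coalesced; stop waiting
		case <-ctx.Done():
			return res
		}
	}
}

func main() {
	ch := make(chan int, 2)
	ch <- 1
	ch <- 2
	fmt.Println(collect(context.Background(), ch, 2, 50*time.Millisecond)) // [1 2]
}
```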
@@ -64,7 +64,7 @@ func (p *Provider) Config() interface{} {
 	return p.config
 }

-// Registers the metrics needed for SD mechanisms.
+// CreateAndRegisterSDMetrics registers the metrics needed for SD mechanisms.
 // Does not register the metrics for the Discovery Manager.
 // TODO(ptodev): Add ability to unregister the metrics?
 func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]DiscovererMetrics, error) {
@@ -17,7 +17,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 )

-// Metric vectors for the "refresh" package.
+// RefreshMetricsVecs are metric vectors for the "refresh" package.
 // We define them here in the "discovery" package in order to avoid a cyclic dependency between
 // "discovery" and "refresh".
 type RefreshMetricsVecs struct {
@@ -19,8 +19,8 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 )

-// A utility to be used by implementations of discovery.Discoverer
-// which need to manage the lifetime of their metrics.
+// MetricRegisterer is used by implementations of discovery.Discoverer that need
+// to manage the lifetime of their metrics.
 type MetricRegisterer interface {
 	RegisterMetrics() error
 	UnregisterMetrics()
@@ -34,7 +34,7 @@ type metricRegistererImpl struct {

 var _ MetricRegisterer = &metricRegistererImpl{}

-// Creates an instance of a MetricRegisterer.
+// NewMetricRegisterer creates an instance of a MetricRegisterer.
 // Typically called inside the implementation of the NewDiscoverer() method.
 func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer {
 	return &metricRegistererImpl{
@@ -56,7 +56,8 @@ The Prometheus monitoring server
 | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
 | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
 | <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
-| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
+| <code class="text-nowrap">--scrape.name-escaping-scheme</code> | Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots". | `values` |
+| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
 | <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
 | <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |
@@ -121,6 +121,11 @@ global:
   # that will be kept in memory. 0 means no limit.
   [ keep_dropped_targets: <int> | default = 0 ]

+  # Specifies the validation scheme for metric and label names. Either blank or
+  # "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
+  # UTF-8 support.
+  [ metric_name_validation_scheme <string> | default "legacy" ]
+
 runtime:
   # Configure the Go garbage collector GOGC parameter
   # See: https://tip.golang.org/doc/gc-guide#GOGC
@@ -461,6 +466,11 @@ metric_relabel_configs:
   # that will be kept in memory. 0 means no limit.
   [ keep_dropped_targets: <int> | default = 0 ]

+  # Specifies the validation scheme for metric and label names. Either blank or
+  # "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
+  # UTF-8 support.
+  [ metric_name_validation_scheme <string> | default "legacy" ]
+
 # Limit on total number of positive and negative buckets allowed in a single
 # native histogram. The resolution of a histogram with more buckets will be
 # reduced until the number of buckets is within the limit. If the limit cannot
@@ -3265,12 +3275,16 @@ Initially, aside from the configured per-target labels, a target's `job`
 label is set to the `job_name` value of the respective scrape configuration.
 The `__address__` label is set to the `<host>:<port>` address of the target.
 After relabeling, the `instance` label is set to the value of `__address__` by default if
-it was not set during relabeling. The `__scheme__` and `__metrics_path__` labels
-are set to the scheme and metrics path of the target respectively. The `__param_<name>`
-label is set to the value of the first passed URL parameter called `<name>`.
+it was not set during relabeling.
+
+The `__scheme__` and `__metrics_path__` labels
+are set to the scheme and metrics path of the target respectively, as specified in `scrape_config`.
+
+The `__param_<name>`
+label is set to the value of the first passed URL parameter called `<name>`, as defined in `scrape_config`.

 The `__scrape_interval__` and `__scrape_timeout__` labels are set to the target's
-interval and timeout.
+interval and timeout, as specified in `scrape_config`.

 Additional labels prefixed with `__meta_` may be available during the
 relabeling phase. They are set by the service discovery mechanism that provided
@@ -249,3 +249,11 @@ In the event of multiple consecutive Head compactions being possible, only the f
 Note that during this delay, the Head continues its usual operations, which include serving and appending series.

 Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay was not in place.
+
+## UTF-8 Name Support
+
+`--enable-feature=utf8-names`
+
+When enabled, changes the metric and label name validation scheme inside Prometheus to allow the full UTF-8 character set.
+By itself, this flag does not enable the request of UTF-8 names via content negotiation.
+Users will also have to set `metric_name_validation_scheme` in scrape configs to enable the feature either on the global config or on a per-scrape config basis.
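To make the two-step enablement concrete: the flag flips the in-process validation scheme, which is what name checks consult. A minimal sketch, assuming the `prometheus/common/model` validation API that this diff itself uses:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := model.LabelValue("http.requests.total") // dots are invalid under legacy rules

	model.NameValidationScheme = model.LegacyValidation
	fmt.Println(model.IsValidMetricName(name)) // false

	// What --enable-feature=utf8-names does internally:
	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(model.IsValidMetricName(name)) // true
}
```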
@@ -617,9 +617,9 @@ Like `sort`, `sort_desc` only affects the results of instant queries, as range q

 ## `sort_by_label()`

-**This function has to be enabled via the [feature flag](../feature_flags.md) `--enable-feature=promql-experimental-functions`.**
+**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**

-`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by their label values and sample value in case of label values being equal, in ascending order.
+`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by the values of the given labels in ascending order. In case these label values are equal, elements are sorted by their full label sets.

 Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering.
@@ -627,7 +627,7 @@ This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_so

 ## `sort_by_label_desc()`

-**This function has to be enabled via the [feature flag](../feature_flags.md) `--enable-feature=promql-experimental-functions`.**
+**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**

 Same as `sort_by_label`, but sorts in descending order.
@@ -676,7 +676,7 @@ over time and return an instant vector with per-series aggregation results:
 * `last_over_time(range-vector)`: the most recent point value in the specified interval.
 * `present_over_time(range-vector)`: the value 1 for any series in the specified interval.

-If the [feature flag](../feature_flags.md)
+If the [feature flag](../feature_flags.md#experimental-promql-functions)
 `--enable-feature=promql-experimental-functions` is set, the following
 additional functions are available:
@@ -20,7 +20,7 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
-	github.com/aws/aws-sdk-go v1.53.16 // indirect
+	github.com/aws/aws-sdk-go v1.55.5 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -62,7 +62,7 @@ require (
 	golang.org/x/text v0.16.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
-	google.golang.org/grpc v1.64.0 // indirect
+	google.golang.org/grpc v1.65.0 // indirect
 	google.golang.org/protobuf v1.34.2 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
@@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
-github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
+github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -37,8 +37,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
-github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -402,8 +402,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:
 google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
-google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
-google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
+google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
+google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -15,7 +15,9 @@ package exemplar

 import "github.com/prometheus/prometheus/model/labels"

-// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters
+// ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of
+// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128
+// UTF-8 characters."
 // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
 const ExemplarMaxLabelSetLength = 128
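The constant encodes a limit in UTF-8 characters, not bytes, so a conforming check has to count runes across all exemplar label names and values combined. A small illustrative sketch (the helper is hypothetical; only the 128 limit comes from the spec):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

const exemplarMaxLabelSetLength = 128

// labelSetTooLong counts the combined rune length of all names and values.
func labelSetTooLong(labels map[string]string) bool {
	n := 0
	for name, value := range labels {
		n += utf8.RuneCountInString(name) + utf8.RuneCountInString(value)
	}
	return n > exemplarMaxLabelSetLength
}

func main() {
	fmt.Println(labelSetTooLong(map[string]string{"trace_id": "abc123"})) // false
}
```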
@@ -49,7 +51,7 @@ func (e Exemplar) Equals(e2 Exemplar) bool {
 	return e.Value == e2.Value
 }

-// Sort first by timestamp, then value, then labels.
+// Compare first timestamps, then values, then labels.
 func Compare(a, b Exemplar) int {
 	if a.Ts < b.Ts {
 		return -1
@@ -315,7 +315,8 @@ func Compare(a, b Labels) int {
 	return len(a) - len(b)
 }

-// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.
+// CopyFrom copies labels from b on top of whatever was in ls previously,
+// reusing memory or expanding if needed.
 func (ls *Labels) CopyFrom(b Labels) {
 	(*ls) = append((*ls)[:0], b...)
 }
@@ -422,7 +423,7 @@ type ScratchBuilder struct {
 	add Labels
 }

-// Symbol-table is no-op, just for api parity with dedupelabels.
+// SymbolTable is no-op, just for api parity with dedupelabels.
 type SymbolTable struct{}

 func NewSymbolTable() *SymbolTable { return nil }
@@ -458,7 +459,7 @@ func (b *ScratchBuilder) Add(name, value string) {
 	b.add = append(b.add, Label{Name: name, Value: value})
 }

-// Add a name/value pair, using []byte instead of string.
+// UnsafeAddBytes adds a name/value pair, using []byte instead of string.
 // The '-tags stringlabels' version of this function is unsafe, hence the name.
 // This version is safe - it copies the strings immediately - but we keep the same name so everything compiles.
 func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
@@ -475,14 +476,14 @@ func (b *ScratchBuilder) Assign(ls Labels) {
 	b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice.
 }

-// Return the name/value pairs added so far as a Labels object.
+// Labels returns the name/value pairs added so far as a Labels object.
 // Note: if you want them sorted, call Sort() first.
 func (b *ScratchBuilder) Labels() Labels {
 	// Copy the slice, so the next use of ScratchBuilder doesn't overwrite.
 	return append([]Label{}, b.add...)
 }

-// Write the newly-built Labels out to ls.
+// Overwrite the newly-built Labels out to ls.
 // Callers must ensure that there are no other references to ls, or any strings fetched from it.
 func (b *ScratchBuilder) Overwrite(ls *Labels) {
 	*ls = append((*ls)[:0], b.add...)
@@ -106,8 +106,8 @@ const (
 	EntryInvalid   Entry = -1
 	EntryType      Entry = 0
 	EntryHelp      Entry = 1
-	EntrySeries    Entry = 2 // A series with a simple float64 as value.
+	EntrySeries    Entry = 2 // EntrySeries marks a series with a simple float64 as value.
 	EntryComment   Entry = 3
 	EntryUnit      Entry = 4
-	EntryHistogram Entry = 5 // A series with a native histogram as a value.
+	EntryHistogram Entry = 5 // EntryHistogram marks a series with a native histogram as a value.
 )
@@ -674,7 +674,6 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
 	}()

 	// Any HTTP status 2xx is OK.
-	//nolint:usestdlibvars
 	if resp.StatusCode/100 != 2 {
 		return fmt.Errorf("bad response status %s", resp.Status)
 	}
@@ -711,7 +711,7 @@ func TestHangingNotifier(t *testing.T) {
 	)

 	var (
-		sendTimeout = 10 * time.Millisecond
+		sendTimeout = 100 * time.Millisecond
 		sdUpdatert  = sendTimeout / 2

 		done = make(chan struct{})
@@ -573,7 +573,7 @@ func (ng *Engine) validateOpts(expr parser.Expr) error {
 	return validationErr
 }

-// NewTestQuery: inject special behaviour into Query for testing.
+// NewTestQuery injects special behaviour into Query for testing.
 func (ng *Engine) NewTestQuery(f func(context.Context) error) Query {
 	qry := &query{
 		q: "test statement",
@@ -2789,6 +2789,7 @@ type groupedAggregation struct {
 	seen                   bool // Was this output groups seen in the input at this timestamp.
 	hasFloat               bool // Has at least 1 float64 sample aggregated.
 	hasHistogram           bool // Has at least 1 histogram sample aggregated.
+	incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets.
 	groupAggrComplete      bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group.
 	incrementalMean        bool // True after reverting to incremental calculation of the mean value.
 }
@@ -2817,6 +2818,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			seen:                   true,
 			floatValue:             f,
 			floatMean:              f,
+			incompatibleHistograms: false,
 			groupCount:             1,
 		}
 		switch op {
@@ -2838,6 +2840,10 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			continue
 		}

+		if group.incompatibleHistograms {
+			continue
+		}
+
 		switch op {
 		case parser.SUM:
 			if h != nil {
@@ -2846,6 +2852,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 				_, err := group.histogramValue.Add(h)
 				if err != nil {
 					handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+					group.incompatibleHistograms = true
 				}
 			}
 			// Otherwise the aggregation contained floats
@@ -2866,10 +2873,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 				toAdd, err := left.Sub(right)
 				if err != nil {
 					handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+					group.incompatibleHistograms = true
+					continue
 				}
 				_, err = group.histogramValue.Add(toAdd)
 				if err != nil {
 					handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+					group.incompatibleHistograms = true
+					continue
 				}
 			}
 			// Otherwise the aggregation contained floats
@@ -2966,6 +2977,8 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			continue
 		}
 		switch {
+		case aggr.incompatibleHistograms:
+			continue
 		case aggr.hasHistogram:
 			aggr.histogramValue = aggr.histogramValue.Compact(0)
 		case aggr.incrementalMean:
@@ -2992,9 +3005,12 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 				annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange()))
 				continue
 			}
-			if aggr.hasHistogram {
+			switch {
+			case aggr.incompatibleHistograms:
+				continue
+			case aggr.hasHistogram:
 				aggr.histogramValue.Compact(0)
-			} else {
+			default:
 				aggr.floatValue += aggr.floatKahanC
 			}
 		default:
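The `default` branch folds `floatKahanC` back into the result: the evaluator sums floats with Kahan (compensated) summation, carrying the accumulated rounding error in a separate term. A generic sketch of that technique (not the evaluator's code):

```go
package main

import "fmt"

func kahanSum(xs []float64) float64 {
	var sum, c float64 // c carries the accumulated rounding error
	for _, x := range xs {
		y := x - c
		t := sum + y
		c = (t - sum) - y // what was actually added minus what we meant to add
		sum = t
	}
	return sum + c // fold the compensation term back in at the end
}

func main() {
	xs := []float64{1e16}
	for i := 0; i < 10; i++ {
		xs = append(xs, 1) // each naive += 1 is lost to rounding at this magnitude
	}
	naive := 0.0
	for _, x := range xs {
		naive += x
	}
	fmt.Println(naive)        // 1e+16
	fmt.Println(kahanSum(xs)) // 1.000000000000001e+16
}
```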
@@ -3161,7 +3177,7 @@ seriesLoop:
 	return mat, annos
 }

-// aggregationK evaluates count_values on vec.
+// aggregationCountValues evaluates count_values on vec.
 // Outputs as many series per group as there are values in the input.
 func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	type groupCount struct {
@@ -3515,14 +3531,14 @@ func makeInt64Pointer(val int64) *int64 {
 	return valp
 }

-// Add RatioSampler interface to allow unit-testing (previously: Randomizer).
+// RatioSampler allows unit-testing (previously: Randomizer).
 type RatioSampler interface {
 	// Return this sample "offset" between [0.0, 1.0]
 	sampleOffset(ts int64, sample *Sample) float64
 	AddRatioSample(r float64, sample *Sample) bool
 }

-// Use Hash(labels.String()) / maxUint64 as a "deterministic"
+// HashRatioSampler uses Hash(labels.String()) / maxUint64 as a "deterministic"
 // value in [0.0, 1.0].
 type HashRatioSampler struct{}
@@ -406,17 +406,22 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel

 // === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
 func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	// In case the labels are the same, NaN should sort to the bottom, so take
-	// ascending sort with NaN first and reverse it.
-	var anno annotations.Annotations
-	vals[0], anno = funcSort(vals, args, enh)
-	labels := stringSliceFromArgs(args[1:])
+	// First, sort by the full label set. This ensures a consistent ordering in case sorting by the
+	// labels provided as arguments is not conclusive.
 	slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
-		// Iterate over each given label
+		return labels.Compare(a.Metric, b.Metric)
+	})
+
+	labels := stringSliceFromArgs(args[1:])
+	// Next, sort by the labels provided as arguments.
+	slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
+		// Iterate over each given label.
 		for _, label := range labels {
 			lv1 := a.Metric.Get(label)
 			lv2 := b.Metric.Get(label)

+			// If we encounter multiple samples with the same label values, the sorting which was
+			// performed in the first step will act as a "tie breaker".
 			if lv1 == lv2 {
 				continue
 			}
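The technique here: a first pass imposes a total order over the full label set, so the second pass over only the requested labels has a deterministic outcome when those labels tie. An equivalent single-comparator formulation, for illustration (the diff uses two passes over real `Sample` values):

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

type sample map[string]string

// compareAll is the tie breaker: a total order over the full label set.
func compareAll(a, b sample) int {
	for _, k := range []string{"group", "instance", "job"} {
		if c := strings.Compare(a[k], b[k]); c != 0 {
			return c
		}
	}
	return 0
}

func main() {
	v := []sample{
		{"group": "production", "instance": "1", "job": "api"},
		{"group": "canary", "instance": "0", "job": "api"},
		{"group": "production", "instance": "0", "job": "api"},
		{"group": "canary", "instance": "1", "job": "api"},
	}
	byLabel := "instance"
	slices.SortFunc(v, func(a, b sample) int {
		if c := strings.Compare(a[byLabel], b[byLabel]); c != 0 {
			return c // primary key: the requested label
		}
		return compareAll(a, b) // ties fall back to the full label set
	})
	for _, s := range v {
		fmt.Println(s["instance"], s["group"]) // 0 canary, 0 production, 1 canary, 1 production
	}
}
```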
@@ -431,22 +436,27 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNode
 		return 0
 	})

-	return vals[0].(Vector), anno
+	return vals[0].(Vector), nil
 }

 // === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
 func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	// In case the labels are the same, NaN should sort to the bottom, so take
-	// ascending sort with NaN first and reverse it.
-	var anno annotations.Annotations
-	vals[0], anno = funcSortDesc(vals, args, enh)
-	labels := stringSliceFromArgs(args[1:])
+	// First, sort by the full label set. This ensures a consistent ordering in case sorting by the
+	// labels provided as arguments is not conclusive.
 	slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
-		// Iterate over each given label
+		return labels.Compare(b.Metric, a.Metric)
+	})
+
+	labels := stringSliceFromArgs(args[1:])
+	// Next, sort by the labels provided as arguments.
+	slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
+		// Iterate over each given label.
 		for _, label := range labels {
 			lv1 := a.Metric.Get(label)
 			lv2 := b.Metric.Get(label)

+			// If we encounter multiple samples with the same label values, the sorting which was
+			// performed in the first step will act as a "tie breaker".
 			if lv1 == lv2 {
 				continue
 			}
@@ -461,21 +471,21 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *Eval
 		return 0
 	})

-	return vals[0].(Vector), anno
+	return vals[0].(Vector), nil
 }

 // === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) ===
 func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	vec := vals[0].(Vector)
-	min := vals[1].(Vector)[0].F
-	max := vals[2].(Vector)[0].F
-	if max < min {
+	minVal := vals[1].(Vector)[0].F
+	maxVal := vals[2].(Vector)[0].F
+	if maxVal < minVal {
 		return enh.Out, nil
 	}
 	for _, el := range vec {
 		enh.Out = append(enh.Out, Sample{
 			Metric: el.Metric.DropMetricName(),
-			F:      math.Max(min, math.Min(max, el.F)),
+			F:      math.Max(minVal, math.Min(maxVal, el.F)),
 		})
 	}
 	return enh.Out, nil
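A note on why `min` and `max` get renamed throughout this file: since Go 1.21 they are predeclared functions, and a local variable with the same name shadows the builtin for the rest of the scope, which newer lint setups flag. A two-line illustration:

```go
package main

import "fmt"

func main() {
	fmt.Println(min(3, 1), max(3, 1)) // builtins since Go 1.21: prints "1 3"

	min := 10.0 // legal, but shadows the builtin from here on...
	// fmt.Println(min(3, 1)) // ...so this would no longer compile
	fmt.Println(min)
}
```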
@@ -484,11 +494,11 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
 // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) ===
 func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	vec := vals[0].(Vector)
-	max := vals[1].(Vector)[0].F
+	maxVal := vals[1].(Vector)[0].F
 	for _, el := range vec {
 		enh.Out = append(enh.Out, Sample{
 			Metric: el.Metric.DropMetricName(),
-			F:      math.Min(max, el.F),
+			F:      math.Min(maxVal, el.F),
 		})
 	}
 	return enh.Out, nil
@@ -497,11 +507,11 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
 // === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) ===
 func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	vec := vals[0].(Vector)
-	min := vals[1].(Vector)[0].F
+	minVal := vals[1].(Vector)[0].F
 	for _, el := range vec {
 		enh.Out = append(enh.Out, Sample{
 			Metric: el.Metric.DropMetricName(),
-			F:      math.Max(min, el.F),
+			F:      math.Max(minVal, el.F),
 		})
 	}
 	return enh.Out, nil
@@ -700,13 +710,13 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
 		return enh.Out, nil
 	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
-		max := s.Floats[0].F
+		maxVal := s.Floats[0].F
 		for _, f := range s.Floats {
-			if f.F > max || math.IsNaN(max) {
-				max = f.F
+			if f.F > maxVal || math.IsNaN(maxVal) {
+				maxVal = f.F
 			}
 		}
-		return max
+		return maxVal
 	}), nil
 }
@@ -720,13 +730,13 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
 		return enh.Out, nil
 	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
-		min := s.Floats[0].F
+		minVal := s.Floats[0].F
 		for _, f := range s.Floats {
-			if f.F < min || math.IsNaN(min) {
-				min = f.F
+			if f.F < minVal || math.IsNaN(minVal) {
+				minVal = f.F
 			}
 		}
-		return min
+		return minVal
 	}), nil
 }
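The `math.IsNaN` guard that survives the rename matters: every comparison with NaN is false, so without it a NaN at the start of the window would stick as the running extremum forever. A sketch of the kept behaviour:

```go
package main

import (
	"fmt"
	"math"
)

func maxOver(floats []float64) float64 {
	maxVal := floats[0]
	for _, f := range floats {
		// The IsNaN check lets a real value displace an initial NaN.
		if f > maxVal || math.IsNaN(maxVal) {
			maxVal = f
		}
	}
	return maxVal
}

func main() {
	fmt.Println(maxOver([]float64{math.NaN(), 1, 3, 2})) // 3, not NaN
}
```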
@@ -352,8 +352,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
 // f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f
 // for all the non-nil children of node, recursively.
 func Inspect(node Node, f inspector) {
-	//nolint: errcheck
-	Walk(f, node, nil)
+	Walk(f, node, nil) //nolint:errcheck
 }

 // Children returns a list of all child nodes of a syntax tree node.
@@ -419,7 +418,7 @@ func mergeRanges(first, last Node) posrange.PositionRange {
 	}
 }

-// Item implements the Node interface.
+// PositionRange implements the Node interface.
 // This makes it possible to call mergeRanges on them.
 func (i *Item) PositionRange() posrange.PositionRange {
 	return posrange.PositionRange{
@@ -727,23 +727,23 @@ func lexValueSequence(l *Lexer) stateFn {
 // was only modified to integrate with our lexer.
 func lexEscape(l *Lexer) stateFn {
 	var n int
-	var base, max uint32
+	var base, maxVal uint32

 	ch := l.next()
 	switch ch {
 	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen:
 		return lexString
 	case '0', '1', '2', '3', '4', '5', '6', '7':
-		n, base, max = 3, 8, 255
+		n, base, maxVal = 3, 8, 255
 	case 'x':
 		ch = l.next()
-		n, base, max = 2, 16, 255
+		n, base, maxVal = 2, 16, 255
 	case 'u':
 		ch = l.next()
-		n, base, max = 4, 16, unicode.MaxRune
+		n, base, maxVal = 4, 16, unicode.MaxRune
 	case 'U':
 		ch = l.next()
-		n, base, max = 8, 16, unicode.MaxRune
+		n, base, maxVal = 8, 16, unicode.MaxRune
 	case eof:
 		l.errorf("escape sequence not terminated")
 		return lexString
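For readers following the lexer change: `n`, `base`, and `maxVal` drive a digit loop that folds `n` digits in `base` into a code point, which is then range-checked (see the next hunk). A compact standalone sketch of that decoding step (hexadecimal only, for brevity):

```go
package main

import "fmt"

// decodeEscape folds digits in the given base into a code point x, then
// range-checks it against maxVal and the surrogate range (0xD800-0xDFFF),
// which is invalid in UTF-8 text.
func decodeEscape(digits string, base, maxVal uint32) (rune, error) {
	var x uint32
	for _, ch := range digits {
		var d uint32
		switch {
		case ch >= '0' && ch <= '9':
			d = uint32(ch - '0')
		case ch >= 'a' && ch <= 'f':
			d = uint32(ch-'a') + 10
		default:
			return 0, fmt.Errorf("illegal character %q in escape sequence", ch)
		}
		if d >= base {
			return 0, fmt.Errorf("digit %q out of range for base %d", ch, base)
		}
		x = x*base + d
	}
	if x > maxVal || (0xD800 <= x && x < 0xE000) {
		return 0, fmt.Errorf("escape sequence is an invalid Unicode code point")
	}
	return rune(x), nil
}

func main() {
	r, _ := decodeEscape("263a", 16, 0x10FFFF) // like \u263a
	fmt.Printf("%c\n", r)                      // prints the smiley rune
}
```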
@@ -772,7 +772,7 @@ func lexEscape(l *Lexer) stateFn {
 		}
 	}

-	if x > max || 0xD800 <= x && x < 0xE000 {
+	if x > maxVal || 0xD800 <= x && x < 0xE000 {
 		l.errorf("escape sequence is an invalid Unicode code point")
 	}
 	return lexString
promql/promqltest/testdata/functions.test (vendored, 18 changes)
@@ -523,16 +523,16 @@ load 5m
   node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 0+10x10

 eval_ordered instant at 50m sort_by_label(http_requests, "instance")
-  http_requests{group="production", instance="0", job="api-server"} 100
   http_requests{group="canary", instance="0", job="api-server"} 300
-  http_requests{group="production", instance="0", job="app-server"} 500
   http_requests{group="canary", instance="0", job="app-server"} 700
-  http_requests{group="production", instance="1", job="api-server"} 200
+  http_requests{group="production", instance="0", job="api-server"} 100
+  http_requests{group="production", instance="0", job="app-server"} 500
   http_requests{group="canary", instance="1", job="api-server"} 400
-  http_requests{group="production", instance="1", job="app-server"} 600
   http_requests{group="canary", instance="1", job="app-server"} 800
-  http_requests{group="production", instance="2", job="api-server"} 100
+  http_requests{group="production", instance="1", job="api-server"} 200
+  http_requests{group="production", instance="1", job="app-server"} 600
   http_requests{group="canary", instance="2", job="api-server"} NaN
+  http_requests{group="production", instance="2", job="api-server"} 100

 eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group")
   http_requests{group="canary", instance="0", job="api-server"} 300
@@ -585,14 +585,14 @@ eval_ordered instant at 50m sort_by_label(http_requests, "job", "instance", "gro
 eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance")
 	http_requests{group="production", instance="2", job="api-server"} 100
 	http_requests{group="canary", instance="2", job="api-server"} NaN
-	http_requests{group="canary", instance="1", job="app-server"} 800
 	http_requests{group="production", instance="1", job="app-server"} 600
-	http_requests{group="canary", instance="1", job="api-server"} 400
 	http_requests{group="production", instance="1", job="api-server"} 200
-	http_requests{group="canary", instance="0", job="app-server"} 700
+	http_requests{group="canary", instance="1", job="app-server"} 800
+	http_requests{group="canary", instance="1", job="api-server"} 400
 	http_requests{group="production", instance="0", job="app-server"} 500
-	http_requests{group="canary", instance="0", job="api-server"} 300
 	http_requests{group="production", instance="0", job="api-server"} 100
+	http_requests{group="canary", instance="0", job="app-server"} 700
+	http_requests{group="canary", instance="0", job="api-server"} 300

 eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group")
 	http_requests{group="production", instance="2", job="api-server"} 100
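Note: the reordered expectations above imply a deterministic tie-break: when the requested sort label ("instance") compares equal, series are now ordered by their remaining labels, so "canary" sorts ahead of "production". A rough sketch of that behaviour with stand-in types, not the PromQL engine's actual code:

    package main

    import (
        "fmt"
        "sort"
    )

    type series struct{ group, instance, job string }

    func main() {
        ss := []series{
            {"production", "0", "api-server"},
            {"canary", "0", "api-server"},
            {"production", "0", "app-server"},
            {"canary", "0", "app-server"},
        }
        sort.SliceStable(ss, func(i, j int) bool {
            if ss[i].instance != ss[j].instance {
                return ss[i].instance < ss[j].instance // primary sort key
            }
            // Tie-break on the remaining labels for a stable, predictable order.
            return ss[i].group+ss[i].job < ss[j].group+ss[j].job
        })
        fmt.Println(ss) // canary series come first on instance ties
    }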
@@ -785,6 +785,8 @@ eval_warn instant at 1m rate(some_metric[30s])
 eval_warn instant at 30s rate(some_metric[30s])
+	# Should produce no results.

 clear

 # Histogram with constant buckets.
 load 1m
 	const_histogram {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}}
@@ -828,3 +830,59 @@ eval instant at 5m histogram_stddev(rate(const_histogram[5m]))
 # Zero buckets mean no observations, so there is no standard variance.
 eval instant at 5m histogram_stdvar(rate(const_histogram[5m]))
 	{} NaN
+
+clear
+
+# Test mixing exponential and custom buckets.
+load 6m
+	metric{series="exponential"} {{sum:4 count:3 buckets:[1 2 1]}} _ {{sum:4 count:3 buckets:[1 2 1]}}
+	metric{series="other-exponential"} {{sum:3 count:2 buckets:[1 1 1]}} _ {{sum:3 count:2 buckets:[1 1 1]}}
+	metric{series="custom"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
+	metric{series="other-custom"} _ {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}}
+
+# T=0: only exponential
+# T=6: only custom
+# T=12: mixed, should be ignored and emit a warning
+eval_warn range from 0 to 12m step 6m sum(metric)
+	{} {{sum:7 count:5 buckets:[2 3 2]}} {{schema:-53 sum:16 count:3 custom_values:[5 10] buckets:[1 2]}} _
+
+eval_warn range from 0 to 12m step 6m avg(metric)
+	{} {{sum:3.5 count:2.5 buckets:[1 1.5 1]}} {{schema:-53 sum:8 count:1.5 custom_values:[5 10] buckets:[0.5 1]}} _
+
+clear
+
+# Test incompatible custom bucket schemas.
+load 6m
+	metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
+	metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
+	metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
+
+# T=0: incompatible, should be ignored and emit a warning
+# T=6: compatible
+# T=12: incompatible followed by compatible, should be ignored and emit a warning
+eval_warn range from 0 to 12m step 6m sum(metric)
+	{} _ {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} _
+
+eval_warn range from 0 to 12m step 6m avg(metric)
+	{} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _
+
+clear
+
+load 1m
+	metric{group="just-floats", series="1"} 2
+	metric{group="just-floats", series="2"} 3
+	metric{group="just-exponential-histograms", series="1"} {{sum:3 count:4 buckets:[1 2 1]}}
+	metric{group="just-exponential-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}}
+	metric{group="just-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
+	metric{group="just-custom-histograms", series="2"} {{schema:-53 sum:3 count:4 custom_values:[2] buckets:[7]}}
+	metric{group="floats-and-histograms", series="1"} 2
+	metric{group="floats-and-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}}
+	metric{group="exponential-and-custom-histograms", series="1"} {{sum:2 count:3 buckets:[1 1 1]}}
+	metric{group="exponential-and-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
+	metric{group="incompatible-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
+	metric{group="incompatible-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
+
+eval_warn instant at 0 sum by (group) (metric)
+	{group="just-floats"} 5
+	{group="just-exponential-histograms"} {{sum:5 count:7 buckets:[2 3 2]}}
+	{group="just-custom-histograms"} {{schema:-53 sum:4 count:5 custom_values:[2] buckets:[8]}}
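Note: these tests pin down aggregation behaviour when native histograms disagree on bucket layout: exponential-schema and custom-bucket histograms (schema -53 in the test notation) never aggregate together, and custom-bucket histograms only aggregate when their custom_values match; offending points are skipped with a warning. An illustrative guard of that kind, with assumed types, and the -53 constant taken from the {{schema:-53 ...}} notation above rather than the actual model package:

    package main

    import "fmt"

    const customBucketsSchema = -53 // assumption based on the test notation

    type histogram struct {
        schema       int32
        customValues []float64
    }

    func compatible(a, b histogram) bool {
        aCustom := a.schema == customBucketsSchema
        bCustom := b.schema == customBucketsSchema
        if aCustom != bCustom {
            return false // exponential and custom buckets never mix
        }
        if !aCustom {
            return true // exponential schemas can be reduced to a common one
        }
        if len(a.customValues) != len(b.customValues) {
            return false // differing custom bucket layouts are incompatible
        }
        for i := range a.customValues {
            if a.customValues[i] != b.customValues[i] {
                return false
            }
        }
        return true
    }

    func main() {
        exp := histogram{schema: 0}
        cus := histogram{schema: customBucketsSchema, customValues: []float64{5, 10}}
        fmt.Println(compatible(exp, cus)) // false: sum()/avg() skip the point and warn
    }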
@@ -23,7 +23,7 @@ import (
 	dto "github.com/prometheus/client_model/go"
 )

-// Write a MetricFamily into a protobuf.
+// MetricFamilyToProtobuf writes a MetricFamily into a protobuf.
 // This function is intended for testing scraping by providing protobuf serialized input.
 func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) {
 	buffer := &bytes.Buffer{}

@@ -34,7 +34,7 @@ func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) {
 	return buffer.Bytes(), nil
 }

-// Append a MetricFamily protobuf representation to a buffer.
+// AddMetricFamilyToProtobuf appends a MetricFamily protobuf representation to a buffer.
 // This function is intended for testing scraping by providing protobuf serialized input.
 func AddMetricFamilyToProtobuf(buffer *bytes.Buffer, metricFamily *dto.MetricFamily) error {
 	protoBuf, err := proto.Marshal(metricFamily)
@@ -93,6 +93,8 @@ type Options struct {
 	skipOffsetting bool
 }

+const DefaultNameEscapingScheme = model.ValueEncodingEscaping
+
 // Manager maintains a set of scrape pools and manages start/stop cycles
 // when receiving new target groups from the discovery manager.
 type Manager struct {
@@ -303,6 +303,11 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
 		mrc = sp.config.MetricRelabelConfigs
 	)

+	validationScheme := model.LegacyValidation
+	if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig {
+		validationScheme = model.UTF8Validation
+	}
+
 	sp.targetMtx.Lock()

 	forcedErr := sp.refreshTargetLimitErr()

@@ -323,7 +328,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
 			client:               sp.client,
 			timeout:              timeout,
 			bodySizeLimit:        bodySizeLimit,
-			acceptHeader:         acceptHeader(sp.config.ScrapeProtocols),
+			acceptHeader:         acceptHeader(sp.config.ScrapeProtocols, validationScheme),
 			acceptEncodingHeader: acceptEncodingHeader(enableCompression),
 		}
 		newLoop = sp.newLoop(scrapeLoopOptions{
@@ -452,6 +457,11 @@ func (sp *scrapePool) sync(targets []*Target) {
 		scrapeClassicHistograms = sp.config.ScrapeClassicHistograms
 	)

+	validationScheme := model.LegacyValidation
+	if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig {
+		validationScheme = model.UTF8Validation
+	}
+
 	sp.targetMtx.Lock()
 	for _, t := range targets {
 		hash := t.hash()

@@ -467,7 +477,7 @@ func (sp *scrapePool) sync(targets []*Target) {
 				client:               sp.client,
 				timeout:              timeout,
 				bodySizeLimit:        bodySizeLimit,
-				acceptHeader:         acceptHeader(sp.config.ScrapeProtocols),
+				acceptHeader:         acceptHeader(sp.config.ScrapeProtocols, validationScheme),
 				acceptEncodingHeader: acceptEncodingHeader(enableCompression),
 				metrics:              sp.metrics,
 			}
@@ -714,11 +724,16 @@ var errBodySizeLimit = errors.New("body size limit exceeded")
 // acceptHeader transforms preference from the options into specific header values as
 // https://www.rfc-editor.org/rfc/rfc9110.html#name-accept defines.
 // No validation is here, we expect scrape protocols to be validated already.
-func acceptHeader(sps []config.ScrapeProtocol) string {
+func acceptHeader(sps []config.ScrapeProtocol, scheme model.ValidationScheme) string {
 	var vals []string
 	weight := len(config.ScrapeProtocolsHeaders) + 1
 	for _, sp := range sps {
-		vals = append(vals, fmt.Sprintf("%s;q=0.%d", config.ScrapeProtocolsHeaders[sp], weight))
+		val := config.ScrapeProtocolsHeaders[sp]
+		if scheme == model.UTF8Validation {
+			val += ";" + config.UTF8NamesHeader
+		}
+		val += fmt.Sprintf(";q=0.%d", weight)
+		vals = append(vals, val)
 		weight--
 	}
 	// Default match anything.
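Note: with this change, a UTF-8-capable scraper advertises the escaping parameter on every negotiated media type before the q-weight. A standalone sketch of the resulting header values; the two media types and the escaping=allow-utf-8 parameter are taken from the tests below, not an exhaustive list of what Prometheus negotiates:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        protos := []string{
            "application/openmetrics-text;version=1.0.0",
            "text/plain;version=0.0.4",
        }
        utf8 := true
        weight := len(protos) + 1
        var vals []string
        for _, p := range protos {
            v := p
            if utf8 {
                v += ";escaping=allow-utf-8"
            }
            v += fmt.Sprintf(";q=0.%d", weight)
            vals = append(vals, v)
            weight--
        }
        vals = append(vals, "*/*;q=0.1") // default: match anything (assumed tail value)
        fmt.Println(strings.Join(vals, ","))
    }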
@@ -2339,11 +2339,15 @@ func TestTargetScraperScrapeOK(t *testing.T) {
 	)

 	var protobufParsing bool
+	var allowUTF8 bool

 	server := httptest.NewServer(
 		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-			if protobufParsing {
 			accept := r.Header.Get("Accept")
+			if allowUTF8 {
+				require.Truef(t, strings.Contains(accept, "escaping=allow-utf-8"), "Expected Accept header to allow utf8, got %q", accept)
+			}
+			if protobufParsing {
 				require.True(t, strings.HasPrefix(accept, "application/vnd.google.protobuf;"),
 					"Expected Accept header to prefer application/vnd.google.protobuf.")
 			}

@@ -2351,7 +2355,11 @@ func TestTargetScraperScrapeOK(t *testing.T) {
 			timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds")
 			require.Equal(t, expectedTimeout, timeout, "Expected scrape timeout header.")

+			if allowUTF8 {
+				w.Header().Set("Content-Type", `text/plain; version=1.0.0; escaping=allow-utf-8`)
+			} else {
 				w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
+			}
 			w.Write([]byte("metric_a 1\nmetric_b 2\n"))
 		}),
 	)

@@ -2380,13 +2388,22 @@ func TestTargetScraperScrapeOK(t *testing.T) {
 		require.NoError(t, err)
 		contentType, err := ts.readResponse(context.Background(), resp, &buf)
 		require.NoError(t, err)
+		if allowUTF8 {
+			require.Equal(t, "text/plain; version=1.0.0; escaping=allow-utf-8", contentType)
+		} else {
 			require.Equal(t, "text/plain; version=0.0.4", contentType)
+		}
 		require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
 	}

-	runTest(acceptHeader(config.DefaultScrapeProtocols))
+	runTest(acceptHeader(config.DefaultScrapeProtocols, model.LegacyValidation))
 	protobufParsing = true
-	runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols))
+	runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.LegacyValidation))
+	protobufParsing = false
+	allowUTF8 = true
+	runTest(acceptHeader(config.DefaultScrapeProtocols, model.UTF8Validation))
+	protobufParsing = true
+	runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.UTF8Validation))
 }

 func TestTargetScrapeScrapeCancel(t *testing.T) {

@@ -2412,7 +2429,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
 			),
 		},
 		client:       http.DefaultClient,
-		acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols),
+		acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation),
 	}
 	ctx, cancel := context.WithCancel(context.Background())

@@ -2467,7 +2484,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
 			),
 		},
 		client:       http.DefaultClient,
-		acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols),
+		acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation),
 	}

 	resp, err := ts.scrape(context.Background())

@@ -2511,7 +2528,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
 		},
 		client:        http.DefaultClient,
 		bodySizeLimit: bodySizeLimit,
-		acceptHeader:  acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols),
+		acceptHeader:  acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation),
 		metrics:       newTestScrapeMetrics(t),
 	}
 	var buf bytes.Buffer
scrape/testdata/ca.cer (vendored): 76 lines changed
@@ -1,8 +1,66 @@
+Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number:
+            93:6c:9e:29:8d:37:7b:66
+        Signature Algorithm: sha256WithRSAEncryption
+        Issuer: C = XX, L = Default City, O = Default Company Ltd, CN = Prometheus Test CA
+        Validity
+            Not Before: Aug 20 11:51:23 2024 GMT
+            Not After : Dec 5 11:51:23 2044 GMT
+        Subject: C = XX, L = Default City, O = Default Company Ltd, CN = Prometheus Test CA
+        Subject Public Key Info:
+            Public Key Algorithm: rsaEncryption
+                Public-Key: (2048 bit)
+                Modulus:
+                    00:e9:52:05:4d:f2:5a:95:04:2d:b8:73:8b:3c:e7:
+                    47:48:67:00:be:dd:6c:41:f3:7c:4b:44:73:a3:77:
+                    3e:84:af:30:d7:2a:ad:45:6a:b7:89:23:05:15:b3:
+                    aa:46:79:b8:95:64:cc:13:c4:44:a1:01:a0:e2:3d:
+                    a5:67:2b:aa:d3:13:06:43:33:1c:96:36:12:9e:c6:
+                    1d:36:9b:d7:47:bd:28:2d:88:15:04:fa:14:a3:ff:
+                    8c:26:22:c5:a2:15:c7:76:b3:11:f6:a3:44:9a:28:
+                    0f:ca:fb:f4:51:a8:6a:05:94:7c:77:47:c8:21:56:
+                    25:bf:e2:2e:df:33:f3:e4:bd:d6:47:a5:49:13:12:
+                    c8:1f:4c:d7:2a:56:a2:6c:c1:cf:55:05:5d:9a:75:
+                    a2:23:4e:e6:8e:ff:76:05:d6:e0:c8:0b:51:f0:b6:
+                    a1:b2:7d:8f:9c:6a:a5:ce:86:92:15:8c:5b:86:45:
+                    c0:4a:ff:54:b8:ee:cf:11:bd:07:cb:4b:7d:0b:a1:
+                    9d:72:86:9f:55:bc:f9:6c:d9:55:60:96:30:3f:ec:
+                    2d:f6:5f:9a:32:9a:5a:5c:1c:5f:32:f9:d6:0f:04:
+                    f8:81:08:04:9a:95:c3:9d:5a:30:8e:a5:0e:47:2f:
+                    00:ce:e0:2e:ad:5a:b8:b6:4c:55:7c:8a:59:22:b0:
+                    ed:73
+                Exponent: 65537 (0x10001)
+        X509v3 extensions:
+            X509v3 Subject Key Identifier:
+                CC:F5:05:99:E5:AB:12:69:D8:78:89:4A:31:CA:F0:8B:0B:AD:66:1B
+            X509v3 Authority Key Identifier:
+                CC:F5:05:99:E5:AB:12:69:D8:78:89:4A:31:CA:F0:8B:0B:AD:66:1B
+            X509v3 Basic Constraints:
+                CA:TRUE
+    Signature Algorithm: sha256WithRSAEncryption
+    Signature Value:
+        4a:a1:b0:bc:c8:87:4f:7c:96:62:e5:09:29:ae:3a:2e:68:d0:
+        d2:c5:68:ed:ea:83:36:b1:86:f3:b9:e9:19:2b:b6:73:10:6f:
+        df:7f:bb:f1:76:81:03:c1:a1:5a:ee:6c:44:b8:7c:10:d1:5a:
+        d7:c1:92:64:59:35:a6:e0:aa:08:41:37:6e:e7:c8:b6:bd:0c:
+        4b:47:78:ec:c4:b4:15:a3:62:76:4a:39:8e:6e:19:ff:f0:c0:
+        8a:7e:1c:cd:87:e5:00:6c:f1:ce:27:26:ff:b8:e9:eb:f7:2f:
+        bd:c2:4b:9c:d6:57:de:74:74:b3:4f:03:98:9a:b5:08:2d:16:
+        ca:7f:b6:c8:76:62:86:1b:7c:f2:3e:6c:78:cc:2c:95:9a:bb:
+        77:25:e8:80:ff:9b:e8:f8:9a:85:3b:85:b7:17:4e:77:a1:cf:
+        4d:b9:d0:25:e8:5d:8c:e6:7c:f1:d9:52:30:3d:ec:2b:37:91:
+        bc:e2:e8:39:31:6f:3d:e9:98:70:80:7c:41:dd:19:13:05:21:
+        94:7b:16:cf:d8:ee:4e:38:34:5e:6a:ff:cd:85:ac:8f:94:9a:
+        dd:4e:77:05:13:a6:b4:80:52:b2:97:64:76:88:f4:dd:42:0a:
+        50:1c:80:fd:4b:6e:a9:62:10:aa:ef:2e:c1:2f:be:0e:c2:2e:
+        b5:28:5f:83
 -----BEGIN CERTIFICATE-----
 MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV
 BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
-Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4
-MDQxNDA5MjFaFw0yNTA4MDExNDA5MjFaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH
+Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0yNDA4
+MjAxMTUxMjNaFw00NDEyMDUxMTUxMjNaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH
 DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ
 BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
 ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq

@@ -12,11 +70,11 @@ yB9M1ypWomzBz1UFXZp1oiNO5o7/dgXW4MgLUfC2obJ9j5xqpc6GkhWMW4ZFwEr/
 VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV
 w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1
 BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL
-rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI2iA3w3TK5J15Pu
-e4fPFB4jxQqsbUwuyXbCCv/jKLeFNCD4BjM181WZEYjPMumeTBVzU3aF45LWQIG1
-0DJcrCL4mjMz9qgAoGqA7aDDXiJGbukMgYYsn7vrnVmrZH8T3E8ySlltr7+W578k
-pJ5FxnbCroQwn0zLyVB3sFbS8E3vpBr3L8oy8PwPHhIScexcNVc3V6/m4vTZsXTH
-U+vUm1XhDgpDcFMTg2QQiJbfpOYUkwIgnRDAT7t282t2KQWtnlqc3zwPQ1F/6Cpx
-j19JeNsaF1DArkD7YlyKj/GhZLtHwFHG5cxznH0mLDJTW7bQvqqh2iQTeXmBk1lU
-mM5lH/s=
+rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEqhsLzIh098lmLl
+CSmuOi5o0NLFaO3qgzaxhvO56RkrtnMQb99/u/F2gQPBoVrubES4fBDRWtfBkmRZ
+NabgqghBN27nyLa9DEtHeOzEtBWjYnZKOY5uGf/wwIp+HM2H5QBs8c4nJv+46ev3
+L73CS5zWV950dLNPA5iatQgtFsp/tsh2YoYbfPI+bHjMLJWau3cl6ID/m+j4moU7
+hbcXTnehz0250CXoXYzmfPHZUjA97Cs3kbzi6Dkxbz3pmHCAfEHdGRMFIZR7Fs/Y
+7k44NF5q/82FrI+Umt1OdwUTprSAUrKXZHaI9N1CClAcgP1LbqliEKrvLsEvvg7C
+LrUoX4M=
 -----END CERTIFICATE-----
@@ -36,4 +36,4 @@ jobs:
         uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
         with:
           args: --verbose
-          version: v1.59.1
+          version: v1.60.2
@@ -96,10 +96,10 @@ func TestSampleRingMixed(t *testing.T) {

 	// With ValNone as the preferred type, nothing should be initialized.
 	r := newSampleRing(10, 2, chunkenc.ValNone)
-	require.Zero(t, len(r.fBuf))
-	require.Zero(t, len(r.hBuf))
-	require.Zero(t, len(r.fhBuf))
-	require.Zero(t, len(r.iBuf))
+	require.Empty(t, r.fBuf)
+	require.Empty(t, r.hBuf)
+	require.Empty(t, r.fhBuf)
+	require.Empty(t, r.iBuf)

 	// But then mixed adds should work as expected.
 	r.addF(fSample{t: 1, f: 3.14})

@@ -146,10 +146,10 @@ func TestSampleRingAtFloatHistogram(t *testing.T) {

 	// With ValNone as the preferred type, nothing should be initialized.
 	r := newSampleRing(10, 2, chunkenc.ValNone)
-	require.Zero(t, len(r.fBuf))
-	require.Zero(t, len(r.hBuf))
-	require.Zero(t, len(r.fhBuf))
-	require.Zero(t, len(r.iBuf))
+	require.Empty(t, r.fBuf)
+	require.Empty(t, r.hBuf)
+	require.Empty(t, r.fhBuf)
+	require.Empty(t, r.iBuf)

 	var (
 		h *histogram.Histogram
@@ -228,9 +228,9 @@ type LabelHints struct {
 	Limit int
 }

-// TODO(bwplotka): Move to promql/engine_test.go?
 // QueryableFunc is an adapter to allow the use of ordinary functions as
 // Queryables. It follows the idea of http.HandlerFunc.
+// TODO(bwplotka): Move to promql/engine_test.go?
 type QueryableFunc func(mint, maxt int64) (Querier, error)

 // Querier calls f() with the given parameters.
@@ -31,13 +31,15 @@ import (
 	"github.com/google/uuid"
 )

-const (
 // Clouds.
+const (
 	AzureChina      = "AzureChina"
 	AzureGovernment = "AzureGovernment"
 	AzurePublic     = "AzurePublic"
+)

 // Audiences.
+const (
 	IngestionChinaAudience      = "https://monitor.azure.cn//.default"
 	IngestionGovernmentAudience = "https://monitor.azure.us//.default"
 	IngestionPublicAudience     = "https://monitor.azure.com//.default"
@@ -287,7 +287,6 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo
 	// we can continue handling.
 	rs, _ := ParseWriteResponseStats(httpResp)

-	//nolint:usestdlibvars
 	if httpResp.StatusCode/100 == 2 {
 		return rs, nil
 	}

@@ -297,7 +296,6 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo
 	body, _ := io.ReadAll(io.LimitReader(httpResp.Body, maxErrMsgLen))
 	err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, body)

-	//nolint:usestdlibvars
 	if httpResp.StatusCode/100 == 5 ||
 		(c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) {
 		return rs, RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}

@@ -382,7 +380,6 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
 		return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err)
 	}

-	//nolint:usestdlibvars
 	if httpResp.StatusCode/100 != 2 {
 		return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed)))
 	}
@@ -26,6 +26,7 @@ import (

 	"github.com/prometheus/prometheus/model/value"
 	"github.com/prometheus/prometheus/prompb"
+	"github.com/prometheus/prometheus/util/annotations"
 )

 const defaultZeroThreshold = 1e-128

@@ -33,13 +34,15 @@ const defaultZeroThreshold = 1e-128
 // addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series
 // as native histogram samples.
 func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice,
-	resource pcommon.Resource, settings Settings, promName string) error {
+	resource pcommon.Resource, settings Settings, promName string) (annotations.Annotations, error) {
+	var annots annotations.Annotations
 	for x := 0; x < dataPoints.Len(); x++ {
 		pt := dataPoints.At(x)

-		histogram, err := exponentialToNativeHistogram(pt)
+		histogram, ws, err := exponentialToNativeHistogram(pt)
+		annots.Merge(ws)
 		if err != nil {
-			return err
+			return annots, err
 		}

 		lbls := createAttributes(

@@ -58,15 +61,16 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr
 		ts.Exemplars = append(ts.Exemplars, exemplars...)
 	}

-	return nil
+	return annots, nil
 }

 // exponentialToNativeHistogram translates OTel Exponential Histogram data point
 // to Prometheus Native Histogram.
-func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, error) {
+func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, annotations.Annotations, error) {
+	var annots annotations.Annotations
 	scale := p.Scale()
 	if scale < -4 {
-		return prompb.Histogram{},
+		return prompb.Histogram{}, annots,
 			fmt.Errorf("cannot convert exponential to native histogram."+
 				" Scale must be >= -4, was %d", scale)
 	}

@@ -114,8 +118,11 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prom
 		h.Sum = p.Sum()
 	}
 	h.Count = &prompb.Histogram_CountInt{CountInt: p.Count()}
+	if p.Count() == 0 && h.Sum != 0 {
+		annots.Add(fmt.Errorf("exponential histogram data point has zero count, but non-zero sum: %f", h.Sum))
+	}
-	return h, nil
+	return h, annots, nil
 }

 // convertBucketsLayout translates OTel Exponential Histogram dense buckets
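Note: the conversion now distinguishes fatal errors (the data point cannot be represented at all) from non-fatal annotations (the point converts, but looks suspicious, such as a zero count with a non-zero sum). A compact sketch of that split, using a stand-in annotations type rather than Prometheus' util/annotations package:

    package main

    import "fmt"

    type annotations []error

    func (a *annotations) add(err error) { *a = append(*a, err) }

    func convert(scale int32, count uint64, sum float64) (annotations, error) {
        var annots annotations
        if scale < -4 {
            // Fatal: the point is dropped and an error is returned.
            return annots, fmt.Errorf("scale must be >= -4, was %d", scale)
        }
        if count == 0 && sum != 0 {
            // Non-fatal: the point is kept, but a warning annotation is recorded.
            annots.add(fmt.Errorf("zero count, but non-zero sum: %f", sum))
        }
        return annots, nil
    }

    func main() {
        annots, err := convert(0, 0, 155)
        fmt.Println(annots, err) // one warning, no error
    }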
@@ -27,6 +27,7 @@ import (

 	"github.com/prometheus/prometheus/prompb"
 	prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
+	"github.com/prometheus/prometheus/util/annotations"
 )

 type Settings struct {

@@ -53,7 +54,7 @@ func NewPrometheusConverter() *PrometheusConverter {
 }

 // FromMetrics converts pmetric.Metrics to Prometheus remote write format.
-func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings) (errs error) {
+func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings) (annots annotations.Annotations, errs error) {
 	resourceMetricsSlice := md.ResourceMetrics()
 	for i := 0; i < resourceMetricsSlice.Len(); i++ {
 		resourceMetrics := resourceMetricsSlice.At(i)

@@ -107,12 +108,14 @@ func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings)
 				errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
 				break
 			}
-			errs = multierr.Append(errs, c.addExponentialHistogramDataPoints(
+			ws, err := c.addExponentialHistogramDataPoints(
 				dataPoints,
 				resource,
 				settings,
 				promName,
-			))
+			)
+			annots.Merge(ws)
+			errs = multierr.Append(errs, err)
 		case pmetric.MetricTypeSummary:
 			dataPoints := metric.Summary().DataPoints()
 			if dataPoints.Len() == 0 {

@@ -128,7 +131,7 @@ func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings)
 		addResourceTargetInfo(resource, settings, mostRecentTimestamp, c)
 	}

-	return
+	return annots, errs
 }

 func isSameMetric(ts *prompb.TimeSeries, lbls []prompb.Label) bool {
@@ -27,6 +27,41 @@ import (
 	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
 )

+func TestFromMetrics(t *testing.T) {
+	t.Run("exponential histogram warnings for zero count and non-zero sum", func(t *testing.T) {
+		request := pmetricotlp.NewExportRequest()
+		rm := request.Metrics().ResourceMetrics().AppendEmpty()
+		generateAttributes(rm.Resource().Attributes(), "resource", 10)
+
+		metrics := rm.ScopeMetrics().AppendEmpty().Metrics()
+		ts := pcommon.NewTimestampFromTime(time.Now())
+
+		for i := 1; i <= 10; i++ {
+			m := metrics.AppendEmpty()
+			m.SetEmptyExponentialHistogram()
+			m.SetName(fmt.Sprintf("histogram-%d", i))
+			m.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+			h := m.ExponentialHistogram().DataPoints().AppendEmpty()
+			h.SetTimestamp(ts)
+
+			h.SetCount(0)
+			h.SetSum(155)
+
+			generateAttributes(h.Attributes(), "series", 10)
+		}
+
+		converter := NewPrometheusConverter()
+		annots, err := converter.FromMetrics(request.Metrics(), Settings{})
+		require.NoError(t, err)
+		require.NotEmpty(t, annots)
+		ws, infos := annots.AsStrings("", 0, 0)
+		require.Empty(t, infos)
+		require.Equal(t, []string{
+			"exponential histogram data point has zero count, but non-zero sum: 155.000000",
+		}, ws)
+	})
+}
+
 func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) {
 	for _, resourceAttributeCount := range []int{0, 5, 50} {
 		b.Run(fmt.Sprintf("resource attribute count: %v", resourceAttributeCount), func(b *testing.B) {

@@ -49,7 +84,9 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) {

 			for i := 0; i < b.N; i++ {
 				converter := NewPrometheusConverter()
-				require.NoError(b, converter.FromMetrics(payload.Metrics(), Settings{}))
+				annots, err := converter.FromMetrics(payload.Metrics(), Settings{})
+				require.NoError(b, err)
+				require.Empty(b, annots)
 				require.NotNil(b, converter.TimeSeries())
 			}
 		})
@@ -1522,7 +1522,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
 	// Send batches of at most MaxSamplesPerSend samples to the remote storage.
 	// If we have fewer samples than that, flush them out after a deadline anyways.
 	var (
-		max = s.qm.cfg.MaxSamplesPerSend
+		maxCount = s.qm.cfg.MaxSamplesPerSend

 		pBuf    = proto.NewBuffer(nil)
 		pBufRaw []byte

@@ -1530,19 +1530,19 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
 	)
 	// TODO(@tpaschalis) Should we also raise the max if we have WAL metadata?
 	if s.qm.sendExemplars {
-		max += int(float64(max) * 0.1)
+		maxCount += int(float64(maxCount) * 0.1)
 	}

 	// TODO: Dry all of this, we should make an interface/generic for the timeseries type.
 	batchQueue := queue.Chan()
-	pendingData := make([]prompb.TimeSeries, max)
+	pendingData := make([]prompb.TimeSeries, maxCount)
 	for i := range pendingData {
 		pendingData[i].Samples = []prompb.Sample{{}}
 		if s.qm.sendExemplars {
 			pendingData[i].Exemplars = []prompb.Exemplar{{}}
 		}
 	}
-	pendingDataV2 := make([]writev2.TimeSeries, max)
+	pendingDataV2 := make([]writev2.TimeSeries, maxCount)
 	for i := range pendingDataV2 {
 		pendingDataV2[i].Samples = []writev2.Sample{{}}
 	}
@@ -930,7 +930,7 @@ func createHistograms(numSamples, numSeries int, floatHistogram bool) ([]record.
 }

 func createSeriesMetadata(series []record.RefSeries) []record.RefMetadata {
-	metas := make([]record.RefMetadata, len(series))
+	metas := make([]record.RefMetadata, 0, len(series))

 	for _, s := range series {
 		metas = append(metas, record.RefMetadata{
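Note: this one-character-looking change fixes a real bug: make([]T, n) allocates n zero values, so appending n more records left n empty metadata entries at the front of the slice. A self-contained demonstration:

    package main

    import "fmt"

    func main() {
        // Before: length 3 plus 3 appends leaves zero values at the front.
        wrong := make([]int, 3)
        wrong = append(wrong, 1, 2, 3)
        fmt.Println(wrong) // [0 0 0 1 2 3]

        // After: zero length with capacity 3, as createSeriesMetadata now does.
        right := make([]int, 0, 3)
        right = append(right, 1, 2, 3)
        fmt.Println(right) // [1 2 3]
    }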
@@ -502,12 +502,17 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	otlpCfg := h.configFunc().OTLPConfig

 	converter := otlptranslator.NewPrometheusConverter()
-	if err := converter.FromMetrics(req.Metrics(), otlptranslator.Settings{
+	annots, err := converter.FromMetrics(req.Metrics(), otlptranslator.Settings{
 		AddMetricSuffixes:         true,
 		PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes,
-	}); err != nil {
+	})
+	if err != nil {
 		level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err)
 	}
+	ws, _ := annots.AsStrings("", 0, 0)
+	if len(ws) > 0 {
+		level.Warn(h.logger).Log("msg", "Warnings translating OTLP metrics to Prometheus write request", "warnings", ws)
+	}

 	err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{
 		Timeseries: converter.TimeSeries(),
@@ -453,10 +453,10 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
 				expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenHistogramsHeader))
 				expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader))

-				require.Empty(t, len(appendable.samples))
-				require.Empty(t, len(appendable.histograms))
-				require.Empty(t, len(appendable.exemplars))
-				require.Empty(t, len(appendable.metadata))
+				require.Empty(t, appendable.samples)
+				require.Empty(t, appendable.histograms)
+				require.Empty(t, appendable.exemplars)
+				require.Empty(t, appendable.metadata)
 				return
 			}
@@ -72,7 +72,7 @@ func TestListSeriesIterator(t *testing.T) {
 	require.Equal(t, chunkenc.ValNone, it.Seek(2))
 }

-// TestSeriesSetToChunkSet test the property of SeriesSet that says
+// TestChunkSeriesSetToSeriesSet test the property of SeriesSet that says
 // returned series should be iterable even after Next is called.
 func TestChunkSeriesSetToSeriesSet(t *testing.T) {
 	series := []struct {
@@ -166,7 +166,7 @@ func NewTemplateExpander(
 			return html_template.HTML(text)
 		},
 		"match":     regexp.MatchString,
-		"title":     strings.Title, //nolint:staticcheck
+		"title":     strings.Title, //nolint:staticcheck // TODO(beorn7): Need to come up with a replacement using the cases package.
 		"toUpper":   strings.ToUpper,
 		"toLower":   strings.ToLower,
 		"graphLink": strutil.GraphLinkForExpression,
tracing/testdata/ca.cer (vendored): 76 lines changed
@@ -1,8 +1,66 @@
+Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number:
+            93:6c:9e:29:8d:37:7b:66
+        Signature Algorithm: sha256WithRSAEncryption
+        Issuer: C = XX, L = Default City, O = Default Company Ltd, CN = Prometheus Test CA
+        Validity
+            Not Before: Aug 20 11:51:23 2024 GMT
+            Not After : Dec 5 11:51:23 2044 GMT
+        Subject: C = XX, L = Default City, O = Default Company Ltd, CN = Prometheus Test CA
+        Subject Public Key Info:
+            Public Key Algorithm: rsaEncryption
+                Public-Key: (2048 bit)
+                Modulus:
+                    00:e9:52:05:4d:f2:5a:95:04:2d:b8:73:8b:3c:e7:
+                    47:48:67:00:be:dd:6c:41:f3:7c:4b:44:73:a3:77:
+                    3e:84:af:30:d7:2a:ad:45:6a:b7:89:23:05:15:b3:
+                    aa:46:79:b8:95:64:cc:13:c4:44:a1:01:a0:e2:3d:
+                    a5:67:2b:aa:d3:13:06:43:33:1c:96:36:12:9e:c6:
+                    1d:36:9b:d7:47:bd:28:2d:88:15:04:fa:14:a3:ff:
+                    8c:26:22:c5:a2:15:c7:76:b3:11:f6:a3:44:9a:28:
+                    0f:ca:fb:f4:51:a8:6a:05:94:7c:77:47:c8:21:56:
+                    25:bf:e2:2e:df:33:f3:e4:bd:d6:47:a5:49:13:12:
+                    c8:1f:4c:d7:2a:56:a2:6c:c1:cf:55:05:5d:9a:75:
+                    a2:23:4e:e6:8e:ff:76:05:d6:e0:c8:0b:51:f0:b6:
+                    a1:b2:7d:8f:9c:6a:a5:ce:86:92:15:8c:5b:86:45:
+                    c0:4a:ff:54:b8:ee:cf:11:bd:07:cb:4b:7d:0b:a1:
+                    9d:72:86:9f:55:bc:f9:6c:d9:55:60:96:30:3f:ec:
+                    2d:f6:5f:9a:32:9a:5a:5c:1c:5f:32:f9:d6:0f:04:
+                    f8:81:08:04:9a:95:c3:9d:5a:30:8e:a5:0e:47:2f:
+                    00:ce:e0:2e:ad:5a:b8:b6:4c:55:7c:8a:59:22:b0:
+                    ed:73
+                Exponent: 65537 (0x10001)
+        X509v3 extensions:
+            X509v3 Subject Key Identifier:
+                CC:F5:05:99:E5:AB:12:69:D8:78:89:4A:31:CA:F0:8B:0B:AD:66:1B
+            X509v3 Authority Key Identifier:
+                CC:F5:05:99:E5:AB:12:69:D8:78:89:4A:31:CA:F0:8B:0B:AD:66:1B
+            X509v3 Basic Constraints:
+                CA:TRUE
+    Signature Algorithm: sha256WithRSAEncryption
+    Signature Value:
+        4a:a1:b0:bc:c8:87:4f:7c:96:62:e5:09:29:ae:3a:2e:68:d0:
+        d2:c5:68:ed:ea:83:36:b1:86:f3:b9:e9:19:2b:b6:73:10:6f:
+        df:7f:bb:f1:76:81:03:c1:a1:5a:ee:6c:44:b8:7c:10:d1:5a:
+        d7:c1:92:64:59:35:a6:e0:aa:08:41:37:6e:e7:c8:b6:bd:0c:
+        4b:47:78:ec:c4:b4:15:a3:62:76:4a:39:8e:6e:19:ff:f0:c0:
+        8a:7e:1c:cd:87:e5:00:6c:f1:ce:27:26:ff:b8:e9:eb:f7:2f:
+        bd:c2:4b:9c:d6:57:de:74:74:b3:4f:03:98:9a:b5:08:2d:16:
+        ca:7f:b6:c8:76:62:86:1b:7c:f2:3e:6c:78:cc:2c:95:9a:bb:
+        77:25:e8:80:ff:9b:e8:f8:9a:85:3b:85:b7:17:4e:77:a1:cf:
+        4d:b9:d0:25:e8:5d:8c:e6:7c:f1:d9:52:30:3d:ec:2b:37:91:
+        bc:e2:e8:39:31:6f:3d:e9:98:70:80:7c:41:dd:19:13:05:21:
+        94:7b:16:cf:d8:ee:4e:38:34:5e:6a:ff:cd:85:ac:8f:94:9a:
+        dd:4e:77:05:13:a6:b4:80:52:b2:97:64:76:88:f4:dd:42:0a:
+        50:1c:80:fd:4b:6e:a9:62:10:aa:ef:2e:c1:2f:be:0e:c2:2e:
+        b5:28:5f:83
 -----BEGIN CERTIFICATE-----
 MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV
 BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
-Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4
-MDQxNDA5MjFaFw0yNTA4MDExNDA5MjFaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH
+Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0yNDA4
+MjAxMTUxMjNaFw00NDEyMDUxMTUxMjNaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH
 DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ
 BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
 ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq

@@ -12,11 +70,11 @@ yB9M1ypWomzBz1UFXZp1oiNO5o7/dgXW4MgLUfC2obJ9j5xqpc6GkhWMW4ZFwEr/
 VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV
 w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1
 BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL
-rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI2iA3w3TK5J15Pu
-e4fPFB4jxQqsbUwuyXbCCv/jKLeFNCD4BjM181WZEYjPMumeTBVzU3aF45LWQIG1
-0DJcrCL4mjMz9qgAoGqA7aDDXiJGbukMgYYsn7vrnVmrZH8T3E8ySlltr7+W578k
-pJ5FxnbCroQwn0zLyVB3sFbS8E3vpBr3L8oy8PwPHhIScexcNVc3V6/m4vTZsXTH
-U+vUm1XhDgpDcFMTg2QQiJbfpOYUkwIgnRDAT7t282t2KQWtnlqc3zwPQ1F/6Cpx
-j19JeNsaF1DArkD7YlyKj/GhZLtHwFHG5cxznH0mLDJTW7bQvqqh2iQTeXmBk1lU
-mM5lH/s=
+rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEqhsLzIh098lmLl
+CSmuOi5o0NLFaO3qgzaxhvO56RkrtnMQb99/u/F2gQPBoVrubES4fBDRWtfBkmRZ
+NabgqghBN27nyLa9DEtHeOzEtBWjYnZKOY5uGf/wwIp+HM2H5QBs8c4nJv+46ev3
+L73CS5zWV950dLNPA5iatQgtFsp/tsh2YoYbfPI+bHjMLJWau3cl6ID/m+j4moU7
+hbcXTnehz0250CXoXYzmfPHZUjA97Cs3kbzi6Dkxbz3pmHCAfEHdGRMFIZR7Fs/Y
+7k44NF5q/82FrI+Umt1OdwUTprSAUrKXZHaI9N1CClAcgP1LbqliEKrvLsEvvg7C
+LrUoX4M=
 -----END CERTIFICATE-----
@@ -133,6 +133,9 @@ type Meta struct {
 	// Time range the data covers.
 	// When MaxTime == math.MaxInt64 the chunk is still open and being appended to.
 	MinTime, MaxTime int64
+
+	// Flag to indicate that this meta needs merge with OOO data.
+	MergeOOO bool
 }

 // ChunkFromSamples requires all samples to have the same type.
@@ -191,7 +191,7 @@ func (f *chunkPos) bytesToWriteForChunk(chkLen uint64) uint64 {
 // ChunkDiskMapper is for writing the Head block chunks to disk
 // and access chunks via mmapped files.
 type ChunkDiskMapper struct {
-	/// Writer.
+	// Writer.
 	dir             *os.File
 	writeBufferSize int

@@ -210,7 +210,7 @@ type ChunkDiskMapper struct {
 	crc32        hash.Hash
 	writePathMtx sync.Mutex

-	/// Reader.
+	// Reader.
 	// The int key in the map is the file number on the disk.
 	mmappedChunkFiles map[int]*mmappedChunkFile // Contains the m-mapped files for each chunk file mapped with its index.
 	closers           map[int]io.Closer         // Closers for resources behind the byte slices.
@@ -69,16 +69,16 @@ func TestQueuePushPopSingleGoroutine(t *testing.T) {
 	const maxSize = 500
 	const maxIters = 50

-	for max := 1; max < maxSize; max++ {
-		queue := newWriteJobQueue(max, 1+(r.Int()%max))
+	for maxCount := 1; maxCount < maxSize; maxCount++ {
+		queue := newWriteJobQueue(maxCount, 1+(r.Int()%maxCount))

 		elements := 0 // total elements in the queue
 		lastWriteID := 0
 		lastReadID := 0

 		for iter := 0; iter < maxIters; iter++ {
-			if elements < max {
-				toWrite := r.Int() % (max - elements)
+			if elements < maxCount {
+				toWrite := r.Int() % (maxCount - elements)
 				if toWrite == 0 {
 					toWrite = 1
 				}
tsdb/db.go: 85 lines changed
@@ -49,7 +49,7 @@ import (
 )

 const (
-	// Default duration of a block in milliseconds.
+	// DefaultBlockDuration in milliseconds.
 	DefaultBlockDuration = int64(2 * time.Hour / time.Millisecond)

 	// Block dir suffixes to make deletion and creation operations atomic.
@@ -699,7 +699,7 @@ func (db *DBReadOnly) LastBlockID() (string, error) {
 		return "", err
 	}

-	max := uint64(0)
+	maxT := uint64(0)

 	lastBlockID := ""

@@ -711,8 +711,8 @@ func (db *DBReadOnly) LastBlockID() (string, error) {
 			continue // Not a block dir.
 		}
 		timestamp := ulidObj.Time()
-		if timestamp > max {
-			max = timestamp
+		if timestamp > maxT {
+			maxT = timestamp
 			lastBlockID = dirName
 		}
 	}
@@ -2046,7 +2046,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
 		}
 	}

-	blockQueriers := make([]storage.Querier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers
+	blockQueriers := make([]storage.Querier, 0, len(blocks)+1) // +1 to allow for possible head querier.

 	defer func() {
 		if err != nil {

@@ -2058,10 +2058,12 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
 		}
 	}()

-	if maxt >= db.head.MinTime() {
+	overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime())
+	var headQuerier storage.Querier
+	if maxt >= db.head.MinTime() || overlapsOOO {
 		rh := NewRangeHead(db.head, mint, maxt)
-		inOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
+		var err error
+		headQuerier, err = db.blockQuerierFunc(rh, mint, maxt)
 		if err != nil {
 			return nil, fmt.Errorf("open block querier for head %s: %w", rh, err)
 		}

@@ -2071,36 +2073,28 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
 		// won't run into a race later since any truncation that comes after will wait on this querier if it overlaps.
 		shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt)
 		if shouldClose {
-			if err := inOrderHeadQuerier.Close(); err != nil {
+			if err := headQuerier.Close(); err != nil {
 				return nil, fmt.Errorf("closing head block querier %s: %w", rh, err)
 			}
-			inOrderHeadQuerier = nil
+			headQuerier = nil
 		}
 		if getNew {
 			rh := NewRangeHead(db.head, newMint, maxt)
-			inOrderHeadQuerier, err = db.blockQuerierFunc(rh, newMint, maxt)
+			headQuerier, err = db.blockQuerierFunc(rh, newMint, maxt)
 			if err != nil {
 				return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err)
 			}
 		}
-
-		if inOrderHeadQuerier != nil {
-			blockQueriers = append(blockQueriers, inOrderHeadQuerier)
-		}
 	}

-	if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
-		rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
-		var err error
-		outOfOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
-		if err != nil {
-			// If BlockQuerierFunc() failed, make sure to clean up the pending read created by NewOOORangeHead.
-			rh.isoState.Close()
-
-			return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err)
-		}
+	if overlapsOOO {
+		// We need to fetch from in-order and out-of-order chunks: wrap the headQuerier.
+		isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef)
+		headQuerier = NewHeadAndOOOQuerier(mint, maxt, db.head, isoState, headQuerier)
+	}

-		blockQueriers = append(blockQueriers, outOfOrderHeadQuerier)
+	if headQuerier != nil {
+		blockQueriers = append(blockQueriers, headQuerier)
 	}

 	for _, b := range blocks {
@@ -2128,7 +2122,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
 		}
 	}

-	blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers
+	blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+1) // +1 to allow for possible head querier.

 	defer func() {
 		if err != nil {

@@ -2140,9 +2134,11 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
 		}
 	}()

-	if maxt >= db.head.MinTime() {
+	overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime())
+	var headQuerier storage.ChunkQuerier
+	if maxt >= db.head.MinTime() || overlapsOOO {
 		rh := NewRangeHead(db.head, mint, maxt)
-		inOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt)
+		headQuerier, err = db.blockChunkQuerierFunc(rh, mint, maxt)
 		if err != nil {
 			return nil, fmt.Errorf("open querier for head %s: %w", rh, err)
 		}

@@ -2152,35 +2148,28 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
 		// won't run into a race later since any truncation that comes after will wait on this querier if it overlaps.
 		shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt)
 		if shouldClose {
-			if err := inOrderHeadQuerier.Close(); err != nil {
+			if err := headQuerier.Close(); err != nil {
 				return nil, fmt.Errorf("closing head querier %s: %w", rh, err)
 			}
-			inOrderHeadQuerier = nil
+			headQuerier = nil
 		}
 		if getNew {
 			rh := NewRangeHead(db.head, newMint, maxt)
-			inOrderHeadQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt)
+			headQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt)
 			if err != nil {
 				return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err)
 			}
 		}
-
-		if inOrderHeadQuerier != nil {
-			blockQueriers = append(blockQueriers, inOrderHeadQuerier)
-		}
 	}

-	if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
-		rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
-		outOfOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt)
-		if err != nil {
-			// If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead.
-			rh.isoState.Close()
-
-			return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err)
-		}
+	if overlapsOOO {
+		// We need to fetch from in-order and out-of-order chunks: wrap the headQuerier.
+		isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef)
+		headQuerier = NewHeadAndOOOChunkQuerier(mint, maxt, db.head, isoState, headQuerier)
+	}

-		blockQueriers = append(blockQueriers, outOfOrderHeadQuerier)
+	if headQuerier != nil {
+		blockQueriers = append(blockQueriers, headQuerier)
 	}

 	for _, b := range blocks {
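Note: the shape of this refactor, in both the Querier and ChunkQuerier paths, is composition: instead of appending a second, separate out-of-order head querier, the in-order head querier is wrapped by one that also reads OOO chunks. A toy sketch of that composition with stand-in interfaces (the real code wires storage.Querier, isolation state, and time ranges):

    package main

    import "fmt"

    type querier interface{ describe() string }

    type headQuerier struct{}

    func (headQuerier) describe() string { return "in-order head" }

    type headAndOOOQuerier struct{ inner querier }

    func (q headAndOOOQuerier) describe() string {
        if q.inner == nil {
            return "ooo head only" // head range did not overlap, but OOO did
        }
        return q.inner.describe() + " + ooo head"
    }

    func main() {
        var hq querier = headQuerier{}
        overlapsOOO := true
        if overlapsOOO {
            hq = headAndOOOQuerier{inner: hq} // one querier serves both paths
        }
        fmt.Println(hq.describe())
        // The caller appends hq once, hence len(blocks)+1 instead of +2.
    }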
@@ -2344,13 +2333,13 @@ func blockDirs(dir string) ([]string, error) {
 	return dirs, nil
 }

-func exponential(d, min, max time.Duration) time.Duration {
+func exponential(d, minD, maxD time.Duration) time.Duration {
 	d *= 2
-	if d < min {
-		d = min
+	if d < minD {
+		d = minD
 	}
-	if d > max {
-		d = max
+	if d > maxD {
+		d = maxD
 	}
 	return d
 }
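Note: exponential doubles the delay and clamps it to [minD, maxD]; the parameter rename again avoids shadowing the Go 1.21 builtins. Typical retry-loop usage of the function as defined above:

    package main

    import (
        "fmt"
        "time"
    )

    func exponential(d, minD, maxD time.Duration) time.Duration {
        d *= 2
        if d < minD {
            d = minD
        }
        if d > maxD {
            d = maxD
        }
        return d
    }

    func main() {
        backoff := time.Second
        for i := 0; i < 5; i++ {
            fmt.Println(backoff)
            backoff = exponential(backoff, time.Second, 10*time.Second)
        }
        // Prints 1s 2s 4s 8s 10s: growth is capped at maxD.
    }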
tsdb/db_test.go: 312 lines changed
@@ -2676,8 +2676,9 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) {
 	require.NoError(t, db.Close())

 	// Simulate a corrupted chunk: without a header.
-	_, err := os.Create(path.Join(mmappedChunksDir(db.dir), "000001"))
+	chunk, err := os.Create(path.Join(mmappedChunksDir(db.dir), "000001"))
 	require.NoError(t, err)
+	require.NoError(t, chunk.Close())

 	spinUpQuerierAndCheck(db.dir, t.TempDir(), 1)
@@ -4887,8 +4888,8 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample

 	addSample := func(fromMins, toMins int64) {
 		app := db.Appender(context.Background())
-		for min := fromMins; min <= toMins; min++ {
-			ts := min * time.Minute.Milliseconds()
+		for m := fromMins; m <= toMins; m++ {
+			ts := m * time.Minute.Milliseconds()
 			_, _, err := scenario.appendFunc(app, series1, ts, ts)
 			require.NoError(t, err)
 			_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)

@@ -4924,8 +4925,8 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample
 	var series1Samples, series2Samples []chunks.Sample
 	for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, highest}} {
 		fromMins, toMins := r[0], r[1]
-		for min := fromMins; min <= toMins; min++ {
-			ts := min * time.Minute.Milliseconds()
+		for m := fromMins; m <= toMins; m++ {
+			ts := m * time.Minute.Milliseconds()
 			series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
 			series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
 		}

@@ -5003,8 +5004,8 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample
 	verifySamples := func(block *Block, fromMins, toMins int64) {
 		series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
 		series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
-		for min := fromMins; min <= toMins; min++ {
-			ts := min * time.Minute.Milliseconds()
+		for m := fromMins; m <= toMins; m++ {
+			ts := m * time.Minute.Milliseconds()
 			series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
 			series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
 		}

@@ -5090,8 +5091,8 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen

 	addSamples := func(fromMins, toMins int64) {
 		app := db.Appender(context.Background())
-		for min := fromMins; min <= toMins; min++ {
-			ts := min * time.Minute.Milliseconds()
+		for m := fromMins; m <= toMins; m++ {
+			ts := m * time.Minute.Milliseconds()
 			_, _, err := scenario.appendFunc(app, series1, ts, ts)
 			require.NoError(t, err)
 			_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)

@@ -5145,8 +5146,8 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen
 	verifySamples := func(block *Block, fromMins, toMins int64) {
 		series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
 		series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
-		for min := fromMins; min <= toMins; min++ {
-			ts := min * time.Minute.Milliseconds()
+		for m := fromMins; m <= toMins; m++ {
+			ts := m * time.Minute.Milliseconds()
 			series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
 			series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
 		}

@@ -5203,8 +5204,8 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen

 	addSamples := func(fromMins, toMins int64) {
 		app := db.Appender(context.Background())
-		for min := fromMins; min <= toMins; min++ {
-			ts := min * time.Minute.Milliseconds()
+		for m := fromMins; m <= toMins; m++ {
+			ts := m * time.Minute.Milliseconds()
 			_, _, err := scenario.appendFunc(app, series1, ts, ts)
 			require.NoError(t, err)
 			_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)

@@ -5258,8 +5259,8 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen
 	verifySamples := func(block *Block, fromMins, toMins int64) {
 		series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
 		series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
-		for min := fromMins; min <= toMins; min++ {
-			ts := min * time.Minute.Milliseconds()
+		for m := fromMins; m <= toMins; m++ {
+			ts := m * time.Minute.Milliseconds()
 			series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
 			series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
 		}

@@ -5314,8 +5315,8 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa

 	addSamples := func(fromMins, toMins int64) {
 		app := db.Appender(context.Background())
-		for min := fromMins; min <= toMins; min++ {
-			ts := min * time.Minute.Milliseconds()
+		for m := fromMins; m <= toMins; m++ {
+			ts := m * time.Minute.Milliseconds()
 			_, _, err := scenario.appendFunc(app, series1, ts, ts)
 			require.NoError(t, err)
 			_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)

@@ -5362,8 +5363,8 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa
 	verifySamples := func(fromMins, toMins int64) {
 		series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
 		series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
-		for min := fromMins; min <= toMins; min++ {
-			ts := min * time.Minute.Milliseconds()
+		for m := fromMins; m <= toMins; m++ {
+			ts := m * time.Minute.Milliseconds()
 			series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
 			series2Samples = append(series2Samples, scenario.sampleFunc(ts, ts*2))
 		}
@@ -5462,7 +5463,6 @@ func testQuerierOOOQuery(t *testing.T,
 	sampleFunc func(ts int64) chunks.Sample,
 ) {
 	opts := DefaultOptions()
-	opts.OutOfOrderCapMax = 30
 	opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds()

 	series1 := labels.FromStrings("foo", "bar1")

@@ -5474,18 +5474,19 @@ func testQuerierOOOQuery(t *testing.T,
 	addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc, counterReset bool) ([]chunks.Sample, int) {
 		app := db.Appender(context.Background())
 		totalAppended := 0
-		for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() {
-			if !filter(min) {
+		for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() {
+			if !filter(m / time.Minute.Milliseconds()) {
 				continue
 			}
-			_, err := appendFunc(app, min, counterReset)
-			if min >= queryMinT && min <= queryMaxT {
-				expSamples = append(expSamples, sampleFunc(min))
+			_, err := appendFunc(app, m, counterReset)
+			if m >= queryMinT && m <= queryMaxT {
+				expSamples = append(expSamples, sampleFunc(m))
 			}
 			require.NoError(t, err)
 			totalAppended++
 		}
 		require.NoError(t, app.Commit())
+		require.Positive(t, totalAppended, 0) // Sanity check that filter is not too zealous.
 		return expSamples, totalAppended
 	}
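Note: the division in filter(m / time.Minute.Milliseconds()) is the substance of this fix. The batch filters are written in minutes (e.g. t%2 == 0), but the loop variable advances in milliseconds; since every whole-minute timestamp is a multiple of 60000, an even/odd predicate applied to raw milliseconds never rejected anything. A small demonstration:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        filter := func(t int64) bool { return t%2 == 1 } // "odd minutes only"
        m := 3 * time.Minute.Milliseconds()              // 180000 ms, minute 3
        fmt.Println(filter(m))                              // false: 180000 is even, the filter never matched
        fmt.Println(filter(m / time.Minute.Milliseconds())) // true: minute 3 is odd
    }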
@@ -5499,12 +5500,14 @@ func testQuerierOOOQuery(t *testing.T,

 	tests := []struct {
 		name      string
+		oooCap    int64
 		queryMinT int64
 		queryMaxT int64
 		batches   []sampleBatch
 	}{
 		{
 			name:      "query interval covering ooomint and inordermaxt returns all ingested samples",
+			oooCap:    30,
 			queryMinT: minutes(0),
 			queryMaxT: minutes(200),
 			batches: []sampleBatch{

@@ -5523,6 +5526,7 @@ func testQuerierOOOQuery(t *testing.T,
 		},
 		{
 			name:      "partial query interval returns only samples within interval",
+			oooCap:    30,
 			queryMinT: minutes(20),
 			queryMaxT: minutes(180),
 			batches: []sampleBatch{
@@ -5564,9 +5568,102 @@ func testQuerierOOOQuery(t *testing.T,
 				},
 			},
 		},
+		{
+			name:      "query overlapping inorder and ooo samples returns all ingested samples at the end of the interval",
+			oooCap:    30,
+			queryMinT: minutes(0),
+			queryMaxT: minutes(200),
+			batches: []sampleBatch{
+				{
+					minT:   minutes(100),
+					maxT:   minutes(200),
+					filter: func(t int64) bool { return t%2 == 0 },
+					isOOO:  false,
+				},
+				{
+					minT:   minutes(170),
+					maxT:   minutes(180),
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+			},
+		},
+		{
+			name:      "query overlapping inorder and ooo in-memory samples returns all ingested samples at the beginning of the interval",
+			oooCap:    30,
+			queryMinT: minutes(0),
+			queryMaxT: minutes(200),
+			batches: []sampleBatch{
+				{
+					minT:   minutes(100),
+					maxT:   minutes(200),
+					filter: func(t int64) bool { return t%2 == 0 },
+					isOOO:  false,
+				},
+				{
+					minT:   minutes(100),
+					maxT:   minutes(110),
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+			},
+		},
+		{
+			name:      "query inorder contain ooo mmaped samples returns all ingested samples at the beginning of the interval",
+			oooCap:    5,
+			queryMinT: minutes(0),
+			queryMaxT: minutes(200),
+			batches: []sampleBatch{
+				{
+					minT:   minutes(100),
+					maxT:   minutes(200),
+					filter: func(t int64) bool { return t%2 == 0 },
+					isOOO:  false,
+				},
+				{
+					minT:   minutes(101),
+					maxT:   minutes(101 + (5-1)*2), // Append samples to fit in a single mmmaped OOO chunk and fit inside the first in-order mmaped chunk.
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+				{
+					minT:   minutes(191),
+					maxT:   minutes(193), // Append some more OOO samples to trigger mapping the OOO chunk, but use time 151 to not overlap with in-order head chunk.
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+			},
+		},
+		{
+			name:      "query overlapping inorder and ooo mmaped samples returns all ingested samples at the beginning of the interval",
+			oooCap:    30,
+			queryMinT: minutes(0),
+			queryMaxT: minutes(200),
+			batches: []sampleBatch{
+				{
+					minT:   minutes(100),
+					maxT:   minutes(200),
+					filter: func(t int64) bool { return t%2 == 0 },
+					isOOO:  false,
+				},
+				{
+					minT:   minutes(101),
+					maxT:   minutes(101 + (30-1)*2), // Append samples to fit in a single mmmaped OOO chunk and overlap the first in-order mmaped chunk.
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+				{
+					minT:   minutes(191),
+					maxT:   minutes(193), // Append some more OOO samples to trigger mapping the OOO chunk, but use time 151 to not overlap with in-order head chunk.
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+			},
+		},
 	}
 	for _, tc := range tests {
 		t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
+			opts.OutOfOrderCapMax = tc.oooCap
 			db := openTestDB(t, opts, nil)
 			db.DisableCompactions()
 			db.EnableNativeHistograms()
@@ -5676,18 +5773,19 @@ func testChunkQuerierOOOQuery(t *testing.T,
	addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc, counterReset bool) ([]chunks.Sample, int) {
		app := db.Appender(context.Background())
		totalAppended := 0
		for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() {
			if !filter(min) {
		for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() {
			if !filter(m / time.Minute.Milliseconds()) {
				continue
			}
			_, err := appendFunc(app, min, counterReset)
			if min >= queryMinT && min <= queryMaxT {
				expSamples = append(expSamples, sampleFunc(min))
			_, err := appendFunc(app, m, counterReset)
			if m >= queryMinT && m <= queryMaxT {
				expSamples = append(expSamples, sampleFunc(m))
			}
			require.NoError(t, err)
			totalAppended++
		}
		require.NoError(t, app.Commit())
		require.Positive(t, totalAppended) // Sanity check that filter is not too zealous.
		return expSamples, totalAppended
	}
@@ -5701,12 +5799,14 @@ func testChunkQuerierOOOQuery(t *testing.T,

	tests := []struct {
		name      string
		oooCap    int64
		queryMinT int64
		queryMaxT int64
		batches   []sampleBatch
	}{
		{
			name:      "query interval covering ooomint and inordermaxt returns all ingested samples",
			oooCap:    30,
			queryMinT: minutes(0),
			queryMaxT: minutes(200),
			batches: []sampleBatch{

@@ -5725,6 +5825,7 @@ func testChunkQuerierOOOQuery(t *testing.T,
		},
		{
			name:      "partial query interval returns only samples within interval",
			oooCap:    30,
			queryMinT: minutes(20),
			queryMaxT: minutes(180),
			batches: []sampleBatch{

@@ -5766,9 +5867,102 @@ func testChunkQuerierOOOQuery(t *testing.T,
				},
			},
		},
		{
			name:      "query overlapping inorder and ooo samples returns all ingested samples at the end of the interval",
			oooCap:    30,
			queryMinT: minutes(0),
			queryMaxT: minutes(200),
			batches: []sampleBatch{
				{
					minT:   minutes(100),
					maxT:   minutes(200),
					filter: func(t int64) bool { return t%2 == 0 },
					isOOO:  false,
				},
				{
					minT:   minutes(170),
					maxT:   minutes(180),
					filter: func(t int64) bool { return t%2 == 1 },
					isOOO:  true,
				},
			},
		},
		{
			name:      "query overlapping inorder and ooo in-memory samples returns all ingested samples at the beginning of the interval",
			oooCap:    30,
			queryMinT: minutes(0),
			queryMaxT: minutes(200),
			batches: []sampleBatch{
				{
					minT:   minutes(100),
					maxT:   minutes(200),
					filter: func(t int64) bool { return t%2 == 0 },
					isOOO:  false,
				},
				{
					minT:   minutes(100),
					maxT:   minutes(110),
					filter: func(t int64) bool { return t%2 == 1 },
					isOOO:  true,
				},
			},
		},
		{
			name:      "query inorder contain ooo mmaped samples returns all ingested samples at the beginning of the interval",
			oooCap:    5,
			queryMinT: minutes(0),
			queryMaxT: minutes(200),
			batches: []sampleBatch{
				{
					minT:   minutes(100),
					maxT:   minutes(200),
					filter: func(t int64) bool { return t%2 == 0 },
					isOOO:  false,
				},
				{
					minT:   minutes(101),
					maxT:   minutes(101 + (5-1)*2), // Append samples to fit in a single m-mapped OOO chunk and fit inside the first in-order m-mapped chunk.
					filter: func(t int64) bool { return t%2 == 1 },
					isOOO:  true,
				},
				{
					minT:   minutes(191),
					maxT:   minutes(193), // Append some more OOO samples to trigger mapping the OOO chunk, at a time that does not overlap with the in-order head chunk.
					filter: func(t int64) bool { return t%2 == 1 },
					isOOO:  true,
				},
			},
		},
		{
			name:      "query overlapping inorder and ooo mmaped samples returns all ingested samples at the beginning of the interval",
			oooCap:    30,
			queryMinT: minutes(0),
			queryMaxT: minutes(200),
			batches: []sampleBatch{
				{
					minT:   minutes(100),
					maxT:   minutes(200),
					filter: func(t int64) bool { return t%2 == 0 },
					isOOO:  false,
				},
				{
					minT:   minutes(101),
					maxT:   minutes(101 + (30-1)*2), // Append samples to fit in a single m-mapped OOO chunk and overlap the first in-order m-mapped chunk.
					filter: func(t int64) bool { return t%2 == 1 },
					isOOO:  true,
				},
				{
					minT:   minutes(191),
					maxT:   minutes(193), // Append some more OOO samples to trigger mapping the OOO chunk, at a time that does not overlap with the in-order head chunk.
					filter: func(t int64) bool { return t%2 == 1 },
					isOOO:  true,
				},
			},
		},
	}
	for _, tc := range tests {
		t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
			opts.OutOfOrderCapMax = tc.oooCap
			db := openTestDB(t, opts, nil)
			db.DisableCompactions()
			db.EnableNativeHistograms()
@@ -5911,7 +6105,7 @@ func testOOONativeHistogramsWithCounterResets(t *testing.T, scenario sampleTypeS
			shouldReset: func(v int64) bool {
				return v == 44
			},
			expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.CounterReset},
			expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.UnknownCounterReset},
		},
		{
			from: 50,

@@ -6017,9 +6211,9 @@ func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) {
		app := db.Appender(context.Background())
		key := lbls.String()
		from, to := minutes(fromMins), minutes(toMins)
		for min := from; min <= to; min += time.Minute.Milliseconds() {
		for m := from; m <= to; m += time.Minute.Milliseconds() {
			val := rand.Intn(1000)
			_, s, err := scenario.appendFunc(app, lbls, min, int64(val))
			_, s, err := scenario.appendFunc(app, lbls, m, int64(val))
			if faceError {
				require.Error(t, err)
			} else {

@@ -6150,14 +6344,14 @@ func testOOODisabled(t *testing.T, scenario sampleTypeScenario) {
		app := db.Appender(context.Background())
		key := lbls.String()
		from, to := minutes(fromMins), minutes(toMins)
		for min := from; min <= to; min += time.Minute.Milliseconds() {
			_, _, err := scenario.appendFunc(app, lbls, min, min)
		for m := from; m <= to; m += time.Minute.Milliseconds() {
			_, _, err := scenario.appendFunc(app, lbls, m, m)
			if faceError {
				require.Error(t, err)
				failedSamples++
			} else {
				require.NoError(t, err)
				expSamples[key] = append(expSamples[key], scenario.sampleFunc(min, min))
				expSamples[key] = append(expSamples[key], scenario.sampleFunc(m, m))
				totalSamples++
			}
		}

@@ -6226,9 +6420,9 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
		app := db.Appender(context.Background())
		key := lbls.String()
		from, to := minutes(fromMins), minutes(toMins)
		for min := from; min <= to; min += time.Minute.Milliseconds() {
		for m := from; m <= to; m += time.Minute.Milliseconds() {
			val := rand.Intn(1000)
			_, s, err := scenario.appendFunc(app, lbls, min, int64(val))
			_, s, err := scenario.appendFunc(app, lbls, m, int64(val))
			require.NoError(t, err)
			expSamples[key] = append(expSamples[key], s)
			totalSamples++

@@ -6791,8 +6985,8 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) {

	addSample := func(fromMins, toMins int64) {
		app := db.Appender(context.Background())
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
		for m := fromMins; m <= toMins; m++ {
			ts := m * time.Minute.Milliseconds()
			_, _, err := scenario.appendFunc(app, series1, ts, ts)
			require.NoError(t, err)
		}

@@ -6879,8 +7073,8 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) {

	verifySamples := func(block *Block, fromMins, toMins int64) {
		series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
		for m := fromMins; m <= toMins; m++ {
			ts := m * time.Minute.Milliseconds()
			series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
		}
		expRes := map[string][]chunks.Sample{

@@ -6928,8 +7122,8 @@ func TestWBLCorruption(t *testing.T) {
	var allSamples, expAfterRestart []chunks.Sample
	addSamples := func(fromMins, toMins int64, afterRestart bool) {
		app := db.Appender(context.Background())
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
		for m := fromMins; m <= toMins; m++ {
			ts := m * time.Minute.Milliseconds()
			_, err := app.Append(0, series1, ts, float64(ts))
			require.NoError(t, err)
			allSamples = append(allSamples, sample{t: ts, f: float64(ts)})

@@ -7084,8 +7278,8 @@ func testOOOMmapCorruption(t *testing.T, scenario sampleTypeScenario) {
	var allSamples, expInMmapChunks []chunks.Sample
	addSamples := func(fromMins, toMins int64, inMmapAfterCorruption bool) {
		app := db.Appender(context.Background())
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
		for m := fromMins; m <= toMins; m++ {
			ts := m * time.Minute.Milliseconds()
			_, s, err := scenario.appendFunc(app, series1, ts, ts)
			require.NoError(t, err)
			allSamples = append(allSamples, s)

@@ -7231,8 +7425,8 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) {
	series1 := labels.FromStrings("foo", "bar1")
	addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool, allSamples []chunks.Sample) []chunks.Sample {
		app := db.Appender(context.Background())
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
		for m := fromMins; m <= toMins; m++ {
			ts := m * time.Minute.Milliseconds()
			_, s, err := scenario.appendFunc(app, series1, ts, ts)
			if success {
				require.NoError(t, err)
@@ -7265,7 +7459,7 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) {
		// WBL is not empty.
		size, err := db.head.wbl.Size()
		require.NoError(t, err)
		require.Greater(t, size, int64(0))
		require.Positive(t, size)

		require.Empty(t, db.Blocks())
		require.NoError(t, db.compactOOOHead(ctx))
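The assertion swap above recurs throughout the diff: testify's `require.Positive(t, v)` asserts `v > 0`, so it replaces `require.Greater(t, v, int64(0))` without the explicit typed zero. A small self-contained example of the equivalence:

```go
package tsdbexample

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestPositiveReplacesGreater(t *testing.T) {
	var size int64 = 42
	require.Greater(t, size, int64(0)) // old style: needs a zero of the right type
	require.Positive(t, size)          // new style: same check, no typed zero
}
```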
@@ -7442,8 +7636,8 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {
	series1 := labels.FromStrings("foo", "bar1")
	addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool) {
		app := db.Appender(context.Background())
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
		for m := fromMins; m <= toMins; m++ {
			ts := m * time.Minute.Milliseconds()
			_, _, err := scenario.appendFunc(app, series1, ts, ts)
			if success {
				require.NoError(t, err)

@@ -7456,8 +7650,8 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {

	verifySamples := func(t *testing.T, db *DB, fromMins, toMins int64) {
		var expSamples []chunks.Sample
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
		for m := fromMins; m <= toMins; m++ {
			ts := m * time.Minute.Milliseconds()
			expSamples = append(expSamples, scenario.sampleFunc(ts, ts))
		}

@@ -7574,8 +7768,8 @@ func testWblReplayAfterOOODisableAndRestart(t *testing.T, scenario sampleTypeSce
	var allSamples []chunks.Sample
	addSamples := func(fromMins, toMins int64) {
		app := db.Appender(context.Background())
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
		for m := fromMins; m <= toMins; m++ {
			ts := m * time.Minute.Milliseconds()
			_, s, err := scenario.appendFunc(app, series1, ts, ts)
			require.NoError(t, err)
			allSamples = append(allSamples, s)

@@ -7643,8 +7837,8 @@ func testPanicOnApplyConfig(t *testing.T, scenario sampleTypeScenario) {
	var allSamples []chunks.Sample
	addSamples := func(fromMins, toMins int64) {
		app := db.Appender(context.Background())
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
		for m := fromMins; m <= toMins; m++ {
			ts := m * time.Minute.Milliseconds()
			_, s, err := scenario.appendFunc(app, series1, ts, ts)
			require.NoError(t, err)
			allSamples = append(allSamples, s)

@@ -7702,8 +7896,8 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari
	var allSamples []chunks.Sample
	addSamples := func(fromMins, toMins int64) {
		app := db.Appender(context.Background())
		for min := fromMins; min <= toMins; min++ {
			ts := min * time.Minute.Milliseconds()
		for m := fromMins; m <= toMins; m++ {
			ts := m * time.Minute.Milliseconds()
			_, s, err := scenario.appendFunc(app, series1, ts, ts)
			require.NoError(t, err)
			allSamples = append(allSamples, s)
@@ -201,8 +201,8 @@ func (d *Decbuf) UvarintStr() string {
	return string(d.UvarintBytes())
}

// The return value becomes invalid if the byte slice goes away.
// Compared to UvarintStr, this avoid allocations.
// UvarintBytes returns invalid values if the byte slice goes away.
// Compared to UvarintStr, it avoids allocations.
func (d *Decbuf) UvarintBytes() []byte {
	l := d.Uvarint64()
	if d.E != nil {
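The doc-comment fix above concerns a zero-copy accessor: `UvarintBytes` returns a sub-slice of the decode buffer instead of allocating a string, so the result aliases the buffer and is only valid while that buffer is alive and unmodified. A generic illustration of the trade-off, with hypothetical helpers rather than the actual Decbuf API:

```go
package main

import "fmt"

func asBytes(buf []byte, n int) []byte  { return buf[:n] }         // no copy: aliases buf
func asString(buf []byte, n int) string { return string(buf[:n]) } // copies: one allocation

func main() {
	buf := []byte("hello world")
	bs := asBytes(buf, 5)
	s := asString(buf, 5)
	buf[0] = 'H' // mutate the backing array
	fmt.Println(string(bs), s) // "Hello hello": only the aliasing slice saw the change
}
```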
@@ -19,6 +19,7 @@ import (
	"fmt"
	"math"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"

	"github.com/prometheus/prometheus/model/exemplar"

@@ -1024,7 +1025,7 @@ func (a *headAppender) Commit() (err error) {
				// Sample is OOO and OOO handling is enabled
				// and the delta is within the OOO tolerance.
				var mmapRefs []chunks.ChunkDiskMapperRef
				ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, oooCapMax)
				ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger)
				if chunkCreated {
					r, ok := oooMmapMarkers[series.ref]
					if !ok || r != nil {

@@ -1120,7 +1121,7 @@ func (a *headAppender) Commit() (err error) {
				// Sample is OOO and OOO handling is enabled
				// and the delta is within the OOO tolerance.
				var mmapRefs []chunks.ChunkDiskMapperRef
				ok, chunkCreated, mmapRefs = series.insert(s.T, 0, s.H, nil, a.head.chunkDiskMapper, oooCapMax)
				ok, chunkCreated, mmapRefs = series.insert(s.T, 0, s.H, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger)
				if chunkCreated {
					r, ok := oooMmapMarkers[series.ref]
					if !ok || r != nil {

@@ -1216,7 +1217,7 @@ func (a *headAppender) Commit() (err error) {
				// Sample is OOO and OOO handling is enabled
				// and the delta is within the OOO tolerance.
				var mmapRefs []chunks.ChunkDiskMapperRef
				ok, chunkCreated, mmapRefs = series.insert(s.T, 0, nil, s.FH, a.head.chunkDiskMapper, oooCapMax)
				ok, chunkCreated, mmapRefs = series.insert(s.T, 0, nil, s.FH, a.head.chunkDiskMapper, oooCapMax, a.head.logger)
				if chunkCreated {
					r, ok := oooMmapMarkers[series.ref]
					if !ok || r != nil {

@@ -1314,14 +1315,14 @@ func (a *headAppender) Commit() (err error) {
}

// insert is like append, except it inserts. Used for OOO samples.
func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) {
func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger log.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) {
	if s.ooo == nil {
		s.ooo = &memSeriesOOOFields{}
	}
	c := s.ooo.oooHeadChunk
	if c == nil || c.chunk.NumSamples() == int(oooCapMax) {
		// Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks.
		c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
		c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper, logger)
		chunkCreated = true
	}
@@ -1633,12 +1634,12 @@ func (s *memSeries) histogramsAppendPreprocessor(t int64, e chunkenc.Encoding, o
// It assumes that the time range is 1/ratioToFull full.
// Assuming that the samples will keep arriving at the same rate, it will make the
// remaining n chunks within this chunk range (before max) equally sized.
func computeChunkEndTime(start, cur, max int64, ratioToFull float64) int64 {
	n := float64(max-start) / (float64(cur-start+1) * ratioToFull)
func computeChunkEndTime(start, cur, maxT int64, ratioToFull float64) int64 {
	n := float64(maxT-start) / (float64(cur-start+1) * ratioToFull)
	if n <= 1 {
		return max
		return maxT
	}
	return int64(float64(start) + float64(max-start)/math.Floor(n))
	return int64(float64(start) + float64(maxT-start)/math.Floor(n))
}

func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange int64) *memChunk {
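Renaming the parameter `max` to `maxT` sidesteps the predeclared `max` function; the logic is unchanged. Reproduced below as a runnable sketch with one worked call:

```go
package main

import (
	"fmt"
	"math"
)

// Same body as the new version above: estimate how many equally sized chunks
// still fit before maxT, assuming samples keep arriving at the current rate.
func computeChunkEndTime(start, cur, maxT int64, ratioToFull float64) int64 {
	n := float64(maxT-start) / (float64(cur-start+1) * ratioToFull)
	if n <= 1 {
		return maxT
	}
	return int64(float64(start) + float64(maxT-start)/math.Floor(n))
}

func main() {
	// Range [0, 120), 11 time units elapsed, assumed 1/4 full:
	// n = 120/(11*4) ≈ 2.73, floor(n) = 2, so the chunk is cut at 0 + 120/2.
	fmt.Println(computeChunkEndTime(0, 10, 120, 4)) // 60
}
```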
@@ -1675,9 +1676,9 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange
}

// cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) {
	ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper)
// The caller must ensure that s is locked and s.ooo is not nil.
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) {
	ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper, logger)

	s.ooo.oooHeadChunk = &oooHeadChunk{
		chunk: NewOOOChunk(),

@@ -1688,7 +1689,8 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk
	return s.ooo.oooHeadChunk, ref
}

func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) []chunks.ChunkDiskMapperRef {
// s must be locked when calling.
func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) []chunks.ChunkDiskMapperRef {
	if s.ooo == nil || s.ooo.oooHeadChunk == nil {
		// OOO is not enabled or there is no head chunk, so nothing to m-map here.
		return nil

@@ -1700,6 +1702,10 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap
	}
	chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, len(chks))
	for _, memchunk := range chks {
		if len(s.ooo.oooMmappedChunks) >= (oooChunkIDMask - 1) {
			level.Error(logger).Log("msg", "Too many OOO chunks, dropping data", "series", s.lset.String())
			break
		}
		chunkRef := chunkDiskMapper.WriteChunk(s.ref, memchunk.minTime, memchunk.maxTime, memchunk.chunk, true, handleChunkWriteError)
		chunkRefs = append(chunkRefs, chunkRef)
		s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
@@ -26,7 +26,7 @@ func (s *memSeries) labels() labels.Labels {
	return s.lset
}

// No-op when not using dedupelabels.
// RebuildSymbolTable is a no-op when not using dedupelabels.
func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable {
	return nil
}

@@ -199,13 +199,18 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
	defer s.Unlock()

	*chks = (*chks)[:0]
	*chks = appendSeriesChunks(s, h.mint, h.maxt, *chks)

	return nil
}

func appendSeriesChunks(s *memSeries, mint, maxt int64, chks []chunks.Meta) []chunks.Meta {
	for i, c := range s.mmappedChunks {
		// Do not expose chunks that are outside of the specified range.
		if !c.OverlapsClosedInterval(h.mint, h.maxt) {
		if !c.OverlapsClosedInterval(mint, maxt) {
			continue
		}
		*chks = append(*chks, chunks.Meta{
		chks = append(chks, chunks.Meta{
			MinTime: c.minTime,
			MaxTime: c.maxTime,
			Ref:     chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(i))),

@@ -223,8 +228,8 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
		} else {
			maxTime = chk.maxTime
		}
		if chk.OverlapsClosedInterval(h.mint, h.maxt) {
			*chks = append(*chks, chunks.Meta{
		if chk.OverlapsClosedInterval(mint, maxt) {
			chks = append(chks, chunks.Meta{
				MinTime: chk.minTime,
				MaxTime: maxTime,
				Ref:     chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(len(s.mmappedChunks)+j))),

@@ -233,8 +238,7 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
			j++
		}
	}

	return nil
	return chks
}

// headChunkID returns the HeadChunkID referred to by the given position.
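Extracting `appendSeriesChunks` also switches from mutating a `*[]chunks.Meta` in place to the append-and-return shape. Returning the slice matters because `append` may reallocate the backing array; a minimal sketch of the pattern with a made-up helper:

```go
package main

import "fmt"

// appendEvens mirrors the appendSeriesChunks shape: take a slice by value,
// append to it, and return the (possibly reallocated) slice to the caller.
func appendEvens(dst, src []int) []int {
	for _, v := range src {
		if v%2 == 0 {
			dst = append(dst, v) // dst may now point at a new backing array
		}
	}
	return dst
}

func main() {
	out := appendEvens(nil, []int{1, 2, 3, 4})
	fmt.Println(out) // [2 4]
}
```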
@@ -244,12 +248,20 @@ func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID {
	return chunks.HeadChunkID(pos) + s.firstChunkID
}

const oooChunkIDMask = 1 << 23

// oooHeadChunkID returns the HeadChunkID referred to by the given position.
// Only the bottom 24 bits are used. Bit 23 is always 1 for an OOO chunk; for the rest:
// * 0 <= pos < len(s.oooMmappedChunks) refer to s.oooMmappedChunks[pos]
// * pos == len(s.oooMmappedChunks) refers to s.oooHeadChunk
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID {
	return chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID
	return (chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID) | oooChunkIDMask
}

func unpackHeadChunkRef(ref chunks.ChunkRef) (seriesID chunks.HeadSeriesRef, chunkID chunks.HeadChunkID, isOOO bool) {
	sid, cid := chunks.HeadChunkRef(ref).Unpack()
	return sid, (cid & (oooChunkIDMask - 1)), (cid & oooChunkIDMask) != 0
}

// LabelValueFor returns label value for the given label name in the series referred to by ID.
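With bit 23 of the 24-bit chunk ID reserved as the OOO marker, in-order and out-of-order chunks can share one `HeadChunkRef` space and be told apart when the reference is unpacked. A standalone sketch of the packing arithmetic, using plain integers and a hypothetical position value:

```go
package main

import "fmt"

const oooChunkIDMask = 1 << 23

// packOOO marks a chunk position as out-of-order by setting bit 23.
func packOOO(pos uint64) uint64 { return pos | oooChunkIDMask }

// unpack recovers the raw position and whether the OOO bit was set.
func unpack(id uint64) (pos uint64, isOOO bool) {
	return id & (oooChunkIDMask - 1), id&oooChunkIDMask != 0
}

func main() {
	id := packOOO(5) // hypothetical position within oooMmappedChunks
	pos, isOOO := unpack(id)
	fmt.Println(pos, isOOO) // 5 true

	pos, isOOO = unpack(7) // an in-order chunk ID has bit 23 clear
	fmt.Println(pos, isOOO) // 7 false
}
```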
@@ -339,10 +351,15 @@ func (h *headChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chu
	return chk, nil, err
}

// ChunkWithCopy returns the chunk for the reference number.
// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk.
func (h *headChunkReader) ChunkWithCopy(meta chunks.Meta) (chunkenc.Chunk, int64, error) {
	return h.chunk(meta, true)
type ChunkReaderWithCopy interface {
	ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error)
}

// ChunkOrIterableWithCopy returns the chunk for the reference number.
// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk, plus the max time of the chunk.
func (h *headChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
	chk, maxTime, err := h.chunk(meta, true)
	return chk, nil, maxTime, err
}

// chunk returns the chunk for the reference number.

@@ -358,9 +375,14 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
	}

	s.Lock()
	defer s.Unlock()
	return h.chunkFromSeries(s, cid, copyLastChunk)
}

// Call with s locked.
func (h *headChunkReader) chunkFromSeries(s *memSeries, cid chunks.HeadChunkID, copyLastChunk bool) (chunkenc.Chunk, int64, error) {
	c, headChunk, isOpen, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool)
	if err != nil {
		s.Unlock()
		return nil, 0, err
	}
	defer func() {

@@ -374,7 +396,6 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.

	// This means that the chunk is outside the specified range.
	if !c.OverlapsClosedInterval(h.mint, h.maxt) {
		s.Unlock()
		return nil, 0, storage.ErrNotFound
	}

@@ -391,7 +412,6 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
			return nil, 0, err
		}
	}
	s.Unlock()

	return &safeHeadChunk{
		Chunk: chk,
@@ -461,30 +481,12 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi
	return elem, true, offset == 0, nil
}

// oooMergedChunks return an iterable over one or more OOO chunks for the given
// chunks.Meta reference from memory or by m-mapping it from the disk. The
// returned iterable will be a merge of all the overlapping chunks, if any,
// amongst all the chunks in the OOOHead.
// mergedChunks returns an iterable over all chunks that overlap the
// time window [mint,maxt], plus meta.Chunk if populated.
// If hr is non-nil then in-order chunks are included.
// This function is not thread safe unless the caller holds a lock.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64, maxMmapRef chunks.ChunkDiskMapperRef) (*mergedOOOChunks, error) {
	_, cid := chunks.HeadChunkRef(meta.Ref).Unpack()

	// ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are
	// incremented by 1 when new chunk is created, hence (meta - firstChunkID) gives the slice index.
	// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix
	// is len(s.mmappedChunks), it represents the next chunk, which is the head chunk.
	ix := int(cid) - int(s.ooo.firstOOOChunkID)
	if ix < 0 || ix > len(s.ooo.oooMmappedChunks) {
		return nil, storage.ErrNotFound
	}

	if ix == len(s.ooo.oooMmappedChunks) {
		if s.ooo.oooHeadChunk == nil {
			return nil, errors.New("invalid ooo head chunk")
		}
	}

func (s *memSeries) mergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, hr *headChunkReader, mint, maxt int64, maxMmapRef chunks.ChunkDiskMapperRef) (chunkenc.Iterable, error) {
	// We create a temporary slice of chunk metas to hold the information of all
	// possible chunks that may overlap with the requested chunk.
	tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks)+1)

@@ -509,6 +511,16 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
		tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{meta: meta})
	}

	if hr != nil { // Include in-order chunks.
		metas := appendSeriesChunks(s, max(meta.MinTime, mint), min(meta.MaxTime, maxt), nil)
		for _, m := range metas {
			tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
				meta: m,
				ref:  0, // This tells the loop below it's an in-order head chunk.
			})
		}
	}

	// Next we want to sort all the collected chunks by min time so we can find
	// those that overlap and stop when we know the rest don't.
	slices.SortFunc(tmpChks, refLessByMinTimeAndMinRef)

@@ -520,9 +532,17 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
			continue
		}
		var iterable chunkenc.Iterable
		if c.meta.Chunk != nil {
		switch {
		case c.meta.Chunk != nil:
			iterable = c.meta.Chunk
		} else {
		case c.ref == 0: // This is an in-order head chunk.
			_, cid := chunks.HeadChunkRef(c.meta.Ref).Unpack()
			var err error
			iterable, _, err = hr.chunkFromSeries(s, cid, false)
			if err != nil {
				return nil, fmt.Errorf("invalid head chunk: %w", err)
			}
		default:
			chk, err := cdm.Chunk(c.ref)
			if err != nil {
				var cerr *chunks.CorruptionErr
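The if/else above becomes a three-way `switch` so the new in-order case (`c.ref == 0`) slots in beside the two existing sources of chunk data. The shape of that dispatch, with hypothetical stand-ins for the three sources:

```go
package main

import "fmt"

// chunkSource sketches the dispatch in mergedChunks: a populated meta wins,
// a zero ref marks an in-order head chunk, everything else is read from disk.
func chunkSource(hasPopulatedChunk bool, ref uint64) string {
	switch {
	case hasPopulatedChunk:
		return "meta.Chunk already populated"
	case ref == 0: // sentinel set when collecting in-order chunks
		return "in-order head chunk"
	default:
		return "m-mapped OOO chunk read via the chunk disk mapper"
	}
}

func main() {
	fmt.Println(chunkSource(false, 0)) // in-order head chunk
}
```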
@@ -2759,7 +2759,7 @@ func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario) {

	require.Equal(t, int64(math.MinInt64), db.head.minValidTime.Load())
	require.NoError(t, db.Compact(ctx))
	require.Greater(t, db.head.minValidTime.Load(), int64(0))
	require.Positive(t, db.head.minValidTime.Load())

	app = db.Appender(ctx)
	_, err = appendSample(app, db.head.minValidTime.Load()-2)

@@ -3679,7 +3679,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
		require.Len(t, ms.mmappedChunks, 25)
		expMmapChunks := make([]*mmappedChunk, 0, 20)
		for _, mmap := range ms.mmappedChunks {
			require.Greater(t, mmap.numSamples, uint16(0))
			require.Positive(t, mmap.numSamples)
			cpy := *mmap
			expMmapChunks = append(expMmapChunks, &cpy)
		}

@@ -5868,7 +5868,7 @@ func TestCuttingNewHeadChunks(t *testing.T) {
	}
}

// TestHeadDetectsDuplcateSampleAtSizeLimit tests a regression where a duplicate sample
// TestHeadDetectsDuplicateSampleAtSizeLimit tests a regression where a duplicate sample
// is appended to the head, right when the head chunk is at the size limit.
// The test adds all samples as duplicate, thus expecting that the result has
// exactly half of the samples.

@@ -1006,7 +1006,7 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs, unknownHi
			unknownRefs++
			continue
		}
		ok, chunkCreated, _ := ms.insert(s.T, s.V, nil, nil, h.chunkDiskMapper, oooCapMax)
		ok, chunkCreated, _ := ms.insert(s.T, s.V, nil, nil, h.chunkDiskMapper, oooCapMax, h.logger)
		if chunkCreated {
			h.metrics.chunksCreated.Inc()
			h.metrics.chunks.Inc()

@@ -1033,9 +1033,9 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs, unknownHi
		var chunkCreated bool
		var ok bool
		if s.h != nil {
			ok, chunkCreated, _ = ms.insert(s.t, 0, s.h, nil, h.chunkDiskMapper, oooCapMax)
			ok, chunkCreated, _ = ms.insert(s.t, 0, s.h, nil, h.chunkDiskMapper, oooCapMax, h.logger)
		} else {
			ok, chunkCreated, _ = ms.insert(s.t, 0, nil, s.fh, h.chunkDiskMapper, oooCapMax)
			ok, chunkCreated, _ = ms.insert(s.t, 0, nil, s.fh, h.chunkDiskMapper, oooCapMax, h.logger)
		}
		if chunkCreated {
			h.metrics.chunksCreated.Inc()

@@ -196,8 +196,9 @@ func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) {
	return toc, d.Err()
}

// NewWriter returns a new Writer to the given filename. It serializes data in format version 2.
// It uses the given encoder to encode each postings list.
// NewWriterWithEncoder returns a new Writer to the given filename. It
// serializes data in format version 2. It uses the given encoder to encode each
// postings list.
func NewWriterWithEncoder(ctx context.Context, fn string, encoder PostingsEncoder) (*Writer, error) {
	dir := filepath.Dir(fn)

@@ -20,10 +20,10 @@ import (

func TestPostingsStats(t *testing.T) {
	stats := &maxHeap{}
	max := 3000000
	heapLength := 10
	const maxCount = 3000000
	const heapLength = 10
	stats.init(heapLength)
	for i := 0; i < max; i++ {
	for i := 0; i < maxCount; i++ {
		item := Stat{
			Name:  "Label-da",
			Count: uint64(i),

@@ -35,13 +35,13 @@ func TestPostingsStats(t *testing.T) {
	data := stats.get()
	require.Len(t, data, 10)
	for i := 0; i < heapLength; i++ {
		require.Equal(t, uint64(max-i), data[i].Count)
		require.Equal(t, uint64(maxCount-i), data[i].Count)
	}
}

func TestPostingsStats2(t *testing.T) {
	stats := &maxHeap{}
	heapLength := 10
	const heapLength = 10

	stats.init(heapLength)
	stats.push(Stat{Name: "Stuff", Count: 10})

@@ -57,12 +57,12 @@ func TestPostingsStats2(t *testing.T) {

func BenchmarkPostingStatsMaxHep(b *testing.B) {
	stats := &maxHeap{}
	max := 9000000
	heapLength := 10
	const maxCount = 9000000
	const heapLength = 10
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		stats.init(heapLength)
		for i := 0; i < max; i++ {
		for i := 0; i < maxCount; i++ {
			item := Stat{
				Name:  "Label-da",
				Count: uint64(i),
@@ -14,16 +14,10 @@
package tsdb

import (
	"fmt"
	"sort"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"

	"github.com/oklog/ulid"

	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/tombstones"
)

// OOOChunk maintains samples in time-ascending order.

@@ -171,75 +165,3 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error
	}
	return chks, nil
}

var _ BlockReader = &OOORangeHead{}

// OOORangeHead allows querying Head out of order samples via BlockReader
// interface implementation.
type OOORangeHead struct {
	head *Head
	// mint and maxt are tracked because when a query is handled we only want
	// the timerange of the query and having preexisting pointers to the first
	// and last timestamp help with that.
	mint, maxt int64

	isoState *oooIsolationState
}

func NewOOORangeHead(head *Head, mint, maxt int64, minRef chunks.ChunkDiskMapperRef) *OOORangeHead {
	isoState := head.oooIso.TrackReadAfter(minRef)

	return &OOORangeHead{
		head:     head,
		mint:     mint,
		maxt:     maxt,
		isoState: isoState,
	}
}

func (oh *OOORangeHead) Index() (IndexReader, error) {
	return NewOOOHeadIndexReader(oh.head, oh.mint, oh.maxt, oh.isoState.minRef), nil
}

func (oh *OOORangeHead) Chunks() (ChunkReader, error) {
	return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState, 0), nil
}

func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) {
	// As stated in the design doc https://docs.google.com/document/d/1Kppm7qL9C-BJB1j6yb6-9ObG3AbdZnFUBYPNNWwDBYM/edit?usp=sharing
	// Tombstones are not supported for out of order metrics.
	return tombstones.NewMemTombstones(), nil
}

var oooRangeHeadULID = ulid.MustParse("0000000000XXXX000RANGEHEAD")

func (oh *OOORangeHead) Meta() BlockMeta {
	return BlockMeta{
		MinTime: oh.mint,
		MaxTime: oh.maxt,
		ULID:    oooRangeHeadULID,
		Stats: BlockStats{
			NumSeries: oh.head.NumSeries(),
		},
	}
}

// Size returns the size taken by the Head block.
func (oh *OOORangeHead) Size() int64 {
	return oh.head.Size()
}

// String returns an human readable representation of the out of order range
// head. It's important to keep this function in order to avoid the struct dump
// when the head is stringified in errors or logs.
func (oh *OOORangeHead) String() string {
	return fmt.Sprintf("ooo range head (mint: %d, maxt: %d)", oh.MinTime(), oh.MaxTime())
}

func (oh *OOORangeHead) MinTime() int64 {
	return oh.mint
}

func (oh *OOORangeHead) MaxTime() int64 {
	return oh.maxt
}
@@ -27,15 +27,10 @@ import (
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/index"
	"github.com/prometheus/prometheus/tsdb/tombstones"
	"github.com/prometheus/prometheus/util/annotations"
)

// OOOHeadIndexReader implements IndexReader so ooo samples in the head can be
// accessed.
// It also has a reference to headIndexReader so we can leverage on its
// IndexReader implementation for all the methods that remain the same. We
// decided to do this to avoid code duplication.
// The only methods that change are the ones about getting Series and Postings.
type OOOHeadIndexReader struct {
type HeadAndOOOIndexReader struct {
	*headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible.
	lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef
}

@@ -49,17 +44,13 @@ func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator
	return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables)
}

func NewOOOHeadIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *OOOHeadIndexReader {
func NewHeadAndOOOIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader {
	hr := &headIndexReader{
		head: head,
		mint: mint,
		maxt: maxt,
	}
	return &OOOHeadIndexReader{hr, lastGarbageCollectedMmapRef}
}

func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
	return oh.series(ref, builder, chks, oh.lastGarbageCollectedMmapRef, 0)
	return &HeadAndOOOIndexReader{hr, lastGarbageCollectedMmapRef}
}

type MultiChunk struct {
@@ -109,12 +100,7 @@ func (c MultiChunk) Reset([]byte) {
	// no-op
}

// lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so
// any chunk at or before this ref will not be considered. 0 disables this check.
//
// maxMmapRef tells up to which m-map chunk we can consider. If it is non-0, then
// the oooHeadChunk will not be considered.
func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef) error {
func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
	s := oh.head.series.getByID(chunks.HeadSeriesRef(ref))

	if s == nil {

@@ -131,10 +117,19 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
	defer s.Unlock()
	*chks = (*chks)[:0]

	if s.ooo == nil {
	if s.ooo != nil {
		return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks)
	}
	*chks = appendSeriesChunks(s, oh.mint, oh.maxt, *chks)
	return nil
}

// lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so
// any chunk at or before this ref will not be considered. 0 disables this check.
//
// maxMmapRef tells up to which m-map chunk we can consider. If it is non-0, then
// the oooHeadChunk will not be considered.
func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, chks *[]chunks.Meta) error {
	tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks))

	addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) {

@@ -143,13 +138,14 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
			MaxTime:  maxT,
			Ref:      ref,
			Chunk:    chunk,
			MergeOOO: true,
		})
	}

	// Collect all chunks that overlap the query range.
	if s.ooo.oooHeadChunk != nil {
		c := s.ooo.oooHeadChunk
		if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 {
		if c.OverlapsClosedInterval(mint, maxt) && maxMmapRef == 0 {
			ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
			if len(c.chunk.samples) > 0 { // Empty samples happen in tests, at least.
				headChunks := MultiChunk{}

@@ -170,12 +166,16 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
	}
	for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
		c := s.ooo.oooMmappedChunks[i]
		if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) {
		if c.OverlapsClosedInterval(mint, maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) {
			ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
			addChunk(c.minTime, c.maxTime, ref, nil)
		}
	}

	if includeInOrder {
		tmpChks = appendSeriesChunks(s, mint, maxt, tmpChks)
	}

	// There is nothing to do if we did not collect any chunk.
	if len(tmpChks) == 0 {
		return nil

@@ -206,17 +206,17 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
			if c.Chunk != nil {
				(*chks)[len(*chks)-1].Chunk = c.Chunk
			}
			(*chks)[len(*chks)-1].MergeOOO = (*chks)[len(*chks)-1].MergeOOO || c.MergeOOO
		}
	}

	return nil
}

// LabelValues needs to be overridden from the headIndexReader implementation due
// to the check that happens at the beginning where we make sure that the query
// interval overlaps with the head minooot and maxooot.
func (oh *OOOHeadIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
	if oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxOOOTime() {
// LabelValues needs to be overridden from the headIndexReader implementation
// so we can return labels within either in-order range or ooo range.
func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
	if oh.maxt < oh.head.MinTime() && oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxTime() && oh.mint > oh.head.MaxOOOTime() {
		return []string{}, nil
	}
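Because the reader now serves in-order and out-of-order data together, the early return in `LabelValues` only fires when the query window misses both ranges. A sketch of the widened predicate with made-up range values:

```go
package main

import "fmt"

// outsideBoth mirrors the new guard: bail out only if [mint, maxt] lies
// entirely before both ranges or entirely after both ranges.
func outsideBoth(mint, maxt, inMin, inMax, oooMin, oooMax int64) bool {
	return maxt < inMin && maxt < oooMin || mint > inMax && mint > oooMax
}

func main() {
	// Query [100,100] vs in-order data [90,120] and OOO data [200,250]:
	fmt.Println(outsideBoth(100, 100, 90, 120, 200, 250)) // false: the in-order range overlaps
	// Query [60,70] misses both ranges entirely:
	fmt.Println(outsideBoth(60, 70, 90, 120, 200, 250)) // true
}
```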
@@ -268,41 +268,30 @@ func lessByMinTimeAndMinRef(a, b chunks.Meta) int {
	}
}

func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
	switch len(values) {
	case 0:
		return index.EmptyPostings(), nil
	case 1:
		return oh.head.postings.Get(name, values[0]), nil // TODO(ganesh) Also call GetOOOPostings
	default:
		// TODO(ganesh) We want to only return postings for out of order series.
		res := make([]index.Postings, 0, len(values))
		for _, value := range values {
			res = append(res, oh.head.postings.Get(name, value)) // TODO(ganesh) Also call GetOOOPostings
		}
		return index.Merge(ctx, res...), nil
	}
}

type OOOHeadChunkReader struct {
type HeadAndOOOChunkReader struct {
	head       *Head
	mint, maxt int64
	isoState   *oooIsolationState
	cr          *headChunkReader // If nil, only read OOO chunks.
	maxMmapRef  chunks.ChunkDiskMapperRef
	oooIsoState *oooIsolationState
}

func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *OOOHeadChunkReader {
	return &OOOHeadChunkReader{
func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, cr *headChunkReader, oooIsoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOChunkReader {
	return &HeadAndOOOChunkReader{
		head:       head,
		mint:       mint,
		maxt:       maxt,
		isoState:   isoState,
		cr:          cr,
		maxMmapRef:  maxMmapRef,
		oooIsoState: oooIsoState,
	}
}

func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
	sid, _ := chunks.HeadChunkRef(meta.Ref).Unpack()
func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
	sid, _, _ := unpackHeadChunkRef(meta.Ref)
	if !meta.MergeOOO {
		return cr.cr.ChunkOrIterable(meta)
	}

	s := cr.head.series.getByID(sid)
	// This means that the series has been garbage collected.

@@ -311,34 +300,38 @@ func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk,
	}

	s.Lock()
	if s.ooo == nil {
		// There is no OOO data for this series.
	if s.ooo == nil { // Must have s.ooo non-nil to call mergedChunks().
		s.Unlock()
		return nil, nil, storage.ErrNotFound
		return cr.cr.ChunkOrIterable(meta)
	}
	mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt, cr.maxMmapRef)
	mc, err := s.mergedChunks(meta, cr.head.chunkDiskMapper, cr.cr, cr.mint, cr.maxt, cr.maxMmapRef)
	s.Unlock()
	if err != nil {
		return nil, nil, err

	return nil, mc, err
}

	// This means that the query range did not overlap with the requested chunk.
	if len(mc.chunkIterables) == 0 {
		return nil, nil, storage.ErrNotFound
// ChunkOrIterableWithCopy implements ChunkReaderWithCopy. The special Copy
// behaviour is only implemented for the in-order head chunk.
func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
	if !meta.MergeOOO {
		return cr.cr.ChunkOrIterableWithCopy(meta)
	}
	chk, iter, err := cr.ChunkOrIterable(meta)
	return chk, iter, 0, err
}

	return nil, mc, nil
func (cr *HeadAndOOOChunkReader) Close() error {
	if cr.cr != nil && cr.cr.isoState != nil {
		cr.cr.isoState.Close()
	}

func (cr OOOHeadChunkReader) Close() error {
	if cr.isoState != nil {
		cr.isoState.Close()
	if cr.oooIsoState != nil {
		cr.oooIsoState.Close()
	}
	return nil
}
type OOOCompactionHead struct {
|
||||
oooIR *OOOHeadIndexReader
|
||||
head *Head
|
||||
lastMmapRef chunks.ChunkDiskMapperRef
|
||||
lastWBLFile int
|
||||
postings []storage.SeriesRef
|
||||
|
@ -355,6 +348,7 @@ type OOOCompactionHead struct {
|
|||
// on the sample append latency. So call NewOOOCompactionHead only right before compaction.
|
||||
func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, error) {
|
||||
ch := &OOOCompactionHead{
|
||||
head: head,
|
||||
chunkRange: head.chunkRange.Load(),
|
||||
mint: math.MaxInt64,
|
||||
maxt: math.MinInt64,
|
||||
|
@ -368,15 +362,14 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
|
|||
ch.lastWBLFile = lastWBLFile
|
||||
}
|
||||
|
||||
ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64, 0)
|
||||
hr := headIndexReader{head: head, mint: ch.mint, maxt: ch.maxt}
|
||||
n, v := index.AllPostingsKey()
|
||||
|
||||
// TODO: verify this gets only ooo samples.
|
||||
p, err := ch.oooIR.Postings(ctx, n, v)
|
||||
// TODO: filter to series with OOO samples, before sorting.
|
||||
p, err := hr.Postings(ctx, n, v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p = ch.oooIR.SortedPostings(p)
|
||||
p = hr.SortedPostings(p)
|
||||
|
||||
var lastSeq, lastOff int
|
||||
for p.Next() {
|
||||
|
@ -397,7 +390,7 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
|
|||
}
|
||||
|
||||
var lastMmapRef chunks.ChunkDiskMapperRef
|
||||
mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
|
||||
mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper, head.logger)
|
||||
if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
|
||||
// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
|
||||
mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref}
|
||||
|
@ -433,7 +426,7 @@ func (ch *OOOCompactionHead) Index() (IndexReader, error) {
|
|||
}
|
||||
|
||||
func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) {
|
||||
return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil, ch.lastMmapRef), nil
|
||||
return NewHeadAndOOOChunkReader(ch.head, ch.mint, ch.maxt, nil, nil, ch.lastMmapRef), nil
|
||||
}
|
||||
|
||||
func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) {
|
||||
|
@ -459,12 +452,12 @@ func (ch *OOOCompactionHead) Meta() BlockMeta {
|
|||
// Only the method of BlockReader interface are valid for the cloned OOOCompactionHead.
|
||||
func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionHead {
|
||||
return &OOOCompactionHead{
|
||||
oooIR: NewOOOHeadIndexReader(ch.oooIR.head, mint, maxt, 0),
|
||||
head: ch.head,
|
||||
lastMmapRef: ch.lastMmapRef,
|
||||
postings: ch.postings,
|
||||
chunkRange: ch.chunkRange,
|
||||
mint: ch.mint,
|
||||
maxt: ch.maxt,
|
||||
mint: mint,
|
||||
maxt: maxt,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -484,7 +477,8 @@ func NewOOOCompactionHeadIndexReader(ch *OOOCompactionHead) IndexReader {
|
|||
}
|
||||
|
||||
func (ir *OOOCompactionHeadIndexReader) Symbols() index.StringIter {
|
||||
return ir.ch.oooIR.Symbols()
|
||||
hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt}
|
||||
return hr.Symbols()
|
||||
}
|
||||
|
||||
func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string, values ...string) (index.Postings, error) {
|
||||
|
@ -505,11 +499,28 @@ func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.P
|
|||
}
|
||||
|
||||
func (ir *OOOCompactionHeadIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings {
|
||||
return ir.ch.oooIR.ShardedPostings(p, shardIndex, shardCount)
|
||||
hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt}
|
||||
return hr.ShardedPostings(p, shardIndex, shardCount)
|
||||
}
|
||||
|
||||
func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
|
||||
return ir.ch.oooIR.series(ref, builder, chks, 0, ir.ch.lastMmapRef)
|
||||
s := ir.ch.head.series.getByID(chunks.HeadSeriesRef(ref))
|
||||
|
||||
if s == nil {
|
||||
ir.ch.head.metrics.seriesNotFound.Inc()
|
||||
return storage.ErrNotFound
|
||||
}
|
||||
builder.Assign(s.labels())
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
*chks = (*chks)[:0]
|
||||
|
||||
if s.ooo == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, chks)
|
||||
}
|
||||
|
||||
func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
|
||||
|
@ -537,5 +548,91 @@ func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, posti
|
|||
}
|
||||
|
||||
func (ir *OOOCompactionHeadIndexReader) Close() error {
|
||||
return ir.ch.oooIR.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// HeadAndOOOQuerier queries both the head and the out-of-order head.
|
||||
type HeadAndOOOQuerier struct {
|
||||
mint, maxt int64
|
||||
head *Head
|
||||
index IndexReader
|
||||
chunkr ChunkReader
|
||||
querier storage.Querier
|
||||
}
|
||||
|
||||
func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier {
|
||||
cr := &headChunkReader{
|
||||
head: head,
|
||||
mint: mint,
|
||||
maxt: maxt,
|
||||
isoState: head.iso.State(mint, maxt),
|
||||
}
|
||||
return &HeadAndOOOQuerier{
|
||||
mint: mint,
|
||||
maxt: maxt,
|
||||
head: head,
|
||||
index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef),
|
||||
chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
|
||||
querier: querier,
|
||||
}
|
||||
}
|
||||
|
||||
func (q *HeadAndOOOQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
|
||||
return q.querier.LabelValues(ctx, name, hints, matchers...)
|
||||
}
|
||||
|
||||
func (q *HeadAndOOOQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
|
||||
return q.querier.LabelNames(ctx, hints, matchers...)
|
||||
}
|
||||
|
||||
func (q *HeadAndOOOQuerier) Close() error {
|
||||
q.chunkr.Close()
|
||||
return q.querier.Close()
|
||||
}
|
||||
|
||||
func (q *HeadAndOOOQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
|
||||
return selectSeriesSet(ctx, sortSeries, hints, matchers, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt)
|
||||
}
+
+// HeadAndOOOChunkQuerier queries both the head and the out-of-order head.
+type HeadAndOOOChunkQuerier struct {
+	mint, maxt int64
+	head       *Head
+	index      IndexReader
+	chunkr     ChunkReader
+	querier    storage.ChunkQuerier
+}
+
+func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier {
+	cr := &headChunkReader{
+		head:     head,
+		mint:     mint,
+		maxt:     maxt,
+		isoState: head.iso.State(mint, maxt),
+	}
+	return &HeadAndOOOChunkQuerier{
+		mint:    mint,
+		maxt:    maxt,
+		head:    head,
+		index:   NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef),
+		chunkr:  NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0),
+		querier: querier,
+	}
+}
+
+func (q *HeadAndOOOChunkQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	return q.querier.LabelValues(ctx, name, hints, matchers...)
+}
+
+func (q *HeadAndOOOChunkQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	return q.querier.LabelNames(ctx, hints, matchers...)
+}
+
+func (q *HeadAndOOOChunkQuerier) Close() error {
+	q.chunkr.Close()
+	return q.querier.Close()
+}
+
+func (q *HeadAndOOOChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
+	return selectChunkSeriesSet(ctx, sortSeries, hints, matchers, rangeHeadULID, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt)
+}

@@ -35,7 +35,7 @@ import (
 var (
 	_ chunkenc.Chunk    = &MultiChunk{}
 	_ chunkenc.Iterable = &mergedOOOChunks{}
-	_ IndexReader       = &OOOHeadIndexReader{}
+	_ IndexReader       = &HeadAndOOOIndexReader{}
 )

 type chunkInterval struct {
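
The `var _ IndexReader = ...` lines in the hunk above are compile-time interface assertions: they cost nothing at runtime and break the build as soon as a type stops satisfying the interface. A self-contained illustration (IndexReader and the reader type here are hypothetical stand-ins):

package main

type IndexReader interface{ Close() error }

type headAndOOOIndexReader struct{}

func (*headAndOOOIndexReader) Close() error { return nil }

// Fails to compile the moment headAndOOOIndexReader stops implementing IndexReader.
var _ IndexReader = &headAndOOOIndexReader{}

func main() {}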

@@ -318,12 +318,13 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
 				Chunk:   chunkenc.Chunk(nil),
 				MinTime: e.mint,
 				MaxTime: e.maxt,
+				MergeOOO: true, // Only OOO chunks are tested here, so we always request merge from OOO head.
 			}

 			// Ref to whatever Ref the chunk has, that we refer to by ID
 			for ref, c := range intervals {
 				if c.ID == e.ID {
-					meta.Ref = chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), chunks.HeadChunkID(ref)))
+					meta.Ref = chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), s1.oooHeadChunkID(ref)))
 					break
 				}
 			}

@@ -348,7 +349,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
 		})
 	}

-	ir := NewOOOHeadIndexReader(h, tc.queryMinT, tc.queryMaxT, 0)
+	ir := NewHeadAndOOOIndexReader(h, tc.queryMinT, tc.queryMaxT, 0)

 	var chks []chunks.Meta
 	var b labels.ScratchBuilder

@@ -429,17 +430,17 @@ func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenari
 			name:      "LabelValues calls with ooo head query range not overlapping out-of-order data",
 			queryMinT: 100,
 			queryMaxT: 100,
-			expValues1: []string{},
-			expValues2: []string{},
-			expValues3: []string{},
-			expValues4: []string{},
+			expValues1: []string{"bar1"},
+			expValues2: nil,
+			expValues3: []string{"bar1", "bar2"},
+			expValues4: []string{"bar1", "bar2"},
 		},
 	}

 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
 			// We first want to test using a head index reader that covers the biggest query interval
-			oh := NewOOOHeadIndexReader(head, tc.queryMinT, tc.queryMaxT, 0)
+			oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMaxT, 0)
 			matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")}
 			values, err := oh.LabelValues(ctx, "foo", matchers...)
 			sort.Strings(values)

@@ -491,10 +492,10 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
 	t.Run("Getting a non existing chunk fails with not found error", func(t *testing.T) {
 		db := newTestDBWithOpts(t, opts)

-		cr := NewOOOHeadChunkReader(db.head, 0, 1000, nil, 0)
+		cr := NewHeadAndOOOChunkReader(db.head, 0, 1000, nil, nil, 0)
 		defer cr.Close()
 		c, iterable, err := cr.ChunkOrIterable(chunks.Meta{
-			Ref: 0x1000000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300,
+			Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300, MergeOOO: true,
 		})
 		require.Nil(t, iterable)
 		require.Equal(t, err, fmt.Errorf("not found"))

@@ -842,14 +843,14 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {

 		// The Series method populates the chunk metas, taking a copy of the
 		// head OOO chunk if necessary. These are then used by the ChunkReader.
-		ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
+		ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
 		var chks []chunks.Meta
 		var b labels.ScratchBuilder
 		err = ir.Series(s1Ref, &b, &chks)
 		require.NoError(t, err)
 		require.Equal(t, len(tc.expChunksSamples), len(chks))

-		cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, 0)
+		cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0)
 		defer cr.Close()
 		for i := 0; i < len(chks); i++ {
 			c, iterable, err := cr.ChunkOrIterable(chks[i])

@@ -1009,7 +1010,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(

 		// The Series method populates the chunk metas, taking a copy of the
 		// head OOO chunk if necessary. These are then used by the ChunkReader.
-		ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
+		ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
 		var chks []chunks.Meta
 		var b labels.ScratchBuilder
 		err = ir.Series(s1Ref, &b, &chks)

@@ -1025,7 +1026,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
 		}
 		require.NoError(t, app.Commit())

-		cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, 0)
+		cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0)
 		defer cr.Close()
 		for i := 0; i < len(chks); i++ {
 			c, iterable, err := cr.ChunkOrIterable(chks[i])

@@ -115,20 +115,24 @@ func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) {
 }

 func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet {
-	mint := q.mint
-	maxt := q.maxt
+	return selectSeriesSet(ctx, sortSeries, hints, ms, q.index, q.chunks, q.tombstones, q.mint, q.maxt)
+}
+
+func selectSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher,
+	index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64,
+) storage.SeriesSet {
 	disableTrimming := false
 	sharded := hints != nil && hints.ShardCount > 0

-	p, err := PostingsForMatchers(ctx, q.index, ms...)
+	p, err := PostingsForMatchers(ctx, index, ms...)
 	if err != nil {
 		return storage.ErrSeriesSet(err)
 	}
 	if sharded {
-		p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
+		p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
 	}
 	if sortSeries {
-		p = q.index.SortedPostings(p)
+		p = index.SortedPostings(p)
 	}

 	if hints != nil {

@@ -137,11 +141,11 @@ func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *stora
 		disableTrimming = hints.DisableTrimming
 		if hints.Func == "series" {
 			// When you're only looking up metadata (for example series API), you don't need to load any chunks.
-			return newBlockSeriesSet(q.index, newNopChunkReader(), q.tombstones, p, mint, maxt, disableTrimming)
+			return newBlockSeriesSet(index, newNopChunkReader(), tombstones, p, mint, maxt, disableTrimming)
 		}
 	}

-	return newBlockSeriesSet(q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming)
+	return newBlockSeriesSet(index, chunks, tombstones, p, mint, maxt, disableTrimming)
 }

 // blockChunkQuerier provides chunk querying access to a single block database.

@@ -159,8 +163,12 @@ func NewBlockChunkQuerier(b BlockReader, mint, maxt int64) (storage.ChunkQuerier
 }

 func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet {
-	mint := q.mint
-	maxt := q.maxt
+	return selectChunkSeriesSet(ctx, sortSeries, hints, ms, q.blockID, q.index, q.chunks, q.tombstones, q.mint, q.maxt)
+}
+
+func selectChunkSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher,
+	blockID ulid.ULID, index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64,
+) storage.ChunkSeriesSet {
 	disableTrimming := false
 	sharded := hints != nil && hints.ShardCount > 0

@@ -169,17 +177,17 @@ func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *
 		maxt = hints.End
 		disableTrimming = hints.DisableTrimming
 	}
-	p, err := PostingsForMatchers(ctx, q.index, ms...)
+	p, err := PostingsForMatchers(ctx, index, ms...)
 	if err != nil {
 		return storage.ErrChunkSeriesSet(err)
 	}
 	if sharded {
-		p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
+		p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
 	}
 	if sortSeries {
-		p = q.index.SortedPostings(p)
+		p = index.SortedPostings(p)
 	}
-	return NewBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming)
+	return NewBlockChunkSeriesSet(blockID, index, chunks, tombstones, p, mint, maxt, disableTrimming)
 }
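
Both Select methods are now thin wrappers over package-level functions (selectSeriesSet, selectChunkSeriesSet) that take their dependencies as explicit parameters, which is what lets HeadAndOOOQuerier and HeadAndOOOChunkQuerier reuse the same pipeline. A minimal sketch of that refactoring pattern, with hypothetical names (index, selectSet, blockQuerier, headQuerier):

package main

import "fmt"

type index interface{ postings(match string) []int }

type mapIndex map[string][]int

func (m mapIndex) postings(match string) []int { return m[match] }

// Shared pipeline, formerly a method on a single querier type.
func selectSet(ix index, match string, sorted bool) []int {
	p := ix.postings(match)
	if sorted {
		// A sorting step over the postings would go here.
	}
	return p
}

type blockQuerier struct{ ix index }

func (q blockQuerier) Select(match string) []int { return selectSet(q.ix, match, false) }

type headQuerier struct{ ix index }

func (q headQuerier) Select(match string) []int { return selectSet(q.ix, match, true) }

func main() {
	ix := mapIndex{"up": {1, 2, 3}}
	fmt.Println(blockQuerier{ix}.Select("up"), headQuerier{ix}.Select("up"))
}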

 // PostingsForMatchers assembles a single postings iterator against the index reader

@@ -633,14 +641,16 @@ func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool {
 		}
 	}

-	hcr, ok := p.cr.(*headChunkReader)
+	hcr, ok := p.cr.(ChunkReaderWithCopy)
 	var iterable chunkenc.Iterable
 	if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 {
-		// ChunkWithCopy will copy the head chunk.
+		// ChunkOrIterableWithCopy will copy the head chunk, if it can.
 		var maxt int64
-		p.currMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currMeta)
+		p.currMeta.Chunk, iterable, maxt, p.err = hcr.ChunkOrIterableWithCopy(p.currMeta)
+		if p.currMeta.Chunk != nil {
 			// For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here.
 			p.currMeta.MaxTime = maxt
+		}
 	} else {
 		p.currMeta.Chunk, iterable, p.err = p.cr.ChunkOrIterable(p.currMeta)
 	}
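
The type assertion above now targets a small capability interface (ChunkReaderWithCopy) instead of the concrete *headChunkReader, so any reader can opt into the copy fast path. A self-contained sketch of this optional-capability pattern; chunkReader, chunkReaderWithCopy, and the reader types are hypothetical:

package main

import "fmt"

type chunkReader interface{ chunk(ref int) string }

// Optional fast path, analogous to ChunkReaderWithCopy.
type chunkReaderWithCopy interface {
	chunkWithCopy(ref int) string
}

type plainReader struct{}

func (plainReader) chunk(ref int) string { return fmt.Sprintf("chunk-%d", ref) }

type copyingReader struct{ plainReader }

func (copyingReader) chunkWithCopy(ref int) string { return "copied-chunk" }

func read(r chunkReader, ref int) string {
	if cr, ok := r.(chunkReaderWithCopy); ok {
		return cr.chunkWithCopy(ref) // fast path when the capability exists
	}
	return r.chunk(ref)
}

func main() {
	fmt.Println(read(plainReader{}, 7), read(copyingReader{}, 7))
}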

@@ -962,7 +972,7 @@ func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool {
 	// Check if the encoding has changed (i.e. we need to create a new
 	// chunk as chunks can't have multiple encoding types).
 	// For the first sample, the following condition will always be true as
-	// ValNoneNone != ValFloat | ValHistogram | ValFloatHistogram.
+	// ValNone != ValFloat | ValHistogram | ValFloatHistogram.
 	if currentValueType != prevValueType {
 		if prevValueType != chunkenc.ValNone {
 			p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})

@@ -20,6 +20,7 @@ import (
 	"testing"

 	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/index"

 	"github.com/stretchr/testify/require"

@@ -254,25 +255,32 @@ func BenchmarkMergedStringIter(b *testing.B) {
 	b.ReportAllocs()
 }

-func BenchmarkQuerierSelect(b *testing.B) {
-	opts := DefaultHeadOptions()
-	opts.ChunkRange = 1000
-	opts.ChunkDirRoot = b.TempDir()
-	h, err := NewHead(nil, nil, nil, nil, opts, nil)
+func createHeadForBenchmarkSelect(b *testing.B, numSeries int, addSeries func(app storage.Appender, i int)) (*Head, *DB) {
+	dir := b.TempDir()
+	opts := DefaultOptions()
+	opts.OutOfOrderCapMax = 255
+	opts.OutOfOrderTimeWindow = 1000
+	db, err := Open(dir, nil, nil, opts, nil)
 	require.NoError(b, err)
-	defer h.Close()
+	b.Cleanup(func() {
+		require.NoError(b, db.Close())
+	})
+	h := db.Head()

 	app := h.Appender(context.Background())
-	numSeries := 1000000
 	for i := 0; i < numSeries; i++ {
-		app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0)
+		addSeries(app, i)
 	}
 	require.NoError(b, app.Commit())
+	return h, db
+}

-	bench := func(b *testing.B, br BlockReader, sorted bool) {
+func benchmarkSelect(b *testing.B, queryable storage.Queryable, numSeries int, sorted bool) {
 	matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")
-	b.ResetTimer()
 	for s := 1; s <= numSeries; s *= 10 {
 		b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) {
-			q, err := NewBlockQuerier(br, 0, int64(s-1))
+			q, err := queryable.Querier(0, int64(s-1))
 			require.NoError(b, err)

 			b.ResetTimer()

@@ -287,13 +295,23 @@ func BenchmarkQuerierSelect(b *testing.B) {
 		}
 	}

-	b.Run("Head", func(b *testing.B) {
-		bench(b, h, false)
-	})
-	b.Run("SortedHead", func(b *testing.B) {
-		bench(b, h, true)
+func BenchmarkQuerierSelect(b *testing.B) {
+	numSeries := 1000000
+	h, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) {
+		_, err := app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0)
+		if err != nil {
+			b.Fatal(err)
+		}
+	})
+
+	b.Run("Head", func(b *testing.B) {
+		benchmarkSelect(b, db, numSeries, false)
+	})
+	b.Run("SortedHead", func(b *testing.B) {
+		benchmarkSelect(b, db, numSeries, true)
 	})

 	b.Run("Block", func(b *testing.B) {
 		tmpdir := b.TempDir()

 		blockdir := createBlockFromHead(b, tmpdir, h)

@@ -303,7 +321,32 @@ func BenchmarkQuerierSelect(b *testing.B) {
 		require.NoError(b, block.Close())
 	}()

 	b.Run("Block", func(b *testing.B) {
-		bench(b, block, false)
+		benchmarkSelect(b, (*queryableBlock)(block), numSeries, false)
 	})
 }

+// Type wrapper to let a Block be a Queryable in benchmarkSelect().
+type queryableBlock Block
+
+func (pb *queryableBlock) Querier(mint, maxt int64) (storage.Querier, error) {
+	return NewBlockQuerier((*Block)(pb), mint, maxt)
+}
+
+func BenchmarkQuerierSelectWithOutOfOrder(b *testing.B) {
+	numSeries := 1000000
+	_, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) {
+		l := labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix))
+		ref, err := app.Append(0, l, int64(i+1), 0)
+		if err != nil {
+			b.Fatal(err)
+		}
+		_, err = app.Append(ref, l, int64(i), 1) // Out of order sample
+		if err != nil {
+			b.Fatal(err)
+		}
+	})
+
+	b.Run("Head", func(b *testing.B) {
+		benchmarkSelect(b, db, numSeries, false)
+	})
+}
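
queryableBlock above is a defined type with the same underlying structure as Block: converting between the two is free, and only the method set changes, which is enough to satisfy storage.Queryable. A self-contained sketch of the adapter pattern; queryable and block are hypothetical stand-ins:

package main

import "fmt"

type queryable interface{ querier() string }

type block struct{ name string }

type queryableBlock block // same memory layout as block, different method set

func (pb *queryableBlock) querier() string { return "querier for " + (*block)(pb).name }

func main() {
	b := &block{name: "01HXYZ"}
	var q queryable = (*queryableBlock)(b) // zero-cost conversion
	fmt.Println(q.querier())
}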

@@ -3169,12 +3169,11 @@ func BenchmarkQueries(b *testing.B) {

 			qHead, err := NewBlockQuerier(NewRangeHead(head, 1, nSamples), 1, nSamples)
 			require.NoError(b, err)
-			qOOOHead, err := NewBlockQuerier(NewOOORangeHead(head, 1, nSamples, 0), 1, nSamples)
-			require.NoError(b, err)
+			isoState := head.oooIso.TrackReadAfter(0)
+			qOOOHead := NewHeadAndOOOQuerier(1, nSamples, head, isoState, qHead)

 			queryTypes = append(queryTypes, qt{
-				fmt.Sprintf("_Head_oooPercent:%d", oooPercentage),
-				storage.NewMergeQuerier([]storage.Querier{qHead, qOOOHead}, nil, storage.ChainedSeriesMerge),
+				fmt.Sprintf("_Head_oooPercent:%d", oooPercentage), qOOOHead,
 			})
 		}


@@ -58,15 +58,16 @@ type WriteTo interface {
 	StoreSeries([]record.RefSeries, int)
 	StoreMetadata([]record.RefMetadata)

-	// Next two methods are intended for garbage-collection: first we call
-	// UpdateSeriesSegment on all current series
+	// UpdateSeriesSegment and SeriesReset are intended for
+	// garbage-collection:
+	// First we call UpdateSeriesSegment on all current series.
 	UpdateSeriesSegment([]record.RefSeries, int)
-	// Then SeriesReset is called to allow the deletion
-	// of all series created in a segment lower than the argument.
+	// Then SeriesReset is called to allow the deletion of all series
+	// created in a segment lower than the argument.
 	SeriesReset(int)
 }

-// Used to notify the watcher that data has been written so that it can read.
+// WriteNotified notifies the watcher that data has been written so that it can read.
 type WriteNotified interface {
 	Notify()
 }
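
A hypothetical minimal implementation of the two-step garbage-collection contract the reworded comment describes: record the newest segment each series was seen in, then drop everything created before a given segment.

package main

import "fmt"

type gcWriter struct {
	segments map[string]int // series -> last segment it was seen in
}

// UpdateSeriesSegment is called first, for all currently live series.
func (w *gcWriter) UpdateSeriesSegment(series []string, segment int) {
	for _, s := range series {
		w.segments[s] = segment
	}
}

// SeriesReset then deletes every series whose segment is below the argument.
func (w *gcWriter) SeriesReset(segment int) {
	for s, seg := range w.segments {
		if seg < segment {
			delete(w.segments, s)
		}
	}
}

func main() {
	w := &gcWriter{segments: map[string]int{}}
	w.UpdateSeriesSegment([]string{"a", "b"}, 1)
	w.UpdateSeriesSegment([]string{"b"}, 3)
	w.SeriesReset(2)
	fmt.Println(w.segments) // map[b:3]
}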

@@ -572,6 +573,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
 				w.writer.AppendHistograms(histogramsToSend)
 				histogramsToSend = histogramsToSend[:0]
 			}
+
 		case record.FloatHistogramSamples:
 			// Skip if experimental "histograms over remote write" is not enabled.
 			if !w.sendHistograms {

@@ -610,11 +612,13 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
 				return err
 			}
 			w.writer.StoreMetadata(meta)
-		case record.Tombstones:
-
-		default:
+		case record.Unknown:
 			// Could be corruption, or reading from a WAL from a newer Prometheus.
 			w.recordDecodeFailsMetric.Inc()
+
+		default:
+			// We're not interested in other types of records.
 		}
 	}
 	if err := r.Err(); err != nil {

@@ -643,14 +647,12 @@ func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error
 			}
 			w.writer.UpdateSeriesSegment(series, segmentNum)

-		// Ignore these; we're only interested in series.
-		case record.Samples:
-		case record.Exemplars:
-		case record.Tombstones:
-
-		default:
+		case record.Unknown:
 			// Could be corruption, or reading from a WAL from a newer Prometheus.
 			w.recordDecodeFailsMetric.Inc()
+
+		default:
+			// We're only interested in series.
 		}
 	}
 	if err := r.Err(); err != nil {
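
Both switches above now give record.Unknown its own case, tied to the decode-failure metric, and demote default to a documented no-op for record types that are known but irrelevant. A sketch of the same shape with a stand-in record enum:

package main

import "fmt"

type recType int

const (
	unknown recType = iota
	seriesRec
	samplesRec
	tombstonesRec
)

func handle(t recType) {
	switch t {
	case seriesRec:
		fmt.Println("store series")
	case unknown:
		// Could be corruption, or a WAL written by a newer version.
		fmt.Println("decode failure")
	default:
		// Samples, tombstones, ...: deliberately ignored by this reader.
	}
}

func main() {
	handle(seriesRec)
	handle(unknown)
	handle(samplesRec)
}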

@@ -38,8 +38,8 @@ import (
 )

 const (
-	DefaultSegmentSize = 128 * 1024 * 1024 // 128 MB
-	pageSize           = 32 * 1024         // 32KB
+	DefaultSegmentSize = 128 * 1024 * 1024 // DefaultSegmentSize is 128 MB.
+	pageSize           = 32 * 1024         // pageSize is 32KB.
 	recordHeaderSize   = 7
 	WblDirName         = "wbl"
 )

@@ -612,16 +612,16 @@ func (w *WL) setSegment(segment *Segment) error {

 // flushPage writes the new contents of the page to disk. If no more records will fit into
 // the page, the remaining bytes will be set to zero and a new page will be started.
-// If clear is true, this is enforced regardless of how many bytes are left in the page.
-func (w *WL) flushPage(clear bool) error {
+// If forceClear is true, this is enforced regardless of how many bytes are left in the page.
+func (w *WL) flushPage(forceClear bool) error {
 	w.metrics.pageFlushes.Inc()

 	p := w.page
-	clear = clear || p.full()
+	shouldClear := forceClear || p.full()

 	// No more data will fit into the page or an implicit clear.
 	// Enqueue and clear it.
-	if clear {
+	if shouldClear {
 		p.alloc = pageSize // Write till end of page.
 	}

@@ -633,7 +633,7 @@ func (w *WL) flushPage(clear bool) error {
 	p.flushed += n

 	// We flushed an entire page, prepare a new one.
-	if clear {
+	if shouldClear {
 		p.reset()
 		w.donePages++
 		w.metrics.pageCompletions.Inc()
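
The rename from clear to forceClear/shouldClear plausibly sidesteps the clear builtin introduced in Go 1.21, which a parameter named clear would shadow inside the function body; that reading is an assumption, not stated in the diff. A small illustration:

package main

import "fmt"

func flush(forceClear bool, page map[int][]byte) {
	// With a parameter named "clear", the call below would not compile:
	// the bool parameter would shadow the builtin.
	if forceClear {
		clear(page) // builtin (Go 1.21+): removes all entries from the map
	}
	fmt.Println(len(page))
}

func main() {
	flush(true, map[int][]byte{1: {0x01}})
}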

@@ -174,7 +174,7 @@ func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error {
 	}
 }

-// NewInvalidQuantileWarning is used when the user specifies an invalid ratio
+// NewInvalidRatioWarning is used when the user specifies an invalid ratio
 // value, i.e. a float that is outside the range [-1, 1] or NaN.
 func NewInvalidRatioWarning(q, to float64, pos posrange.PositionRange) error {
 	return annoErr{

@@ -23,13 +23,14 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 )

-// Replacement for require.Equal using go-cmp adapted for Prometheus data structures, instead of DeepEqual.
+// RequireEqual is a replacement for require.Equal using go-cmp adapted for
+// Prometheus data structures, instead of DeepEqual.
 func RequireEqual(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) {
 	t.Helper()
 	RequireEqualWithOptions(t, expected, actual, nil, msgAndArgs...)
 }

-// As RequireEqual but allows extra cmp.Options.
+// RequireEqualWithOptions works like RequireEqual but allows extra cmp.Options.
 func RequireEqualWithOptions(t testing.TB, expected, actual interface{}, extra []cmp.Option, msgAndArgs ...interface{}) {
 	t.Helper()
 	options := append([]cmp.Option{cmp.Comparer(labels.Equal)}, extra...)
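
RequireEqualWithOptions appends caller-supplied options after the built-in labels.Equal comparer. A short example of what such an extra cmp.Option can do, using a hypothetical timestamp type:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type timestamp struct{ ms int64 }

func main() {
	// Treat timestamps within 1ms of each other as equal, mirroring how a
	// test could pass an extra option beyond the default label comparer.
	opt := cmp.Comparer(func(a, b timestamp) bool {
		d := a.ms - b.ms
		return d >= -1 && d <= 1
	})
	fmt.Println(cmp.Equal(timestamp{1000}, timestamp{1001}, opt)) // true
}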

@@ -155,7 +155,7 @@ func DirHash(t *testing.T, path string) []byte {
 		modTime, err := info.ModTime().GobEncode()
 		require.NoError(t, err)

-		_, err = io.WriteString(hash, string(modTime))
+		_, err = hash.Write(modTime)
 		require.NoError(t, err)
 		return nil
 	})

@@ -481,14 +481,14 @@ func New(logger log.Logger, o *Options) *Handler {

 	router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusOK)
-		fmt.Fprintf(w, o.AppName+" is Healthy.\n")
+		fmt.Fprintf(w, "%s is Healthy.\n", o.AppName)
 	})
 	router.Head("/-/healthy", func(w http.ResponseWriter, _ *http.Request) {
 		w.WriteHeader(http.StatusOK)
 	})
 	router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusOK)
-		fmt.Fprintf(w, o.AppName+" is Ready.\n")
+		fmt.Fprintf(w, "%s is Ready.\n", o.AppName)
 	}))
 	router.Head("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusOK)