Merge branch 'main' into sort-matrix-series

# Conflicts:
#	promql/engine_test.go
Charles Korn, 2024-05-31 12:44:27 +10:00
commit 24ef000025
98 changed files with 2150 additions and 1906 deletions


@@ -161,21 +161,20 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - name: Install Go
         uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
         with:
-          cache: false
           go-version: 1.22.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
+        uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.56.2
+          version: v1.59.0
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'

.gitpod.Dockerfile

@@ -1,15 +1,33 @@
 FROM gitpod/workspace-full

+# Set Node.js version as an environment variable.
 ENV CUSTOM_NODE_VERSION=16
-ENV CUSTOM_GO_VERSION=1.19
-ENV GOPATH=$HOME/go-packages
-ENV GOROOT=$HOME/go
-ENV PATH=$GOROOT/bin:$GOPATH/bin:$PATH

+# Install and use the specified Node.js version via nvm.
 RUN bash -c ". .nvm/nvm.sh && nvm install ${CUSTOM_NODE_VERSION} && nvm use ${CUSTOM_NODE_VERSION} && nvm alias default ${CUSTOM_NODE_VERSION}"

+# Ensure nvm uses the default Node.js version in all new shells.
 RUN echo "nvm use default &>/dev/null" >> ~/.bashrc.d/51-nvm-fix

-RUN curl -fsSL https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz | tar xzs \
-    && printf '%s\n' 'export GOPATH=/workspace/go' \
-    'export PATH=$GOPATH/bin:$PATH' > $HOME/.bashrc.d/300-go
+# Remove any existing Go installation in $HOME path.
+RUN rm -rf $HOME/go $HOME/go-packages
+
+# Export go environment variables.
+RUN echo "export GOPATH=/workspace/go" >> ~/.bashrc.d/300-go && \
+    echo "export GOBIN=\$GOPATH/bin" >> ~/.bashrc.d/300-go && \
+    echo "export GOROOT=${HOME}/go" >> ~/.bashrc.d/300-go && \
+    echo "export PATH=\$GOROOT/bin:\$GOBIN:\$PATH" >> ~/.bashrc
+
+# Reload the environment variables to ensure go environment variables are
+# available in subsequent commands.
+RUN bash -c "source ~/.bashrc && source ~/.bashrc.d/300-go"
+
+# Fetch the Go version dynamically from the Prometheus go.mod file and Install Go in $HOME path.
+RUN export CUSTOM_GO_VERSION=$(curl -sSL "https://raw.githubusercontent.com/prometheus/prometheus/main/go.mod" | awk '/^go/{print $2".0"}') && \
+    curl -fsSL "https://dl.google.com/go/go${CUSTOM_GO_VERSION}.linux-amd64.tar.gz" | \
+    tar -xz -C $HOME
+
+# Fetch the goyacc parser version dynamically from the Prometheus Makefile
+# and install it globally in $GOBIN path.
+RUN GOYACC_VERSION=$(curl -fsSL "https://raw.githubusercontent.com/prometheus/prometheus/main/Makefile" | awk -F'=' '/GOYACC_VERSION \?=/{gsub(/ /, "", $2); print $2}') && \
+    go install "golang.org/x/tools/cmd/goyacc@${GOYACC_VERSION}"


@@ -21,6 +21,7 @@ linters:
   - goimports
   - misspell
   - nolintlint
+  - perfsprint
   - predeclared
   - revive
   - testifylint
@@ -44,7 +45,9 @@ issues:
     - linters:
         - godot
       source: "^// ==="
+    - linters:
+        - perfsprint
+      text: "fmt.Sprintf can be replaced with string concatenation"
 linters-settings:
   depguard:
     rules:
@@ -85,6 +88,9 @@ linters-settings:
     local-prefixes: github.com/prometheus/prometheus
   gofumpt:
     extra-rules: true
+  perfsprint:
+    # Optimizes `fmt.Errorf`.
+    errorf: false
   revive:
     # By default, revive will enable only the linting rules that are named in the configuration file.
     # So, it's needed to explicitly set in configuration all required rules.

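Illustration (not part of the commit): the rewrite pattern that `perfsprint` enforces, and which the Go changes below apply throughout — integer formatting via `strconv` instead of reflection-based `fmt.Sprintf`.

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	port := 9090
	// Flagged by perfsprint: %d formatting goes through reflection and allocates.
	before := fmt.Sprintf("%d", port)
	// Preferred rewrite, as applied across the discovery and promtool changes below.
	after := strconv.Itoa(port)
	fmt.Println(before == after) // true
}
```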

@@ -3,6 +3,7 @@
 ## unreleased

 * [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980
+* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061
 * [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` to measure the time it takes to restore a rule group. #13974
 * [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991
 * [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620


@@ -42,7 +42,12 @@ go build ./cmd/prometheus/
 make test # Make sure all the tests pass before you commit and push :)
 ```

-We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action.
+To run a collection of Go linters through [`golangci-lint`](https://github.com/golangci/golangci-lint), do:
+
+```bash
+make lint
+```
+
+If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. See [this section of the golangci-lint documentation](https://golangci-lint.run/usage/false-positives/#nolint-directive) for more information.

 All our issues are regularly tagged so that you can also filter down the issues involving the components you want to work on. For our labeling policy refer [the wiki page](https://github.com/prometheus/prometheus/wiki/Label-Names-and-Descriptions).

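Illustration (not part of the commit): a `//nolint` directive as described above, here suppressing a hypothetical `perfsprint` finding on a single line.

```go
package main

import "fmt"

func main() {
	// Suppresses only the named linter on this one line; the contributing
	// guide advises using this sparingly.
	s := fmt.Sprintf("%d", 42) //nolint:perfsprint
	fmt.Println(s)
}
```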

@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.56.2
+GOLANGCI_LINT_VERSION ?= v1.59.0
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))


@@ -42,6 +42,7 @@ import (
 	"github.com/mwitkow/go-conntrack"
 	"github.com/oklog/run"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/collectors"
 	versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/promlog"
@@ -252,6 +253,18 @@ func main() {
 		newFlagRetentionDuration model.Duration
 	)

+	// Unregister the default GoCollector, and reregister with our defaults.
+	if prometheus.Unregister(collectors.NewGoCollector()) {
+		prometheus.MustRegister(
+			collectors.NewGoCollector(
+				collectors.WithGoCollectorRuntimeMetrics(
+					collectors.MetricsGC,
+					collectors.MetricsScheduler,
+				),
+			),
+		)
+	}
+
 	cfg := flagConfig{
 		notifier: notifier.Options{
 			Registerer: prometheus.DefaultRegisterer,
@@ -418,7 +431,7 @@ func main() {
 	serverOnlyFlag(a, "rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
 		Default("1m").SetValue(&cfg.resendDelay)

-	serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently.").
+	serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently. When set, \"query.max-concurrency\" may need to be adjusted accordingly.").
 		Default("4").Int64Var(&cfg.maxConcurrentEvals)

 	a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
@@ -772,6 +785,9 @@ func main() {
 			ResendDelay:            time.Duration(cfg.resendDelay),
 			MaxConcurrentEvals:     cfg.maxConcurrentEvals,
 			ConcurrentEvalsEnabled: cfg.enableConcurrentRuleEval,
+			DefaultRuleQueryOffset: func() time.Duration {
+				return time.Duration(cfgFile.GlobalConfig.RuleQueryOffset)
+			},
 		})
 	}

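Illustration (not part of the commit): the same collector options exercised on a standalone registry, showing that only the GC and scheduler runtime/metrics groups end up exposed.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()
	// Same option set as the hunk above, applied to a fresh registry.
	reg.MustRegister(collectors.NewGoCollector(
		collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsGC, collectors.MetricsScheduler),
	))
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}
```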

@@ -24,6 +24,7 @@ import (
 	"os/exec"
 	"path/filepath"
 	"runtime"
+	"strconv"
 	"strings"
 	"syscall"
 	"testing"
@@ -189,7 +190,7 @@ func TestSendAlerts(t *testing.T) {
 	for i, tc := range testCases {
 		tc := tc
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
 				require.NotEmpty(t, tc.in, "sender called with 0 alert")
 				require.Equal(t, tc.exp, alerts)


@@ -296,7 +296,7 @@ func (p *queryLogTest) run(t *testing.T) {
 	if p.exactQueryCount() {
 		require.Equal(t, 1, qc)
 	} else {
-		require.Greater(t, qc, 0, "no queries logged")
+		require.Positive(t, qc, "no queries logged")
 	}
 	p.validateLastQuery(t, ql)
@@ -366,7 +366,7 @@ func (p *queryLogTest) run(t *testing.T) {
 	if p.exactQueryCount() {
 		require.Equal(t, 1, qc)
 	} else {
-		require.Greater(t, qc, 0, "no queries logged")
+		require.Positive(t, qc, "no queries logged")
 	}
 }


@@ -88,7 +88,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 	blockDuration := getCompatibleBlockDuration(maxBlockDuration)
 	mint = blockDuration * (mint / blockDuration)

-	db, err := tsdb.OpenDBReadOnly(outputDir, nil)
+	db, err := tsdb.OpenDBReadOnly(outputDir, "", nil)
 	if err != nil {
 		return err
 	}


@@ -235,12 +235,14 @@ func main() {
 	tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
 	dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
+	dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
 	dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
 	dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
 	dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()

-	tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics format. Native histograms are not dumped.")
+	tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
 	dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
+	dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
 	dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
 	dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
 	dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
@@ -396,9 +398,9 @@ func main() {
 		os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
 	case tsdbDumpCmd.FullCommand():
-		os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
+		os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
 	case tsdbDumpOpenMetricsCmd.FullCommand():
-		os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
+		os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
 	// TODO(aSquare14): Work on adding support for custom block size.
 	case openMetricsImportCmd.FullCommand():
 		os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))


@@ -25,6 +25,7 @@ import (
 	"os/exec"
 	"path/filepath"
 	"runtime"
+	"strconv"
 	"strings"
 	"syscall"
 	"testing"
@@ -410,7 +411,7 @@ func TestExitCodes(t *testing.T) {
 	} {
 		t.Run(c.file, func(t *testing.T) {
 			for _, lintFatal := range []bool{true, false} {
-				t.Run(fmt.Sprintf("%t", lintFatal), func(t *testing.T) {
+				t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) {
 					args := []string{"-test.main", "check", "config", "testdata/" + c.file}
 					if lintFatal {
 						args = append(args, "--lint-fatal")


@@ -338,7 +338,7 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
 }

 func listBlocks(path string, humanReadable bool) error {
-	db, err := tsdb.OpenDBReadOnly(path, nil)
+	db, err := tsdb.OpenDBReadOnly(path, "", nil)
 	if err != nil {
 		return err
 	}
@@ -393,7 +393,7 @@ func getFormatedBytes(bytes int64, humanReadable bool) string {
 }

 func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) {
-	db, err := tsdb.OpenDBReadOnly(path, nil)
+	db, err := tsdb.OpenDBReadOnly(path, "", nil)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -708,8 +708,8 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.

 type SeriesSetFormatter func(series storage.SeriesSet) error

-func dumpSamples(ctx context.Context, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
-	db, err := tsdb.OpenDBReadOnly(path, nil)
+func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
+	db, err := tsdb.OpenDBReadOnly(dbDir, sandboxDirRoot, nil)
 	if err != nil {
 		return err
 	}
@@ -856,9 +856,9 @@ func displayHistogram(dataType string, datas []int, total int) {
 	}
 	avg := sum / len(datas)
 	fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1])
-	maxLeftLen := strconv.Itoa(len(fmt.Sprintf("%d", end)))
-	maxRightLen := strconv.Itoa(len(fmt.Sprintf("%d", end+step)))
-	maxCountLen := strconv.Itoa(len(fmt.Sprintf("%d", maxCount)))
+	maxLeftLen := strconv.Itoa(len(strconv.Itoa(end)))
+	maxRightLen := strconv.Itoa(len(strconv.Itoa(end + step)))
+	maxCountLen := strconv.Itoa(len(strconv.Itoa(maxCount)))
 	for bucket, count := range buckets {
 		percentage := 100.0 * count / total
 		fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage))

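Illustration (not part of the commit): the updated `OpenDBReadOnly` call shape; the second argument is the new sandbox directory root, and the `""` passed by `listBlocks`/`openBlock` above suggests an empty value selects a default location.

```go
package main

import (
	"log"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	// "data/" is a placeholder path; the nil logger mirrors the calls above.
	db, err := tsdb.OpenDBReadOnly("data/", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```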

@@ -64,6 +64,7 @@ func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []strin
 	err := dumpSamples(
 		context.Background(),
 		path,
+		t.TempDir(),
 		mint,
 		maxt,
 		match,


@@ -573,7 +573,7 @@ func (la labelsAndAnnotations) String() string {
 	}
 	s := "[\n0:" + indentLines("\n"+la[0].String(), "  ")
 	for i, l := range la[1:] {
-		s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), "  ")
+		s += ",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), "  ")
 	}
 	s += "\n]"


@@ -145,6 +145,7 @@ var (
 		ScrapeInterval:     model.Duration(1 * time.Minute),
 		ScrapeTimeout:      model.Duration(10 * time.Second),
 		EvaluationInterval: model.Duration(1 * time.Minute),
+		RuleQueryOffset:    model.Duration(0 * time.Minute),
 		// When native histogram feature flag is enabled, ScrapeProtocols default
 		// changes to DefaultNativeHistogramScrapeProtocols.
 		ScrapeProtocols: DefaultScrapeProtocols,
@@ -397,6 +398,8 @@ type GlobalConfig struct {
 	ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
 	// How frequently to evaluate rules by default.
 	EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
+	// Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received.
+	RuleQueryOffset model.Duration `yaml:"rule_query_offset"`
 	// File to which PromQL queries are logged.
 	QueryLogFile string `yaml:"query_log_file,omitempty"`
 	// The labels to add to any timeseries that this Prometheus instance scrapes.
@@ -556,6 +559,7 @@ func (c *GlobalConfig) isZero() bool {
 		c.ScrapeInterval == 0 &&
 		c.ScrapeTimeout == 0 &&
 		c.EvaluationInterval == 0 &&
+		c.RuleQueryOffset == 0 &&
 		c.QueryLogFile == "" &&
 		c.ScrapeProtocols == nil
 }

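Illustration (not part of the commit, assumed semantics matching the field comment above): the offset shifts the query timestamp into the past relative to the evaluation time.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical value for the new rule_query_offset global setting.
	offset := 30 * time.Second
	evalTS := time.Now()
	// Assumed semantics: the rule still fires on schedule, but queries data
	// as of an earlier instant so late-arriving samples are included.
	queryTS := evalTS.Add(-offset)
	fmt.Println("evaluate at:", evalTS.Format(time.RFC3339), "query as of:", queryTS.Format(time.RFC3339))
}
```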

@@ -18,6 +18,7 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"strconv"
 	"strings"
 	"time"
@@ -279,7 +280,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 		if inst.PrivateDnsName != nil {
 			labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName)
 		}
-		addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
+		addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
 		labels[model.AddressLabel] = model.LabelValue(addr)

 		if inst.Platform != nil {


@@ -18,6 +18,7 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"strconv"
 	"strings"
 	"time"
@@ -229,7 +230,7 @@ func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 			lightsailLabelRegion: model.LabelValue(d.cfg.Region),
 		}

-		addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
+		addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
 		labels[model.AddressLabel] = model.LabelValue(addr)

 		if inst.PublicIpAddress != nil {


@@ -20,6 +20,7 @@ import (
 	"math/rand"
 	"net"
 	"net/http"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -492,7 +493,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
 			}
 			if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
 				labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
-				address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
+				address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, strconv.Itoa(d.port))
 				labels[model.AddressLabel] = model.LabelValue(address)
 				return labels, nil
 			}


@@ -539,9 +539,9 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
 		// since the service may be registered remotely through a different node.
 		var addr string
 		if serviceNode.Service.Address != "" {
-			addr = net.JoinHostPort(serviceNode.Service.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
+			addr = net.JoinHostPort(serviceNode.Service.Address, strconv.Itoa(serviceNode.Service.Port))
 		} else {
-			addr = net.JoinHostPort(serviceNode.Node.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
+			addr = net.JoinHostPort(serviceNode.Node.Address, strconv.Itoa(serviceNode.Service.Port))
 		}

 		labels := model.LabelSet{


@@ -177,7 +177,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 		}

 		labels := model.LabelSet{
-			doLabelID:        model.LabelValue(fmt.Sprintf("%d", droplet.ID)),
+			doLabelID:        model.LabelValue(strconv.Itoa(droplet.ID)),
 			doLabelName:      model.LabelValue(droplet.Name),
 			doLabelImage:     model.LabelValue(droplet.Image.Slug),
 			doLabelImageName: model.LabelValue(droplet.Image.Name),


@@ -18,6 +18,7 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -200,7 +201,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 	tg := &targetgroup.Group{}
 	hostPort := func(a string, p int) model.LabelValue {
-		return model.LabelValue(net.JoinHostPort(a, fmt.Sprintf("%d", p)))
+		return model.LabelValue(net.JoinHostPort(a, strconv.Itoa(p)))
 	}

 	for _, record := range response.Answer {
@@ -209,7 +210,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 		switch addr := record.(type) {
 		case *dns.SRV:
 			dnsSrvRecordTarget = model.LabelValue(addr.Target)
-			dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port))
+			dnsSrvRecordPort = model.LabelValue(strconv.Itoa(int(addr.Port)))

 			// Remove the final dot from rooted DNS names to make them look more usual.
 			addr.Target = strings.TrimRight(addr.Target, ".")


@@ -15,7 +15,6 @@ package hetzner

 import (
 	"context"
-	"fmt"
 	"net"
 	"net/http"
 	"strconv"
@@ -92,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 	for i, server := range servers {
 		labels := model.LabelSet{
 			hetznerLabelRole:       model.LabelValue(HetznerRoleHcloud),
-			hetznerLabelServerID:   model.LabelValue(fmt.Sprintf("%d", server.ID)),
+			hetznerLabelServerID:   model.LabelValue(strconv.FormatInt(server.ID, 10)),
 			hetznerLabelServerName: model.LabelValue(server.Name),
 			hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
 			hetznerLabelPublicIPv4: model.LabelValue(server.PublicNet.IPv4.IP.String()),
@@ -102,10 +101,10 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 			hetznerLabelHcloudDatacenterLocation:            model.LabelValue(server.Datacenter.Location.Name),
 			hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone),
 			hetznerLabelHcloudType:                          model.LabelValue(server.ServerType.Name),
-			hetznerLabelHcloudCPUCores:                      model.LabelValue(fmt.Sprintf("%d", server.ServerType.Cores)),
+			hetznerLabelHcloudCPUCores:                      model.LabelValue(strconv.Itoa(server.ServerType.Cores)),
 			hetznerLabelHcloudCPUType:                       model.LabelValue(server.ServerType.CPUType),
-			hetznerLabelHcloudMemoryGB:                      model.LabelValue(fmt.Sprintf("%d", int(server.ServerType.Memory))),
-			hetznerLabelHcloudDiskGB:                        model.LabelValue(fmt.Sprintf("%d", server.ServerType.Disk)),
+			hetznerLabelHcloudMemoryGB:                      model.LabelValue(strconv.Itoa(int(server.ServerType.Memory))),
+			hetznerLabelHcloudDiskGB:                        model.LabelValue(strconv.Itoa(server.ServerType.Disk)),
 			model.AddressLabel:                              model.LabelValue(net.JoinHostPort(server.PublicNet.IPv4.IP.String(), strconv.FormatUint(uint64(d.port), 10))),
 		}

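Illustration (not part of the commit): the discovery changes pick the `strconv` formatter matching each field's type, rather than funnelling everything through `fmt.Sprintf`.

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	var (
		cores    = 8           // int    -> strconv.Itoa
		serverID = int64(42)   // int64  -> strconv.FormatInt
		port     = uint16(443) // uint16 -> widen, then strconv.FormatUint
		canceled = false       // bool   -> strconv.FormatBool
	)
	fmt.Println(
		strconv.Itoa(cores),
		strconv.FormatInt(serverID, 10),
		strconv.FormatUint(uint64(port), 10),
		strconv.FormatBool(canceled),
	)
}
```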

@@ -112,7 +112,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
 			hetznerLabelPublicIPv4:     model.LabelValue(server.Server.ServerIP),
 			hetznerLabelServerStatus:   model.LabelValue(server.Server.Status),
 			hetznerLabelRobotProduct:   model.LabelValue(server.Server.Product),
-			hetznerLabelRobotCancelled: model.LabelValue(fmt.Sprintf("%t", server.Server.Canceled)),
+			hetznerLabelRobotCancelled: model.LabelValue(strconv.FormatBool(server.Server.Canceled)),
 			model.AddressLabel:         model.LabelValue(net.JoinHostPort(server.Server.ServerIP, strconv.FormatUint(uint64(d.port), 10))),
 		}


@@ -720,7 +720,7 @@ func staticConfig(addrs ...string) discovery.StaticConfig {
 	var cfg discovery.StaticConfig
 	for i, addr := range addrs {
 		cfg = append(cfg, &targetgroup.Group{
-			Source: fmt.Sprint(i),
+			Source: strconv.Itoa(i),
 			Targets: []model.LabelSet{
 				{model.AddressLabel: model.LabelValue(addr)},
 			},


@@ -325,7 +325,7 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
 		}

 		labels := model.LabelSet{
-			linodeLabelID:          model.LabelValue(fmt.Sprintf("%d", instance.ID)),
+			linodeLabelID:          model.LabelValue(strconv.Itoa(instance.ID)),
 			linodeLabelName:        model.LabelValue(instance.Label),
 			linodeLabelImage:       model.LabelValue(instance.Image),
 			linodeLabelPrivateIPv4: model.LabelValue(privateIPv4),
@@ -338,13 +338,13 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
 			linodeLabelType:               model.LabelValue(instance.Type),
 			linodeLabelStatus:             model.LabelValue(instance.Status),
 			linodeLabelGroup:              model.LabelValue(instance.Group),
-			linodeLabelGPUs:               model.LabelValue(fmt.Sprintf("%d", instance.Specs.GPUs)),
+			linodeLabelGPUs:               model.LabelValue(strconv.Itoa(instance.Specs.GPUs)),
 			linodeLabelHypervisor:         model.LabelValue(instance.Hypervisor),
 			linodeLabelBackups:            model.LabelValue(backupsStatus),
-			linodeLabelSpecsDiskBytes:     model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)),
-			linodeLabelSpecsMemoryBytes:   model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)),
-			linodeLabelSpecsVCPUs:         model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)),
-			linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)),
+			linodeLabelSpecsDiskBytes:     model.LabelValue(strconv.FormatInt(int64(instance.Specs.Disk)<<20, 10)),
+			linodeLabelSpecsMemoryBytes:   model.LabelValue(strconv.FormatInt(int64(instance.Specs.Memory)<<20, 10)),
+			linodeLabelSpecsVCPUs:         model.LabelValue(strconv.Itoa(instance.Specs.VCPUs)),
+			linodeLabelSpecsTransferBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Transfer)<<20, 10)),
 		}

 		addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10))


@@ -720,7 +720,7 @@ func staticConfig(addrs ...string) StaticConfig {
 	var cfg StaticConfig
 	for i, addr := range addrs {
 		cfg = append(cfg, &targetgroup.Group{
-			Source: fmt.Sprint(i),
+			Source: strconv.Itoa(i),
 			Targets: []model.LabelSet{
 				{model.AddressLabel: model.LabelValue(addr)},
 			},


@@ -505,7 +505,7 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string {
 		host = task.Host
 	}

-	return net.JoinHostPort(host, fmt.Sprintf("%d", port))
+	return net.JoinHostPort(host, strconv.Itoa(int(port)))
 }

 // Get a list of ports and a list of labels from a PortMapping.


@@ -15,7 +15,7 @@ package moby

 import (
 	"context"
-	"fmt"
+	"strconv"

 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
@@ -44,8 +44,8 @@ func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix s
 			labelPrefix + labelNetworkID:       network.ID,
 			labelPrefix + labelNetworkName:     network.Name,
 			labelPrefix + labelNetworkScope:    network.Scope,
-			labelPrefix + labelNetworkInternal: fmt.Sprintf("%t", network.Internal),
-			labelPrefix + labelNetworkIngress:  fmt.Sprintf("%t", network.Ingress),
+			labelPrefix + labelNetworkInternal: strconv.FormatBool(network.Internal),
+			labelPrefix + labelNetworkIngress:  strconv.FormatBool(network.Ingress),
 		}
 		for k, v := range network.Labels {
 			ln := strutil.SanitizeLabelName(k)


@@ -66,7 +66,7 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err
 			swarmLabelNodeAddress: model.LabelValue(n.Status.Addr),
 		}
 		if n.ManagerStatus != nil {
-			labels[swarmLabelNodeManagerLeader] = model.LabelValue(fmt.Sprintf("%t", n.ManagerStatus.Leader))
+			labels[swarmLabelNodeManagerLeader] = model.LabelValue(strconv.FormatBool(n.ManagerStatus.Leader))
 			labels[swarmLabelNodeManagerReachability] = model.LabelValue(n.ManagerStatus.Reachability)
 			labels[swarmLabelNodeManagerAddr] = model.LabelValue(n.ManagerStatus.Addr)
 		}


@@ -116,7 +116,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group,
 				labels[model.LabelName(k)] = model.LabelValue(v)
 			}

-			addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
+			addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port))
 			labels[model.AddressLabel] = model.LabelValue(addr)
 			tg.Targets = append(tg.Targets, labels)


@@ -150,7 +150,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
 				labels[model.LabelName(k)] = model.LabelValue(v)
 			}

-			addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
+			addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port))
 			labels[model.AddressLabel] = model.LabelValue(addr)
 			tg.Targets = append(tg.Targets, labels)


@@ -17,6 +17,7 @@ import (
 	"context"
 	"fmt"
 	"net"
+	"strconv"

 	"github.com/go-kit/log"
 	"github.com/gophercloud/gophercloud"
@@ -72,7 +73,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
 	}

 	tg := &targetgroup.Group{
-		Source: fmt.Sprintf("OS_" + h.region),
+		Source: "OS_" + h.region,
 	}
 	// OpenStack API reference
 	// https://developer.openstack.org/api-ref/compute/#list-hypervisors-details
@@ -84,7 +85,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
 	}
 	for _, hypervisor := range hypervisorList {
 		labels := model.LabelSet{}
-		addr := net.JoinHostPort(hypervisor.HostIP, fmt.Sprintf("%d", h.port))
+		addr := net.JoinHostPort(hypervisor.HostIP, strconv.Itoa(h.port))
 		labels[model.AddressLabel] = model.LabelValue(addr)
 		labels[openstackLabelHypervisorID] = model.LabelValue(hypervisor.ID)
 		labels[openstackLabelHypervisorHostName] = model.LabelValue(hypervisor.HypervisorHostname)


@@ -17,6 +17,7 @@ import (
 	"context"
 	"fmt"
 	"net"
+	"strconv"

 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
@@ -120,7 +121,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 	}
 	pager := servers.List(client, opts)
 	tg := &targetgroup.Group{
-		Source: fmt.Sprintf("OS_" + i.region),
+		Source: "OS_" + i.region,
 	}
 	err = pager.EachPage(func(page pagination.Page) (bool, error) {
 		if ctx.Err() != nil {
@@ -194,7 +195,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 			if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok {
 				lbls[openstackLabelPublicIP] = model.LabelValue(val)
 			}
-			addr = net.JoinHostPort(addr, fmt.Sprintf("%d", i.port))
+			addr = net.JoinHostPort(addr, strconv.Itoa(i.port))
 			lbls[model.AddressLabel] = model.LabelValue(addr)

 			tg.Targets = append(tg.Targets, lbls)


@@ -144,12 +144,12 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou
 			model.InstanceLabel: model.LabelValue(server.Name),
 			dedicatedServerLabelPrefix + "state":            model.LabelValue(server.State),
 			dedicatedServerLabelPrefix + "commercial_range": model.LabelValue(server.CommercialRange),
-			dedicatedServerLabelPrefix + "link_speed":       model.LabelValue(fmt.Sprintf("%d", server.LinkSpeed)),
+			dedicatedServerLabelPrefix + "link_speed":       model.LabelValue(strconv.Itoa(server.LinkSpeed)),
 			dedicatedServerLabelPrefix + "rack":             model.LabelValue(server.Rack),
 			dedicatedServerLabelPrefix + "no_intervention":  model.LabelValue(strconv.FormatBool(server.NoIntervention)),
 			dedicatedServerLabelPrefix + "os":               model.LabelValue(server.Os),
 			dedicatedServerLabelPrefix + "support_level":    model.LabelValue(server.SupportLevel),
-			dedicatedServerLabelPrefix + "server_id":        model.LabelValue(fmt.Sprintf("%d", server.ServerID)),
+			dedicatedServerLabelPrefix + "server_id":        model.LabelValue(strconv.FormatInt(server.ServerID, 10)),
 			dedicatedServerLabelPrefix + "reverse":          model.LabelValue(server.Reverse),
 			dedicatedServerLabelPrefix + "datacenter":       model.LabelValue(server.Datacenter),
 			dedicatedServerLabelPrefix + "name":             model.LabelValue(server.Name),


@@ -19,6 +19,7 @@ import (
 	"net/netip"
 	"net/url"
 	"path"
+	"strconv"

 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
@@ -161,21 +162,21 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 			model.InstanceLabel: model.LabelValue(server.Name),
 			vpsLabelPrefix + "offer":                 model.LabelValue(server.Model.Offer),
 			vpsLabelPrefix + "datacenter":            model.LabelValue(fmt.Sprintf("%+v", server.Model.Datacenter)),
-			vpsLabelPrefix + "model_vcore":           model.LabelValue(fmt.Sprintf("%d", server.Model.Vcore)),
-			vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(fmt.Sprintf("%d", server.Model.MaximumAdditionalIP)),
+			vpsLabelPrefix + "model_vcore":           model.LabelValue(strconv.Itoa(server.Model.Vcore)),
+			vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(strconv.Itoa(server.Model.MaximumAdditionalIP)),
 			vpsLabelPrefix + "version":               model.LabelValue(server.Model.Version),
 			vpsLabelPrefix + "model_name":            model.LabelValue(server.Model.Name),
-			vpsLabelPrefix + "disk":                  model.LabelValue(fmt.Sprintf("%d", server.Model.Disk)),
-			vpsLabelPrefix + "memory":                model.LabelValue(fmt.Sprintf("%d", server.Model.Memory)),
+			vpsLabelPrefix + "disk":                  model.LabelValue(strconv.Itoa(server.Model.Disk)),
+			vpsLabelPrefix + "memory":                model.LabelValue(strconv.Itoa(server.Model.Memory)),
 			vpsLabelPrefix + "zone":                  model.LabelValue(server.Zone),
 			vpsLabelPrefix + "display_name":          model.LabelValue(server.DisplayName),
 			vpsLabelPrefix + "cluster":               model.LabelValue(server.Cluster),
 			vpsLabelPrefix + "state":                 model.LabelValue(server.State),
 			vpsLabelPrefix + "name":                  model.LabelValue(server.Name),
 			vpsLabelPrefix + "netboot_mode":          model.LabelValue(server.NetbootMode),
-			vpsLabelPrefix + "memory_limit":          model.LabelValue(fmt.Sprintf("%d", server.MemoryLimit)),
+			vpsLabelPrefix + "memory_limit":          model.LabelValue(strconv.Itoa(server.MemoryLimit)),
 			vpsLabelPrefix + "offer_type":            model.LabelValue(server.OfferType),
-			vpsLabelPrefix + "vcore":                 model.LabelValue(fmt.Sprintf("%d", server.Vcore)),
+			vpsLabelPrefix + "vcore":                 model.LabelValue(strconv.Itoa(server.Vcore)),
 			vpsLabelPrefix + "ipv4":                  model.LabelValue(ipv4),
 			vpsLabelPrefix + "ipv6":                  model.LabelValue(ipv6),
 		}


@@ -237,7 +237,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 			pdbLabelResource:    model.LabelValue(resource.Resource),
 			pdbLabelType:        model.LabelValue(resource.Type),
 			pdbLabelTitle:       model.LabelValue(resource.Title),
-			pdbLabelExported:    model.LabelValue(fmt.Sprintf("%t", resource.Exported)),
+			pdbLabelExported:    model.LabelValue(strconv.FormatBool(resource.Exported)),
 			pdbLabelFile:        model.LabelValue(resource.File),
 			pdbLabelEnvironment: model.LabelValue(resource.Environment),
 		}


@@ -20,6 +20,7 @@ import (
 	"net/http"
 	"net/url"
 	"path"
+	"strconv"
 	"strings"
 	"time"
@@ -269,7 +270,7 @@ func (d *Discovery) getEndpointLabels(
 		model.AddressLabel:       model.LabelValue(addr),
 		uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname),
 		uyuniLabelPrimaryFQDN:    model.LabelValue(networkInfo.PrimaryFQDN),
-		uyuniLablelSystemID:      model.LabelValue(fmt.Sprintf("%d", endpoint.SystemID)),
+		uyuniLablelSystemID:      model.LabelValue(strconv.Itoa(endpoint.SystemID)),
 		uyuniLablelGroups:        model.LabelValue(strings.Join(managedGroupNames, d.separator)),
 		uyuniLablelEndpointName:  model.LabelValue(endpoint.EndpointName),
 		uyuniLablelExporter:      model.LabelValue(endpoint.ExporterName),


@@ -280,17 +280,17 @@ func parseServersetMember(data []byte, path string) (model.LabelSet, error) {
 	labels := model.LabelSet{}
 	labels[serversetPathLabel] = model.LabelValue(path)
 	labels[model.AddressLabel] = model.LabelValue(
-		net.JoinHostPort(member.ServiceEndpoint.Host, fmt.Sprintf("%d", member.ServiceEndpoint.Port)))
+		net.JoinHostPort(member.ServiceEndpoint.Host, strconv.Itoa(member.ServiceEndpoint.Port)))
 	labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host)
-	labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.ServiceEndpoint.Port))
+	labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.ServiceEndpoint.Port))

 	for name, endpoint := range member.AdditionalEndpoints {
 		cleanName := model.LabelName(strutil.SanitizeLabelName(name))
 		labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue(
 			endpoint.Host)
 		labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue(
-			fmt.Sprintf("%d", endpoint.Port))
+			strconv.Itoa(endpoint.Port))
 	}

 	labels[serversetStatusLabel] = model.LabelValue(member.Status)
@@ -321,10 +321,10 @@ func parseNerveMember(data []byte, path string) (model.LabelSet, error) {
 	labels := model.LabelSet{}
 	labels[nervePathLabel] = model.LabelValue(path)
 	labels[model.AddressLabel] = model.LabelValue(
-		net.JoinHostPort(member.Host, fmt.Sprintf("%d", member.Port)))
+		net.JoinHostPort(member.Host, strconv.Itoa(member.Port)))
 	labels[nerveEndpointLabelPrefix+"_host"] = model.LabelValue(member.Host)
-	labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.Port))
+	labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.Port))
 	labels[nerveEndpointLabelPrefix+"_name"] = model.LabelValue(member.Name)

 	return labels, nil


@@ -48,7 +48,7 @@ The Prometheus monitoring server
 | <code class="text-nowrap">--rules.alert.for-outage-tolerance</code> | Max time to tolerate prometheus outage for restoring "for" state of alert. Use with server mode only. | `1h` |
 | <code class="text-nowrap">--rules.alert.for-grace-period</code> | Minimum duration between alert and restored "for" state. This is maintained only for alerts with configured "for" time greater than grace period. Use with server mode only. | `10m` |
 | <code class="text-nowrap">--rules.alert.resend-delay</code> | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` |
-| <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. Use with server mode only. | `4` |
+| <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. When set, "query.max-concurrency" may need to be adjusted accordingly. Use with server mode only. | `4` |
 | <code class="text-nowrap">--alertmanager.notification-queue-capacity</code> | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` |
 | <code class="text-nowrap">--query.lookback-delta</code> | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` |
 | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |


@@ -566,6 +566,7 @@ Dump samples from a TSDB.

 | Flag | Description | Default |
 | --- | --- | --- |
+| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
 | <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
 | <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
 | <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
@@ -584,7 +585,7 @@ Dump samples from a TSDB.

 ##### `promtool tsdb dump-openmetrics`

-[Experimental] Dump samples from a TSDB into OpenMetrics format. Native histograms are not dumped.
+[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.
@@ -592,6 +593,7 @@ Dump samples from a TSDB.

 | Flag | Description | Default |
 | --- | --- | --- |
+| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
 | <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
 | <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
 | <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |


@ -70,6 +70,10 @@ global:
# How frequently to evaluate rules. # How frequently to evaluate rules.
[ evaluation_interval: <duration> | default = 1m ] [ evaluation_interval: <duration> | default = 1m ]
# Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received.
# Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there are anomalies with scraping.
[ rule_query_offset: <duration> | default = 0s ]
# The labels to add to any time series or alerts when communicating with # The labels to add to any time series or alerts when communicating with
# external systems (federation, remote storage, Alertmanager). # external systems (federation, remote storage, Alertmanager).
@ -1349,7 +1353,7 @@ interface.
The following meta labels are available on targets during [relabeling](#relabel_config): The following meta labels are available on targets during [relabeling](#relabel_config):
* `__meta_openstack_address_pool`: the pool of the private IP. * `__meta_openstack_address_pool`: the pool of the private IP.
* `__meta_openstack_instance_flavor`: the flavor of the OpenStack instance. * `__meta_openstack_instance_flavor`: the flavor ID of the OpenStack instance.
* `__meta_openstack_instance_id`: the OpenStack instance ID. * `__meta_openstack_instance_id`: the OpenStack instance ID.
* `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using. * `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using.
* `__meta_openstack_instance_name`: the OpenStack instance name. * `__meta_openstack_instance_name`: the OpenStack instance name.
@ -1357,7 +1361,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_openstack_private_ip`: the private IP of the OpenStack instance. * `__meta_openstack_private_ip`: the private IP of the OpenStack instance.
* `__meta_openstack_project_id`: the project (tenant) owning this instance. * `__meta_openstack_project_id`: the project (tenant) owning this instance.
* `__meta_openstack_public_ip`: the public IP of the OpenStack instance. * `__meta_openstack_public_ip`: the public IP of the OpenStack instance.
* `__meta_openstack_tag_<tagkey>`: each tag value of the instance. * `__meta_openstack_tag_<key>`: each metadata item of the instance, with any unsupported characters converted to an underscore.
* `__meta_openstack_user_id`: the user account owning the tenant. * `__meta_openstack_user_id`: the user account owning the tenant.
See below for the configuration options for OpenStack discovery: See below for the configuration options for OpenStack discovery:
@ -1467,6 +1471,7 @@ For OVHcloud's [public cloud instances](https://www.ovhcloud.com/en/public-cloud
* `__meta_ovhcloud_dedicated_server_ipv6`: the IPv6 of the server * `__meta_ovhcloud_dedicated_server_ipv6`: the IPv6 of the server
* `__meta_ovhcloud_dedicated_server_link_speed`: the link speed of the server * `__meta_ovhcloud_dedicated_server_link_speed`: the link speed of the server
* `__meta_ovhcloud_dedicated_server_name`: the name of the server * `__meta_ovhcloud_dedicated_server_name`: the name of the server
* `__meta_ovhcloud_dedicated_server_no_intervention`: whether datacenter intervention is disabled for the server
* `__meta_ovhcloud_dedicated_server_os`: the operating system of the server * `__meta_ovhcloud_dedicated_server_os`: the operating system of the server
* `__meta_ovhcloud_dedicated_server_rack`: the rack of the server * `__meta_ovhcloud_dedicated_server_rack`: the rack of the server
* `__meta_ovhcloud_dedicated_server_reverse`: the reverse DNS name of the server * `__meta_ovhcloud_dedicated_server_reverse`: the reverse DNS name of the server
@ -3673,7 +3678,8 @@ queue_config:
[ min_shards: <int> | default = 1 ] [ min_shards: <int> | default = 1 ]
# Maximum number of samples per send. # Maximum number of samples per send.
[ max_samples_per_send: <int> | default = 2000] [ max_samples_per_send: <int> | default = 2000]
# Maximum time a sample will wait in buffer. # Maximum time a sample will wait for a send. The sample might wait less
# if the buffer is full. Further time might pass due to potential retries.
[ batch_send_deadline: <duration> | default = 5s ] [ batch_send_deadline: <duration> | default = 5s ]
# Initial retry delay. Gets doubled for every retry. # Initial retry delay. Gets doubled for every retry.
[ min_backoff: <duration> | default = 30ms ] [ min_backoff: <duration> | default = 30ms ]
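The `batch_send_deadline` semantics described in the comment above are easy to sketch in code. This is a generic, hedged illustration of deadline-or-full-batch flushing, not Prometheus's actual remote-write shard loop; the channel, constants, and `flush` helper are invented for the example:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const maxSamplesPerSend = 3
	const batchSendDeadline = 100 * time.Millisecond

	samples := make(chan int)
	go func() {
		for i := 1; i <= 7; i++ {
			samples <- i
			time.Sleep(30 * time.Millisecond)
		}
		close(samples)
	}()

	var batch []int
	flush := func(reason string) {
		if len(batch) == 0 {
			return
		}
		fmt.Printf("flush (%s): %v\n", reason, batch)
		batch = nil
	}

	timer := time.NewTimer(batchSendDeadline)
	defer timer.Stop()
	for {
		select {
		case s, ok := <-samples:
			if !ok {
				flush("shutdown")
				return
			}
			batch = append(batch, s)
			// A full batch is sent immediately; the deadline only matters
			// when traffic is too slow to fill a batch.
			if len(batch) >= maxSamplesPerSend {
				flush("batch full")
			}
		case <-timer.C:
			flush("deadline")
			timer.Reset(batchSendDeadline)
		}
	}
}
```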


@ -86,6 +86,9 @@ name: <string>
# rule can produce. 0 is no limit. # rule can produce. 0 is no limit.
[ limit: <int> | default = 0 ] [ limit: <int> | default = 0 ]
# Offset the rule evaluation timestamp of this particular group by the specified duration into the past.
[ query_offset: <duration> | default = global.rule_query_offset ]
rules: rules:
[ - <rule> ... ] [ - <rule> ... ]
``` ```
@ -148,6 +151,9 @@ the rule, active, pending, or inactive, are cleared as well. The event will be
recorded as an error in the evaluation, and as such no stale markers are recorded as an error in the evaluation, and as such no stale markers are
written. written.
# Rule query offset
This is useful to ensure the underlying metrics have been received and stored in Prometheus. Metric availability delays are more likely to occur when Prometheus is running as a remote write target due to the nature of distributed systems, but can also occur when there are anomalies with scraping and/or short evaluation intervals.
# Failed rule evaluations due to slow evaluation # Failed rule evaluations due to slow evaluation
If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group. If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.


@ -197,6 +197,9 @@ or time-series database to Prometheus. To do so, the user must first convert the
source data into [OpenMetrics](https://openmetrics.io/) format, which is the source data into [OpenMetrics](https://openmetrics.io/) format, which is the
input format for the backfilling as described below. input format for the backfilling as described below.
Note that native histograms and staleness markers are not supported by this
procedure, as they cannot be represented in the OpenMetrics format.
### Usage ### Usage
Backfilling can be used via the Promtool command line. Promtool will write the blocks Backfilling can be used via the Promtool command line. Promtool will write the blocks


@ -127,9 +127,9 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target
// since the service may be registered remotely through a different node. // since the service may be registered remotely through a different node.
var addr string var addr string
if node.ServiceAddress != "" { if node.ServiceAddress != "" {
addr = net.JoinHostPort(node.ServiceAddress, fmt.Sprintf("%d", node.ServicePort)) addr = net.JoinHostPort(node.ServiceAddress, strconv.Itoa(node.ServicePort))
} else { } else {
addr = net.JoinHostPort(node.Address, fmt.Sprintf("%d", node.ServicePort)) addr = net.JoinHostPort(node.Address, strconv.Itoa(node.ServicePort))
} }
target := model.LabelSet{model.AddressLabel: model.LabelValue(addr)} target := model.LabelSet{model.AddressLabel: model.LabelValue(addr)}
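For context on this change: `strconv.Itoa` renders an int identically to `fmt.Sprintf("%d", …)` while skipping fmt's format-string parsing and interface boxing. A minimal standalone sketch:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	port := 8300
	// Both render "10.0.0.1:8300"; strconv.Itoa avoids fmt's
	// reflection-based formatting path and an interface allocation.
	fmt.Println(net.JoinHostPort("10.0.0.1", strconv.Itoa(port)))
	fmt.Println(net.JoinHostPort("10.0.0.1", fmt.Sprintf("%d", port)))
}
```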


@ -44,5 +44,10 @@
// The default refresh time for all dashboards, default to 60s // The default refresh time for all dashboards, default to 60s
refresh: '60s', refresh: '60s',
}, },
// Opt out of multi-cluster dashboards by overriding this.
showMultiCluster: true,
// The cluster label to infer the cluster name from.
clusterLabel: 'cluster',
}, },
} }


@ -10,21 +10,32 @@ local template = grafana.template;
{ {
grafanaDashboards+:: { grafanaDashboards+:: {
'prometheus.json': 'prometheus.json':
g.dashboard( local showMultiCluster = $._config.showMultiCluster;
local dashboard = g.dashboard(
'%(prefix)sOverview' % $._config.grafanaPrometheus '%(prefix)sOverview' % $._config.grafanaPrometheus
) );
.addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'cluster') local templatedDashboard = if showMultiCluster then
.addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job') dashboard
.addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance') .addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, $._config.clusterLabel)
.addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job')
.addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance')
else
dashboard
.addMultiTemplate('job', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'job')
.addMultiTemplate('instance', 'prometheus_build_info{job=~"$job"}', 'instance');
templatedDashboard
.addRow( .addRow(
g.row('Prometheus Stats') g.row('Prometheus Stats')
.addPanel( .addPanel(
g.panel('Prometheus Stats') + g.panel('Prometheus Stats') +
g.tablePanel([ g.tablePanel(if showMultiCluster then [
'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})', 'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})', 'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
] else [
'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})',
'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})',
], { ], {
cluster: { alias: 'Cluster' }, cluster: { alias: if showMultiCluster then 'Cluster' else '' },
job: { alias: 'Job' }, job: { alias: 'Job' },
instance: { alias: 'Instance' }, instance: { alias: 'Instance' },
version: { alias: 'Version' }, version: { alias: 'Version' },
@ -37,12 +48,18 @@ local template = grafana.template;
g.row('Discovery') g.row('Discovery')
.addPanel( .addPanel(
g.panel('Target Sync') + g.panel('Target Sync') +
g.queryPanel('sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3', '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}') + g.queryPanel(if showMultiCluster then 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3'
else 'sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3',
if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}'
else '{{scrape_job}}') +
{ yaxes: g.yaxes('ms') } { yaxes: g.yaxes('ms') }
) )
.addPanel( .addPanel(
g.panel('Targets') + g.panel('Targets') +
g.queryPanel('sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})', '{{cluster}}:{{job}}:{{instance}}') + g.queryPanel(if showMultiCluster then 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})'
else 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})',
if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}'
else 'Targets') +
g.stack g.stack
) )
) )
@ -50,29 +67,47 @@ local template = grafana.template;
g.row('Retrieval') g.row('Retrieval')
.addPanel( .addPanel(
g.panel('Average Scrape Interval Duration') + g.panel('Average Scrape Interval Duration') +
g.queryPanel('rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3', '{{cluster}}:{{job}}:{{instance}} {{interval}} configured') + g.queryPanel(if showMultiCluster then 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3'
else 'rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3',
if showMultiCluster then '{{cluster}}:{{job}}:{{instance}} {{interval}} configured'
else '{{interval}} configured') +
{ yaxes: g.yaxes('ms') } { yaxes: g.yaxes('ms') }
) )
.addPanel( .addPanel(
g.panel('Scrape failures') + g.panel('Scrape failures') +
g.queryPanel([ g.queryPanel(if showMultiCluster then [
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
], [ ] else [
'sum by (job) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))',
], if showMultiCluster then [
'exceeded body size limit: {{cluster}} {{job}} {{instance}}', 'exceeded body size limit: {{cluster}} {{job}} {{instance}}',
'exceeded sample limit: {{cluster}} {{job}} {{instance}}', 'exceeded sample limit: {{cluster}} {{job}} {{instance}}',
'duplicate timestamp: {{cluster}} {{job}} {{instance}}', 'duplicate timestamp: {{cluster}} {{job}} {{instance}}',
'out of bounds: {{cluster}} {{job}} {{instance}}', 'out of bounds: {{cluster}} {{job}} {{instance}}',
'out of order: {{cluster}} {{job}} {{instance}}', 'out of order: {{cluster}} {{job}} {{instance}}',
] else [
'exceeded body size limit: {{job}}',
'exceeded sample limit: {{job}}',
'duplicate timestamp: {{job}}',
'out of bounds: {{job}}',
'out of order: {{job}}',
]) + ]) +
g.stack g.stack
) )
.addPanel( .addPanel(
g.panel('Appended Samples') + g.panel('Appended Samples') +
g.queryPanel('rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])', '{{cluster}} {{job}} {{instance}}') + g.queryPanel(if showMultiCluster then 'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])'
else 'rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])',
if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
else '{{job}} {{instance}}') +
g.stack g.stack
) )
) )
@ -80,12 +115,18 @@ local template = grafana.template;
g.row('Storage') g.row('Storage')
.addPanel( .addPanel(
g.panel('Head Series') + g.panel('Head Series') +
g.queryPanel('prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}', '{{cluster}} {{job}} {{instance}} head series') + g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
else 'prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}',
if showMultiCluster then '{{cluster}} {{job}} {{instance}} head series'
else '{{job}} {{instance}} head series') +
g.stack g.stack
) )
.addPanel( .addPanel(
g.panel('Head Chunks') + g.panel('Head Chunks') +
g.queryPanel('prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}', '{{cluster}} {{job}} {{instance}} head chunks') + g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
else 'prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}',
if showMultiCluster then '{{cluster}} {{job}} {{instance}} head chunks'
else '{{job}} {{instance}} head chunks') +
g.stack g.stack
) )
) )
@ -93,12 +134,18 @@ local template = grafana.template;
g.row('Query') g.row('Query')
.addPanel( .addPanel(
g.panel('Query Rate') + g.panel('Query Rate') +
g.queryPanel('rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])', '{{cluster}} {{job}} {{instance}}') + g.queryPanel(if showMultiCluster then 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])'
else 'rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])',
if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
else '{{job}} {{instance}}') +
g.stack, g.stack,
) )
.addPanel( .addPanel(
g.panel('Stage Duration') + g.panel('Stage Duration') +
g.queryPanel('max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3', '{{slice}}') + g.queryPanel(if showMultiCluster then 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3'
else 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3',
'{{slice}}') +
{ yaxes: g.yaxes('ms') } + { yaxes: g.yaxes('ms') } +
g.stack, g.stack,
) )


@ -14,9 +14,9 @@
package histogram package histogram
import ( import (
"fmt"
"math" "math"
"math/rand" "math/rand"
"strconv"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -2134,7 +2134,7 @@ func TestAllFloatBucketIterator(t *testing.T) {
} }
for i, c := range cases { for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
var expBuckets, actBuckets []Bucket[float64] var expBuckets, actBuckets []Bucket[float64]
if c.includeNeg { if c.includeNeg {
@ -2360,7 +2360,7 @@ func TestAllReverseFloatBucketIterator(t *testing.T) {
} }
for i, c := range cases { for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
var expBuckets, actBuckets []Bucket[float64] var expBuckets, actBuckets []Bucket[float64]
if c.includePos { if c.includePos {


@ -14,8 +14,8 @@
package histogram package histogram
import ( import (
"fmt"
"math" "math"
"strconv"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -72,7 +72,7 @@ func TestHistogramString(t *testing.T) {
} }
for i, c := range cases { for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
actualString := c.histogram.String() actualString := c.histogram.String()
require.Equal(t, c.expectedString, actualString) require.Equal(t, c.expectedString, actualString)
}) })
@ -211,7 +211,7 @@ func TestCumulativeBucketIterator(t *testing.T) {
} }
for i, c := range cases { for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
it := c.histogram.CumulativeBucketIterator() it := c.histogram.CumulativeBucketIterator()
actualBuckets := make([]Bucket[uint64], 0, len(c.expectedBuckets)) actualBuckets := make([]Bucket[uint64], 0, len(c.expectedBuckets))
for it.Next() { for it.Next() {
@ -371,7 +371,7 @@ func TestRegularBucketIterator(t *testing.T) {
} }
for i, c := range cases { for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
it := c.histogram.PositiveBucketIterator() it := c.histogram.PositiveBucketIterator()
actualPositiveBuckets := make([]Bucket[uint64], 0, len(c.expectedPositiveBuckets)) actualPositiveBuckets := make([]Bucket[uint64], 0, len(c.expectedPositiveBuckets))
for it.Next() { for it.Next() {


@ -17,6 +17,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"net/http" "net/http"
"strconv"
"strings" "strings"
"testing" "testing"
@ -732,7 +733,7 @@ func TestScratchBuilder(t *testing.T) {
want: FromStrings("ddd", "444"), want: FromStrings("ddd", "444"),
}, },
} { } {
t.Run(fmt.Sprint(i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
b := NewScratchBuilder(len(tcase.add)) b := NewScratchBuilder(len(tcase.add))
for _, lbl := range tcase.add { for _, lbl := range tcase.add {
b.Add(lbl.Name, lbl.Value) b.Add(lbl.Name, lbl.Value)


@ -14,7 +14,8 @@
package labels package labels
import ( import (
"fmt" "bytes"
"strconv"
) )
// MatchType is an enum for label matching types. // MatchType is an enum for label matching types.
@ -78,7 +79,29 @@ func MustNewMatcher(mt MatchType, name, val string) *Matcher {
} }
func (m *Matcher) String() string { func (m *Matcher) String() string {
return fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value) // Start a buffer with a pre-allocated size on stack to cover most needs.
var bytea [1024]byte
b := bytes.NewBuffer(bytea[:0])
if m.shouldQuoteName() {
b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Name))
} else {
b.WriteString(m.Name)
}
b.WriteString(m.Type.String())
b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Value))
return b.String()
}
func (m *Matcher) shouldQuoteName() bool {
for i, c := range m.Name {
if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (i > 0 && c >= '0' && c <= '9') {
continue
}
return true
}
return false
} }
// Matches returns whether the matcher matches the given string value. // Matches returns whether the matcher matches the given string value.
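A usage sketch of the quoting behavior introduced above. The expected outputs in the comments follow from `shouldQuoteName` and from `MatchEqual` rendering as `=`; they are inferred from this diff, not taken from upstream documentation:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// A conventional label name is emitted bare...
	fmt.Println(labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) // foo="bar"
	// ...while a name with characters outside [a-zA-Z0-9_] is quoted.
	fmt.Println(labels.MustNewMatcher(labels.MatchEqual, "a.b", "c")) // "a.b"="c"
	// A leading digit also forces quoting, per shouldQuoteName above.
	fmt.Println(labels.MustNewMatcher(labels.MatchEqual, "0", "1")) // "0"="1"
}
```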


@ -15,6 +15,7 @@ package labels
import ( import (
"fmt" "fmt"
"math/rand"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -225,3 +226,128 @@ func BenchmarkNewMatcher(b *testing.B) {
} }
}) })
} }
func BenchmarkMatcher_String(b *testing.B) {
type benchCase struct {
name string
matchers []*Matcher
}
cases := []benchCase{
{
name: "short name equal",
matchers: []*Matcher{
MustNewMatcher(MatchEqual, "foo", "bar"),
MustNewMatcher(MatchEqual, "bar", "baz"),
MustNewMatcher(MatchEqual, "abc", "def"),
MustNewMatcher(MatchEqual, "ghi", "klm"),
MustNewMatcher(MatchEqual, "nop", "qrs"),
},
},
{
name: "short quoted name not equal",
matchers: []*Matcher{
MustNewMatcher(MatchEqual, "f.o", "bar"),
MustNewMatcher(MatchEqual, "b.r", "baz"),
MustNewMatcher(MatchEqual, "a.c", "def"),
MustNewMatcher(MatchEqual, "g.i", "klm"),
MustNewMatcher(MatchEqual, "n.p", "qrs"),
},
},
{
name: "short quoted name with quotes not equal",
matchers: []*Matcher{
MustNewMatcher(MatchEqual, `"foo"`, "bar"),
MustNewMatcher(MatchEqual, `"foo"`, "baz"),
MustNewMatcher(MatchEqual, `"foo"`, "def"),
MustNewMatcher(MatchEqual, `"foo"`, "klm"),
MustNewMatcher(MatchEqual, `"foo"`, "qrs"),
},
},
{
name: "short name value with quotes equal",
matchers: []*Matcher{
MustNewMatcher(MatchEqual, "foo", `"bar"`),
MustNewMatcher(MatchEqual, "bar", `"baz"`),
MustNewMatcher(MatchEqual, "abc", `"def"`),
MustNewMatcher(MatchEqual, "ghi", `"klm"`),
MustNewMatcher(MatchEqual, "nop", `"qrs"`),
},
},
{
name: "short name and long value regexp",
matchers: []*Matcher{
MustNewMatcher(MatchRegexp, "foo", "five_six_seven_eight_nine_ten_one_two_three_four"),
MustNewMatcher(MatchRegexp, "bar", "one_two_three_four_five_six_seven_eight_nine_ten"),
MustNewMatcher(MatchRegexp, "abc", "two_three_four_five_six_seven_eight_nine_ten_one"),
MustNewMatcher(MatchRegexp, "ghi", "three_four_five_six_seven_eight_nine_ten_one_two"),
MustNewMatcher(MatchRegexp, "nop", "four_five_six_seven_eight_nine_ten_one_two_three"),
},
},
{
name: "short name and long value with quotes equal",
matchers: []*Matcher{
MustNewMatcher(MatchEqual, "foo", `five_six_seven_eight_nine_ten_"one"_two_three_four`),
MustNewMatcher(MatchEqual, "bar", `one_two_three_four_five_six_"seven"_eight_nine_ten`),
MustNewMatcher(MatchEqual, "abc", `two_three_four_five_six_seven_"eight"_nine_ten_one`),
MustNewMatcher(MatchEqual, "ghi", `three_four_five_six_seven_eight_"nine"_ten_one_two`),
MustNewMatcher(MatchEqual, "nop", `four_five_six_seven_eight_nine_"ten"_one_two_three`),
},
},
{
name: "long name regexp",
matchers: []*Matcher{
MustNewMatcher(MatchRegexp, "one_two_three_four_five_six_seven_eight_nine_ten", "val"),
MustNewMatcher(MatchRegexp, "two_three_four_five_six_seven_eight_nine_ten_one", "val"),
MustNewMatcher(MatchRegexp, "three_four_five_six_seven_eight_nine_ten_one_two", "val"),
MustNewMatcher(MatchRegexp, "four_five_six_seven_eight_nine_ten_one_two_three", "val"),
MustNewMatcher(MatchRegexp, "five_six_seven_eight_nine_ten_one_two_three_four", "val"),
},
},
{
name: "long quoted name regexp",
matchers: []*Matcher{
MustNewMatcher(MatchRegexp, "one.two.three.four.five.six.seven.eight.nine.ten", "val"),
MustNewMatcher(MatchRegexp, "two.three.four.five.six.seven.eight.nine.ten.one", "val"),
MustNewMatcher(MatchRegexp, "three.four.five.six.seven.eight.nine.ten.one.two", "val"),
MustNewMatcher(MatchRegexp, "four.five.six.seven.eight.nine.ten.one.two.three", "val"),
MustNewMatcher(MatchRegexp, "five.six.seven.eight.nine.ten.one.two.three.four", "val"),
},
},
{
name: "long name and long value regexp",
matchers: []*Matcher{
MustNewMatcher(MatchRegexp, "one_two_three_four_five_six_seven_eight_nine_ten", "five_six_seven_eight_nine_ten_one_two_three_four"),
MustNewMatcher(MatchRegexp, "two_three_four_five_six_seven_eight_nine_ten_one", "one_two_three_four_five_six_seven_eight_nine_ten"),
MustNewMatcher(MatchRegexp, "three_four_five_six_seven_eight_nine_ten_one_two", "two_three_four_five_six_seven_eight_nine_ten_one"),
MustNewMatcher(MatchRegexp, "four_five_six_seven_eight_nine_ten_one_two_three", "three_four_five_six_seven_eight_nine_ten_one_two"),
MustNewMatcher(MatchRegexp, "five_six_seven_eight_nine_ten_one_two_three_four", "four_five_six_seven_eight_nine_ten_one_two_three"),
},
},
{
name: "long quoted name and long value regexp",
matchers: []*Matcher{
MustNewMatcher(MatchRegexp, "one.two.three.four.five.six.seven.eight.nine.ten", "five.six.seven.eight.nine.ten.one.two.three.four"),
MustNewMatcher(MatchRegexp, "two.three.four.five.six.seven.eight.nine.ten.one", "one.two.three.four.five.six.seven.eight.nine.ten"),
MustNewMatcher(MatchRegexp, "three.four.five.six.seven.eight.nine.ten.one.two", "two.three.four.five.six.seven.eight.nine.ten.one"),
MustNewMatcher(MatchRegexp, "four.five.six.seven.eight.nine.ten.one.two.three", "three.four.five.six.seven.eight.nine.ten.one.two"),
MustNewMatcher(MatchRegexp, "five.six.seven.eight.nine.ten.one.two.three.four", "four.five.six.seven.eight.nine.ten.one.two.three"),
},
},
}
var mixed []*Matcher
for _, bc := range cases {
mixed = append(mixed, bc.matchers...)
}
rand.Shuffle(len(mixed), func(i, j int) { mixed[i], mixed[j] = mixed[j], mixed[i] })
cases = append(cases, benchCase{name: "mixed", matchers: mixed})
for _, bc := range cases {
b.Run(bc.name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
m := bc.matchers[i%len(bc.matchers)]
_ = m.String()
}
})
}
}


@ -828,7 +828,12 @@ type zeroOrOneCharacterStringMatcher struct {
} }
func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool { func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
if moreThanOneRune(s) { // If there's more than one rune in the string, then it can't match.
if r, size := utf8.DecodeRuneInString(s); r == utf8.RuneError {
// size is 0 for an empty string and 1 for an invalid leading byte.
// An empty string matches; an invalid rune matches only if nothing follows it.
return size == len(s)
} else if size < len(s) {
return false return false
} }
@ -840,27 +845,6 @@ func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
return s[0] != '\n' return s[0] != '\n'
} }
// moreThanOneRune returns true if there are more than one runes in the string.
// It doesn't check whether the string is valid UTF-8.
// The return value should be always equal to utf8.RuneCountInString(s) > 1,
// but the function is optimized for the common case where the string prefix is ASCII.
func moreThanOneRune(s string) bool {
// If len(s) is exactly one or zero, there can't be more than one rune.
// Exit through this path quickly.
if len(s) <= 1 {
return false
}
// There's one or more bytes:
// If first byte is ASCII then there are multiple runes if there are more bytes after that.
if s[0] < utf8.RuneSelf {
return len(s) > 1
}
// Less common case: first is a multibyte rune.
return utf8.RuneCountInString(s) > 1
}
// trueMatcher is a stringMatcher which matches any string (always returns true). // trueMatcher is a stringMatcher which matches any string (always returns true).
type trueMatcher struct{} type trueMatcher struct{}
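The rewritten `Matches` leans on the `utf8.DecodeRuneInString` contract. A standalone sketch of the resulting decision table (newline handling omitted for brevity):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	for _, s := range []string{"", "x", "😀", "\xff", "xx", "\xff\xfe"} {
		r, size := utf8.DecodeRuneInString(s)
		// size is 0 for "", 1 for an invalid leading byte, and the width of
		// the first rune otherwise; s matches iff nothing follows that rune.
		fmt.Printf("%-10q rune=%q size=%d matches=%v\n", s, r, size, size == len(s))
	}
}
```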


@ -19,6 +19,7 @@ import (
"strings" "strings"
"testing" "testing"
"time" "time"
"unicode/utf8"
"github.com/grafana/regexp" "github.com/grafana/regexp"
"github.com/grafana/regexp/syntax" "github.com/grafana/regexp/syntax"
@ -36,6 +37,7 @@ var (
".*foo", ".*foo",
"^.*foo$", "^.*foo$",
"^.+foo$", "^.+foo$",
".?",
".*", ".*",
".+", ".+",
"foo.+", "foo.+",
@ -88,6 +90,12 @@ var (
// Values matching / not matching the test regexps on long alternations. // Values matching / not matching the test regexps on long alternations.
"zQPbMkNO", "zQPbMkNo", "jyyfj00j0061", "jyyfj00j006", "jyyfj00j00612", "NNSPdvMi", "NNSPdvMiXXX", "NNSPdvMixxx", "nnSPdvMi", "nnSPdvMiXXX", "zQPbMkNO", "zQPbMkNo", "jyyfj00j0061", "jyyfj00j006", "jyyfj00j00612", "NNSPdvMi", "NNSPdvMiXXX", "NNSPdvMixxx", "nnSPdvMi", "nnSPdvMiXXX",
// Invalid utf8
"\xfefoo",
"foo\xfe",
"\xfd",
"\xff\xff",
} }
) )
@ -926,19 +934,91 @@ func BenchmarkOptimizeEqualStringMatchers(b *testing.B) {
} }
func TestZeroOrOneCharacterStringMatcher(t *testing.T) { func TestZeroOrOneCharacterStringMatcher(t *testing.T) {
matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} t.Run("match newline", func(t *testing.T) {
require.True(t, matcher.Matches("")) matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
require.True(t, matcher.Matches("x")) require.True(t, matcher.Matches(""))
require.True(t, matcher.Matches("\n")) require.True(t, matcher.Matches("x"))
require.False(t, matcher.Matches("xx")) require.True(t, matcher.Matches("\n"))
require.False(t, matcher.Matches("\n\n")) require.False(t, matcher.Matches("xx"))
require.False(t, matcher.Matches("\n\n"))
})
matcher = &zeroOrOneCharacterStringMatcher{matchNL: false} t.Run("do not match newline", func(t *testing.T) {
require.True(t, matcher.Matches("")) matcher := &zeroOrOneCharacterStringMatcher{matchNL: false}
require.True(t, matcher.Matches("x")) require.True(t, matcher.Matches(""))
require.False(t, matcher.Matches("\n")) require.True(t, matcher.Matches("x"))
require.False(t, matcher.Matches("xx")) require.False(t, matcher.Matches("\n"))
require.False(t, matcher.Matches("\n\n")) require.False(t, matcher.Matches("xx"))
require.False(t, matcher.Matches("\n\n"))
})
t.Run("unicode", func(t *testing.T) {
// Just for documentation purposes: emoji1 is 1 rune, emoji2 is 2 runes.
// Keeping this in mind will make fixing these tests easier for future readers.
emoji1 := "😀"
emoji2 := "❤️"
require.Equal(t, 1, utf8.RuneCountInString(emoji1))
require.Equal(t, 2, utf8.RuneCountInString(emoji2))
matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
require.True(t, matcher.Matches(emoji1))
require.False(t, matcher.Matches(emoji2))
require.False(t, matcher.Matches(emoji1+emoji1))
require.False(t, matcher.Matches("x"+emoji1))
require.False(t, matcher.Matches(emoji1+"x"))
require.False(t, matcher.Matches(emoji1+emoji2))
})
t.Run("invalid unicode", func(t *testing.T) {
// Just for reference, we also compare against what the `^.?$` regular expression matches.
re := regexp.MustCompile("^.?$")
matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
requireMatches := func(s string, expected bool) {
t.Helper()
require.Equal(t, expected, matcher.Matches(s))
require.Equal(t, re.MatchString(s), matcher.Matches(s))
}
requireMatches("\xff", true)
requireMatches("x\xff", false)
requireMatches("\xffx", false)
requireMatches("\xff\xfe", false)
})
}
func BenchmarkZeroOrOneCharacterStringMatcher(b *testing.B) {
type benchCase struct {
str string
matches bool
}
emoji1 := "😀"
emoji2 := "❤️"
cases := []benchCase{
{"", true},
{"x", true},
{"\n", true},
{"xx", false},
{"\n\n", false},
{emoji1, true},
{emoji2, false},
{emoji1 + emoji1, false},
{strings.Repeat("x", 100), false},
{strings.Repeat(emoji1, 100), false},
{strings.Repeat(emoji2, 100), false},
}
matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
b.ResetTimer()
for n := 0; n < b.N; n++ {
c := cases[n%len(cases)]
got := matcher.Matches(c.str)
if got != c.matches {
b.Fatalf("unexpected result for %q: got %t, want %t", c.str, got, c.matches)
}
}
} }
func TestLiteralPrefixStringMatcher(t *testing.T) { func TestLiteralPrefixStringMatcher(t *testing.T) {


@ -17,6 +17,7 @@ import (
"crypto/md5" "crypto/md5"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"strconv"
"strings" "strings"
"github.com/grafana/regexp" "github.com/grafana/regexp"
@ -290,7 +291,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) {
hash := md5.Sum([]byte(val)) hash := md5.Sum([]byte(val))
// Use only the last 8 bytes of the hash to give the same result as earlier versions of this code. // Use only the last 8 bytes of the hash to give the same result as earlier versions of this code.
mod := binary.BigEndian.Uint64(hash[8:]) % cfg.Modulus mod := binary.BigEndian.Uint64(hash[8:]) % cfg.Modulus
lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod)) lb.Set(cfg.TargetLabel, strconv.FormatUint(mod, 10))
case LabelMap: case LabelMap:
lb.Range(func(l labels.Label) { lb.Range(func(l labels.Label) {
if cfg.Regex.MatchString(l.Name) { if cfg.Regex.MatchString(l.Name) {
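A self-contained sketch of the `HashMod` computation touched above; the modulus and source value are illustrative stand-ins for what the relabel config would supply:

```go
package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
	"strconv"
)

func main() {
	const modulus = 8 // illustrative; comes from the relabel config
	hash := md5.Sum([]byte("instance-42"))
	// As in the code above, only the last 8 bytes of the MD5 sum feed the
	// modulo, for compatibility with earlier versions of this code.
	mod := binary.BigEndian.Uint64(hash[8:]) % modulus
	fmt.Println(strconv.FormatUint(mod, 10)) // e.g. "5"
}
```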


@ -14,7 +14,7 @@
package relabel package relabel
import ( import (
"fmt" "strconv"
"testing" "testing"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -657,7 +657,7 @@ func TestRelabelValidate(t *testing.T) {
}, },
} }
for i, test := range tests { for i, test := range tests {
t.Run(fmt.Sprint(i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
err := test.config.Validate() err := test.config.Validate()
if test.expected == "" { if test.expected == "" {
require.NoError(t, err) require.NoError(t, err)


@ -136,10 +136,11 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
// RuleGroup is a list of sequentially evaluated recording and alerting rules. // RuleGroup is a list of sequentially evaluated recording and alerting rules.
type RuleGroup struct { type RuleGroup struct {
Name string `yaml:"name"` Name string `yaml:"name"`
Interval model.Duration `yaml:"interval,omitempty"` Interval model.Duration `yaml:"interval,omitempty"`
Limit int `yaml:"limit,omitempty"` QueryOffset *model.Duration `yaml:"query_offset,omitempty"`
Rules []RuleNode `yaml:"rules"` Limit int `yaml:"limit,omitempty"`
Rules []RuleNode `yaml:"rules"`
} }
// Rule describes an alerting or recording rule. // Rule describes an alerting or recording rule.
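To see the new `query_offset` field round-trip, a hedged sketch using a local mirror of the struct; the mirror type is hypothetical and only copies the fields relevant here:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// ruleGroup mirrors only the RuleGroup fields relevant here; hypothetical,
// for illustration.
type ruleGroup struct {
	Name        string  `yaml:"name"`
	Interval    string  `yaml:"interval,omitempty"`
	QueryOffset *string `yaml:"query_offset,omitempty"`
	Limit       int     `yaml:"limit,omitempty"`
}

func main() {
	doc := []byte("name: example\ninterval: 1m\nquery_offset: 30s\n")
	var g ruleGroup
	if err := yaml.Unmarshal(doc, &g); err != nil {
		panic(err)
	}
	// A nil pointer means "fall back to the global rule_query_offset".
	fmt.Println(g.Name, g.Interval, *g.QueryOffset) // example 1m 30s
}
```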


@ -74,7 +74,7 @@ func TestHandlerNextBatch(t *testing.T) {
for i := range make([]struct{}, 2*maxBatchSize+1) { for i := range make([]struct{}, 2*maxBatchSize+1) {
h.queue = append(h.queue, &Alert{ h.queue = append(h.queue, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
}) })
} }
@ -186,10 +186,10 @@ func TestHandlerSendAll(t *testing.T) {
for i := range make([]struct{}, maxBatchSize) { for i := range make([]struct{}, maxBatchSize) {
h.queue = append(h.queue, &Alert{ h.queue = append(h.queue, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
}) })
expected = append(expected, &Alert{ expected = append(expected, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
}) })
} }
@ -297,23 +297,23 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) {
for i := range make([]struct{}, maxBatchSize/2) { for i := range make([]struct{}, maxBatchSize/2) {
h.queue = append(h.queue, h.queue = append(h.queue,
&Alert{ &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
}, },
&Alert{ &Alert{
Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)), Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)),
}, },
) )
expected1 = append(expected1, expected1 = append(expected1,
&Alert{ &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
}, &Alert{ }, &Alert{
Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)), Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)),
}, },
) )
expected2 = append(expected2, &Alert{ expected2 = append(expected2, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
}) })
} }
@ -502,7 +502,7 @@ func TestHandlerQueuing(t *testing.T) {
var alerts []*Alert var alerts []*Alert
for i := range make([]struct{}, 20*maxBatchSize) { for i := range make([]struct{}, 20*maxBatchSize) {
alerts = append(alerts, &Alert{ alerts = append(alerts, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
}) })
} }
@ -762,7 +762,7 @@ func TestHangingNotifier(t *testing.T) {
var alerts []*Alert var alerts []*Alert
for i := range make([]struct{}, 20*maxBatchSize) { for i := range make([]struct{}, 20*maxBatchSize) {
alerts = append(alerts, &Alert{ alerts = append(alerts, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
}) })
} }

File diff suppressed because it is too large


@ -138,6 +138,16 @@ func TestExprString(t *testing.T) {
{ {
in: `{__name__="",a="x"}`, in: `{__name__="",a="x"}`,
}, },
{
in: `{"a.b"="c"}`,
},
{
in: `{"0"="1"}`,
},
{
in: `{"_0"="1"}`,
out: `{_0="1"}`,
},
} }
for _, test := range inputs { for _, test := range inputs {


@ -269,3 +269,448 @@ eval instant at 50m histogram_sum(sum(incr_sum_histogram))
eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m]))) eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m])))
{} 30 {} 30
# Apply rate function to histogram.
load 15s
histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100
eval instant at 5m rate(histogram_rate[45s])
{} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}
eval range from 5m to 5m30s step 30s rate(histogram_rate[45s])
{} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1
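For reference, the expected values can be derived by hand: the 45s window ending at 5m covers four scrapes 15s apart, i.e. three increments of `count:9` and `sum:18.4`, and since the first and last samples sit exactly on the window edges there is nothing to extrapolate:

$$
\operatorname{rate}(\text{count}) = \frac{3 \cdot 9}{45\,\mathrm{s}} = 0.6/\mathrm{s},\qquad
\operatorname{rate}(\text{sum}) = \frac{3 \cdot 18.4}{45\,\mathrm{s}} \approx 1.2267/\mathrm{s}
$$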
# Apply count and sum function to histogram.
load 10m
histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_count(histogram_count_sum_2)
{} 24
eval instant at 10m histogram_sum(histogram_count_sum_2)
{} 100
# Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res).
load 10m
histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1)
{} 1.0787993180043811
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1)
{} 1.163807968526718
# Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res).
load 10m
histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2)
{} 0.0048960313898237465
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2)
{} 2.3971123370139447e-05
# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}.
load 10m
histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3)
{} 42.947236400258
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3)
{} 1844.4651144196398
# Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}.
load 10m
histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4)
{} 27556.344499842
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4)
{} 759352122.1939945
# Apply stddev and stdvar function to histogram with {-10x10}.
load 10m
histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5)
{} 1.3137084989848
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5)
{} 1.725830020304794
# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}.
load 10m
histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6)
{} NaN
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6)
{} NaN
# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}.
load 10m
histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7)
{} NaN
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7)
{} NaN
# Apply quantile function to histogram with all positive buckets with zero bucket.
load 10m
histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_quantile(1.001, histogram_quantile_1)
{} Inf
eval instant at 10m histogram_quantile(1, histogram_quantile_1)
{} 16
eval instant at 10m histogram_quantile(0.99, histogram_quantile_1)
{} 15.759999999999998
eval instant at 10m histogram_quantile(0.9, histogram_quantile_1)
{} 13.600000000000001
eval instant at 10m histogram_quantile(0.6, histogram_quantile_1)
{} 4.799999999999997
eval instant at 10m histogram_quantile(0.5, histogram_quantile_1)
{} 1.6666666666666665
eval instant at 10m histogram_quantile(0.1, histogram_quantile_1)
{} 0.0006000000000000001
eval instant at 10m histogram_quantile(0, histogram_quantile_1)
{} 0
eval instant at 10m histogram_quantile(-1, histogram_quantile_1)
{} -Inf
# Apply quantile function to histogram with all negative buckets with zero bucket.
load 10m
histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_quantile(1.001, histogram_quantile_2)
{} Inf
eval instant at 10m histogram_quantile(1, histogram_quantile_2)
{} 0
eval instant at 10m histogram_quantile(0.99, histogram_quantile_2)
{} -6.000000000000048e-05
eval instant at 10m histogram_quantile(0.9, histogram_quantile_2)
{} -0.0005999999999999996
eval instant at 10m histogram_quantile(0.5, histogram_quantile_2)
{} -1.6666666666666667
eval instant at 10m histogram_quantile(0.1, histogram_quantile_2)
{} -13.6
eval instant at 10m histogram_quantile(0, histogram_quantile_2)
{} -16
eval instant at 10m histogram_quantile(-1, histogram_quantile_2)
{} -Inf
# Apply quantile function to histogram with both positive and negative buckets with zero bucket.
load 10m
histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_quantile(1.001, histogram_quantile_3)
{} Inf
eval instant at 10m histogram_quantile(1, histogram_quantile_3)
{} 16
eval instant at 10m histogram_quantile(0.99, histogram_quantile_3)
{} 15.519999999999996
eval instant at 10m histogram_quantile(0.9, histogram_quantile_3)
{} 11.200000000000003
eval instant at 10m histogram_quantile(0.7, histogram_quantile_3)
{} 1.2666666666666657
eval instant at 10m histogram_quantile(0.55, histogram_quantile_3)
{} 0.0006000000000000005
eval instant at 10m histogram_quantile(0.5, histogram_quantile_3)
{} 0
eval instant at 10m histogram_quantile(0.45, histogram_quantile_3)
{} -0.0005999999999999996
eval instant at 10m histogram_quantile(0.3, histogram_quantile_3)
{} -1.266666666666667
eval instant at 10m histogram_quantile(0.1, histogram_quantile_3)
{} -11.2
eval instant at 10m histogram_quantile(0.01, histogram_quantile_3)
{} -15.52
eval instant at 10m histogram_quantile(0, histogram_quantile_3)
{} -16
eval instant at 10m histogram_quantile(-1, histogram_quantile_3)
{} -Inf
# Apply fraction function to empty histogram.
load 10m
histogram_fraction_1 {{}}x1
eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1)
{} NaN
# Apply fraction function to histogram with positive and zero buckets.
load 10m
histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_2)
{} 1
eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_2)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(0, 0.0005, histogram_fraction_2)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2)
{} 0.8333333333333334
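The first few expectations above can be checked by hand. The histogram holds 12 observations, 2 of them in the zero bucket; reading off the expected values, the zero-bucket mass behaves as if it lay uniformly in $[0, 0.001]$ (plausible here since there are no negative buckets; this is an inference from the test values, not a statement about the implementation):

$$
\operatorname{fraction}(0, 0.001) = \tfrac{2}{12} \approx 0.1667,\quad
\operatorname{fraction}(0, 0.0005) = \tfrac{2}{12}\cdot\tfrac{0.0005}{0.001} = \tfrac{1}{12} \approx 0.0833,\quad
\operatorname{fraction}(0.001, \infty) = \tfrac{10}{12} \approx 0.8333
$$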
eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(1, 2, histogram_fraction_2)
{} 0.25
eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_2)
{} 0.125
eval instant at 10m histogram_fraction(1, 8, histogram_fraction_2)
{} 0.3333333333333333
eval instant at 10m histogram_fraction(1, 6, histogram_fraction_2)
{} 0.2916666666666667
eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_2)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(0, 0, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(42, 42, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_2)
{} NaN
eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_2)
{} NaN
eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_2)
{} NaN
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_2)
{} 1
# Apply fraction function to histogram with negative and zero buckets.
load 10m
histogram_fraction_3 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_3)
{} 1
eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_3)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(-0.0005, 0, histogram_fraction_3)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_3)
{} 0.8333333333333334
eval instant at 10m histogram_fraction(1, 2, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(1, 8, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(1, 6, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_3)
{} 0.25
eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_3)
{} 0.125
eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_3)
{} 0.3333333333333333
eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_3)
{} 0.2916666666666667
eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_3)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(0, 0, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(42, 42, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_3)
{} NaN
eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_3)
{} NaN
eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3)
{} NaN
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3)
{} 1
# Apply fraction function to histogram with both positive, negative and zero buckets.
load 10m
histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_4)
{} 0.5
eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_4)
{} 0.5
eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_4)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_4)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(-0.0005, 0.0005, histogram_fraction_4)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_4)
{} 0.4166666666666667
eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_4)
{} 0.4166666666666667
eval instant at 10m histogram_fraction(1, 2, histogram_fraction_4)
{} 0.125
eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_4)
{} 0.0625
eval instant at 10m histogram_fraction(1, 8, histogram_fraction_4)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(1, 6, histogram_fraction_4)
{} 0.14583333333333334
eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_4)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_4)
{} 0.125
eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_4)
{} 0.0625
eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_4)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_4)
{} 0.14583333333333334
eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_4)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_4)
{} 0
eval instant at 10m histogram_fraction(0, 0, histogram_fraction_4)
{} 0
eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_4)
{} 0
eval instant at 10m histogram_fraction(42, 42, histogram_fraction_4)
{} 0
eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_4)
{} 0
eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_4)
{} NaN
eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_4)
{} NaN
eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_4)
{} NaN
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4)
{} 1


@ -16,6 +16,8 @@ package promql
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt"
"io" "io"
"os" "os"
"path/filepath" "path/filepath"
@ -36,6 +38,8 @@ type ActiveQueryTracker struct {
maxConcurrent int maxConcurrent int
} }
var _ io.Closer = &ActiveQueryTracker{}
type Entry struct { type Entry struct {
Query string `json:"query"` Query string `json:"query"`
Timestamp int64 `json:"timestamp_sec"` Timestamp int64 `json:"timestamp_sec"`
@ -83,6 +87,23 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
} }
} }
type mmapedFile struct {
f io.Closer
m mmap.MMap
}
func (f *mmapedFile) Close() error {
err := f.m.Unmap()
if err != nil {
err = fmt.Errorf("mmapedFile: unmapping: %w", err)
}
if fErr := f.f.Close(); fErr != nil {
return errors.Join(fmt.Errorf("close mmapedFile.f: %w", fErr), err)
}
return err
}
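A minimal sketch (Go 1.20+) of the `errors.Join` behavior relied on above: both constituent errors remain observable through `errors.Is`:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	errUnmap := errors.New("unmap failed")
	errClose := errors.New("close failed")
	joined := errors.Join(errClose, errUnmap)
	fmt.Println(joined) // both messages, newline-separated
	fmt.Println(errors.Is(joined, errUnmap), errors.Is(joined, errClose)) // true true
}
```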
func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) { func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
if err != nil { if err != nil {
@ -108,7 +129,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io
return nil, nil, err return nil, nil, err
} }
return fileAsBytes, file, err return fileAsBytes, &mmapedFile{f: file, m: fileAsBytes}, err
} }
func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker {
@ -204,9 +225,13 @@ func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int
} }
} }
func (tracker *ActiveQueryTracker) Close() { // Close closes tracker.
func (tracker *ActiveQueryTracker) Close() error {
if tracker == nil || tracker.closer == nil { if tracker == nil || tracker.closer == nil {
return return nil
} }
tracker.closer.Close() if err := tracker.closer.Close(); err != nil {
return fmt.Errorf("close ActiveQueryTracker.closer: %w", err)
}
return nil
} }
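
A hedged sketch of what the error-returning Close enables at call sites; the arguments and the go-kit level logging are illustrative assumptions, not code from this commit:

tracker := NewActiveQueryTracker(localStoragePath, maxConcurrent, logger)
defer func() {
	// Close now surfaces unmap/close failures instead of discarding them.
	if err := tracker.Close(); err != nil {
		level.Warn(logger).Log("msg", "closing ActiveQueryTracker", "err", err)
	}
}()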

View file

@ -16,6 +16,7 @@ package promql
import ( import (
"context" "context"
"os" "os"
"path/filepath"
"testing" "testing"
"github.com/grafana/regexp" "github.com/grafana/regexp"
@ -104,26 +105,26 @@ func TestIndexReuse(t *testing.T) {
} }
func TestMMapFile(t *testing.T) { func TestMMapFile(t *testing.T) {
file, err := os.CreateTemp("", "mmapedFile") dir := t.TempDir()
fpath := filepath.Join(dir, "mmapedFile")
const data = "ab"
fileAsBytes, closer, err := getMMapedFile(fpath, 2, nil)
require.NoError(t, err) require.NoError(t, err)
copy(fileAsBytes, data)
require.NoError(t, closer.Close())
filename := file.Name() f, err := os.Open(fpath)
defer os.Remove(filename)
fileAsBytes, _, err := getMMapedFile(filename, 2, nil)
require.NoError(t, err)
copy(fileAsBytes, "ab")
f, err := os.Open(filename)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() {
_ = f.Close()
})
bytes := make([]byte, 4) bytes := make([]byte, 4)
n, err := f.Read(bytes) n, err := f.Read(bytes)
require.Equal(t, 2, n)
require.NoError(t, err, "Unexpected error while reading file.") require.NoError(t, err, "Unexpected error while reading file.")
require.Equal(t, 2, n)
require.Equal(t, fileAsBytes, bytes[:2], "Mmap failed") require.Equal(t, []byte(data), bytes[:2], "Mmap failed")
} }
func TestParseBrokenJSON(t *testing.T) { func TestParseBrokenJSON(t *testing.T) {

View file

@ -338,10 +338,9 @@ const resolvedRetention = 15 * time.Minute
// Eval evaluates the rule expression and then creates pending alerts and fires // Eval evaluates the rule expression and then creates pending alerts and fires
// or removes previously pending alerts accordingly. // or removes previously pending alerts accordingly.
func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) { func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) {
ctx = NewOriginContext(ctx, NewRuleDetail(r)) ctx = NewOriginContext(ctx, NewRuleDetail(r))
res, err := query(ctx, r.vector.String(), ts.Add(-queryOffset))
res, err := query(ctx, r.vector.String(), ts)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -484,8 +483,8 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
} }
if r.restored.Load() { if r.restored.Load() {
vec = append(vec, r.sample(a, ts)) vec = append(vec, r.sample(a, ts.Add(-queryOffset)))
vec = append(vec, r.forStateSample(a, ts, float64(a.ActiveAt.Unix()))) vec = append(vec, r.forStateSample(a, ts.Add(-queryOffset), float64(a.ActiveAt.Unix())))
} }
} }
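
Alerting and recording rules now share the same pattern: query the past by queryOffset, and stamp emitted samples at that same shifted instant so series stay continuous. As a hedged standalone sketch (evalShifted is a hypothetical helper, not part of the diff):

func evalShifted(ctx context.Context, query QueryFunc, expr string, ts time.Time, queryOffset time.Duration) (promql.Vector, error) {
	// Evaluate against data as of ts-queryOffset; callers then also use
	// ts.Add(-queryOffset) when appending ALERTS/ALERTS_FOR_STATE samples.
	return query(ctx, expr, ts.Add(-queryOffset))
}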

View file

@ -123,7 +123,7 @@ func TestAlertingRuleTemplateWithHistogram(t *testing.T) {
) )
evalTime := time.Now() evalTime := time.Now()
res, err := rule.Eval(context.TODO(), evalTime, q, nil, 0) res, err := rule.Eval(context.TODO(), 0, evalTime, q, nil, 0)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, res, 2) require.Len(t, res, 2)
@ -230,7 +230,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
t.Logf("case %d", i) t.Logf("case %d", i)
evalTime := baseTime.Add(time.Duration(i) * time.Minute) evalTime := baseTime.Add(time.Duration(i) * time.Minute)
result[0].T = timestamp.FromTime(evalTime) result[0].T = timestamp.FromTime(evalTime)
res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
@ -247,7 +247,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
testutil.RequireEqual(t, result, filteredRes) testutil.RequireEqual(t, result, filteredRes)
} }
evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, res) require.Empty(t, res)
} }
@ -315,7 +315,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
res, err := ruleWithoutExternalLabels.Eval( res, err := ruleWithoutExternalLabels.Eval(
context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
) )
require.NoError(t, err) require.NoError(t, err)
for _, smpl := range res { for _, smpl := range res {
@ -329,7 +329,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
} }
res, err = ruleWithExternalLabels.Eval( res, err = ruleWithExternalLabels.Eval(
context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
) )
require.NoError(t, err) require.NoError(t, err)
for _, smpl := range res { for _, smpl := range res {
@ -408,7 +408,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
res, err := ruleWithoutExternalURL.Eval( res, err := ruleWithoutExternalURL.Eval(
context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
) )
require.NoError(t, err) require.NoError(t, err)
for _, smpl := range res { for _, smpl := range res {
@ -422,7 +422,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
} }
res, err = ruleWithExternalURL.Eval( res, err = ruleWithExternalURL.Eval(
context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
) )
require.NoError(t, err) require.NoError(t, err)
for _, smpl := range res { for _, smpl := range res {
@ -477,7 +477,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
res, err := rule.Eval( res, err := rule.Eval(
context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
) )
require.NoError(t, err) require.NoError(t, err)
for _, smpl := range res { for _, smpl := range res {
@ -544,7 +544,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }};
close(getDoneCh) close(getDoneCh)
}() }()
_, err = ruleWithQueryInTemplate.Eval( _, err = ruleWithQueryInTemplate.Eval(
context.TODO(), evalTime, slowQueryFunc, nil, 0, context.TODO(), 0, evalTime, slowQueryFunc, nil, 0,
) )
require.NoError(t, err) require.NoError(t, err)
} }
@ -596,7 +596,7 @@ func TestAlertingRuleDuplicate(t *testing.T) {
"", "",
true, log.NewNopLogger(), true, log.NewNopLogger(),
) )
_, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, 0) _, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0)
require.Error(t, err) require.Error(t, err)
require.EqualError(t, err, "vector contains metrics with the same labelset after applying alert labels") require.EqualError(t, err, "vector contains metrics with the same labelset after applying alert labels")
} }
@ -644,7 +644,7 @@ func TestAlertingRuleLimit(t *testing.T) {
evalTime := time.Unix(0, 0) evalTime := time.Unix(0, 0)
for _, test := range tests { for _, test := range tests {
switch _, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); { switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); {
case err != nil: case err != nil:
require.EqualError(t, err, test.err) require.EqualError(t, err, test.err)
case test.err != "": case test.err != "":
@ -871,7 +871,7 @@ func TestKeepFiringFor(t *testing.T) {
t.Logf("case %d", i) t.Logf("case %d", i)
evalTime := baseTime.Add(time.Duration(i) * time.Minute) evalTime := baseTime.Add(time.Duration(i) * time.Minute)
result[0].T = timestamp.FromTime(evalTime) result[0].T = timestamp.FromTime(evalTime)
res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
@ -888,7 +888,7 @@ func TestKeepFiringFor(t *testing.T) {
testutil.RequireEqual(t, result, filteredRes) testutil.RequireEqual(t, result, filteredRes)
} }
evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, res) require.Empty(t, res)
} }
@ -925,7 +925,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) {
baseTime := time.Unix(0, 0) baseTime := time.Unix(0, 0)
result.T = timestamp.FromTime(baseTime) result.T = timestamp.FromTime(baseTime)
res, err := rule.Eval(context.TODO(), baseTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), 0, baseTime, EngineQueryFunc(testEngine, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, res, 2) require.Len(t, res, 2)
@ -940,7 +940,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) {
} }
evalTime := baseTime.Add(time.Minute) evalTime := baseTime.Add(time.Minute)
res, err = rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err = rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, res) require.Empty(t, res)
} }
@ -974,7 +974,7 @@ func TestAlertingEvalWithOrigin(t *testing.T) {
true, log.NewNopLogger(), true, log.NewNopLogger(),
) )
_, err = rule.Eval(ctx, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) { _, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) {
detail = FromOriginContext(ctx) detail = FromOriginContext(ctx)
return nil, nil return nil, nil
}, nil, 0) }, nil, 0)

View file

@ -47,6 +47,7 @@ type Group struct {
name string name string
file string file string
interval time.Duration interval time.Duration
queryOffset *time.Duration
limit int limit int
rules []Rule rules []Rule
seriesInPreviousEval []map[string]labels.Labels // One per Rule. seriesInPreviousEval []map[string]labels.Labels // One per Rule.
@ -90,6 +91,7 @@ type GroupOptions struct {
Rules []Rule Rules []Rule
ShouldRestore bool ShouldRestore bool
Opts *ManagerOptions Opts *ManagerOptions
QueryOffset *time.Duration
done chan struct{} done chan struct{}
EvalIterationFunc GroupEvalIterationFunc EvalIterationFunc GroupEvalIterationFunc
} }
@ -126,6 +128,7 @@ func NewGroup(o GroupOptions) *Group {
name: o.Name, name: o.Name,
file: o.File, file: o.File,
interval: o.Interval, interval: o.Interval,
queryOffset: o.QueryOffset,
limit: o.Limit, limit: o.Limit,
rules: o.Rules, rules: o.Rules,
shouldRestore: o.ShouldRestore, shouldRestore: o.ShouldRestore,
@ -443,6 +446,8 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
wg sync.WaitGroup wg sync.WaitGroup
) )
ruleQueryOffset := g.QueryOffset()
for i, rule := range g.rules { for i, rule := range g.rules {
select { select {
case <-g.done: case <-g.done:
@ -473,7 +478,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()
vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit()) vector, err := rule.Eval(ctx, ruleQueryOffset, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit())
if err != nil { if err != nil {
rule.SetHealth(HealthBad) rule.SetHealth(HealthBad)
rule.SetLastError(err) rule.SetLastError(err)
@ -562,7 +567,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
for metric, lset := range g.seriesInPreviousEval[i] { for metric, lset := range g.seriesInPreviousEval[i] {
if _, ok := seriesReturned[metric]; !ok { if _, ok := seriesReturned[metric]; !ok {
// Series no longer exposed, mark it stale. // Series no longer exposed, mark it stale.
_, err = app.Append(0, lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN)) _, err = app.Append(0, lset, timestamp.FromTime(ts.Add(-ruleQueryOffset)), math.Float64frombits(value.StaleNaN))
unwrappedErr := errors.Unwrap(err) unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil { if unwrappedErr == nil {
unwrappedErr = err unwrappedErr = err
@ -601,14 +606,27 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
g.cleanupStaleSeries(ctx, ts) g.cleanupStaleSeries(ctx, ts)
} }
func (g *Group) QueryOffset() time.Duration {
if g.queryOffset != nil {
return *g.queryOffset
}
if g.opts.DefaultRuleQueryOffset != nil {
return g.opts.DefaultRuleQueryOffset()
}
return time.Duration(0)
}
func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) {
if len(g.staleSeries) == 0 { if len(g.staleSeries) == 0 {
return return
} }
app := g.opts.Appendable.Appender(ctx) app := g.opts.Appendable.Appender(ctx)
queryOffset := g.QueryOffset()
for _, s := range g.staleSeries { for _, s := range g.staleSeries {
// Rule that produced series no longer configured, mark it stale. // Rule that produced series no longer configured, mark it stale.
_, err := app.Append(0, s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN)) _, err := app.Append(0, s, timestamp.FromTime(ts.Add(-queryOffset)), math.Float64frombits(value.StaleNaN))
unwrappedErr := errors.Unwrap(err) unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil { if unwrappedErr == nil {
unwrappedErr = err unwrappedErr = err
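
The precedence QueryOffset encodes, as a hedged in-package sketch; the literal groups below are illustrations, not from this commit:

perGroup := 2 * time.Minute
g1 := &Group{queryOffset: &perGroup, opts: &ManagerOptions{}} // rule-group setting wins
g2 := &Group{opts: &ManagerOptions{DefaultRuleQueryOffset: func() time.Duration { return time.Minute }}}
g3 := &Group{opts: &ManagerOptions{}} // nothing configured
fmt.Println(g1.QueryOffset(), g2.QueryOffset(), g3.QueryOffset()) // 2m0s 1m0s 0s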

View file

@ -116,6 +116,7 @@ type ManagerOptions struct {
ForGracePeriod time.Duration ForGracePeriod time.Duration
ResendDelay time.Duration ResendDelay time.Duration
GroupLoader GroupLoader GroupLoader GroupLoader
DefaultRuleQueryOffset func() time.Duration
MaxConcurrentEvals int64 MaxConcurrentEvals int64
ConcurrentEvalsEnabled bool ConcurrentEvalsEnabled bool
RuleConcurrencyController RuleConcurrencyController RuleConcurrencyController RuleConcurrencyController
@ -336,6 +337,7 @@ func (m *Manager) LoadGroups(
Rules: rules, Rules: rules,
ShouldRestore: shouldRestore, ShouldRestore: shouldRestore,
Opts: m.opts, Opts: m.opts,
QueryOffset: (*time.Duration)(rg.QueryOffset),
done: m.done, done: m.done,
EvalIterationFunc: groupEvalIterationFunc, EvalIterationFunc: groupEvalIterationFunc,
}) })
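
How a caller might wire the manager-level default so it tracks live config reloads; the global config field name here is an assumption for illustration, not part of this diff:

opts := &ManagerOptions{
	// Re-read on every call so a reloaded default takes effect immediately.
	DefaultRuleQueryOffset: func() time.Duration {
		return time.Duration(cfg.GlobalConfig.RuleQueryOffset) // assumed config field
	},
}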

View file

@ -16,9 +16,12 @@ package rules
import ( import (
"context" "context"
"fmt" "fmt"
"io/fs"
"math" "math"
"os" "os"
"path"
"sort" "sort"
"strconv"
"sync" "sync"
"testing" "testing"
"time" "time"
@ -161,7 +164,7 @@ func TestAlertingRule(t *testing.T) {
evalTime := baseTime.Add(test.time) evalTime := baseTime.Add(test.time)
res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
@ -191,152 +194,156 @@ func TestAlertingRule(t *testing.T) {
} }
func TestForStateAddSamples(t *testing.T) { func TestForStateAddSamples(t *testing.T) {
storage := promqltest.LoadedStorage(t, ` for _, queryOffset := range []time.Duration{0, time.Minute} {
t.Run(fmt.Sprintf("queryOffset %s", queryOffset.String()), func(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 5m load 5m
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85 http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85
http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140 http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140
`) `)
t.Cleanup(func() { storage.Close() }) t.Cleanup(func() { storage.Close() })
expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`) expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
require.NoError(t, err) require.NoError(t, err)
rule := NewAlertingRule( rule := NewAlertingRule(
"HTTPRequestRateLow", "HTTPRequestRateLow",
expr, expr,
time.Minute, time.Minute,
0, 0,
labels.FromStrings("severity", "{{\"c\"}}ritical"), labels.FromStrings("severity", "{{\"c\"}}ritical"),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil, labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
) )
result := promql.Vector{ result := promql.Vector{
promql.Sample{ promql.Sample{
Metric: labels.FromStrings( Metric: labels.FromStrings(
"__name__", "ALERTS_FOR_STATE", "__name__", "ALERTS_FOR_STATE",
"alertname", "HTTPRequestRateLow", "alertname", "HTTPRequestRateLow",
"group", "canary", "group", "canary",
"instance", "0", "instance", "0",
"job", "app-server", "job", "app-server",
"severity", "critical", "severity", "critical",
), ),
F: 1, F: 1,
}, },
promql.Sample{ promql.Sample{
Metric: labels.FromStrings( Metric: labels.FromStrings(
"__name__", "ALERTS_FOR_STATE", "__name__", "ALERTS_FOR_STATE",
"alertname", "HTTPRequestRateLow", "alertname", "HTTPRequestRateLow",
"group", "canary", "group", "canary",
"instance", "1", "instance", "1",
"job", "app-server", "job", "app-server",
"severity", "critical", "severity", "critical",
), ),
F: 1, F: 1,
}, },
promql.Sample{ promql.Sample{
Metric: labels.FromStrings( Metric: labels.FromStrings(
"__name__", "ALERTS_FOR_STATE", "__name__", "ALERTS_FOR_STATE",
"alertname", "HTTPRequestRateLow", "alertname", "HTTPRequestRateLow",
"group", "canary", "group", "canary",
"instance", "0", "instance", "0",
"job", "app-server", "job", "app-server",
"severity", "critical", "severity", "critical",
), ),
F: 1, F: 1,
}, },
promql.Sample{ promql.Sample{
Metric: labels.FromStrings( Metric: labels.FromStrings(
"__name__", "ALERTS_FOR_STATE", "__name__", "ALERTS_FOR_STATE",
"alertname", "HTTPRequestRateLow", "alertname", "HTTPRequestRateLow",
"group", "canary", "group", "canary",
"instance", "1", "instance", "1",
"job", "app-server", "job", "app-server",
"severity", "critical", "severity", "critical",
), ),
F: 1, F: 1,
}, },
}
baseTime := time.Unix(0, 0)
tests := []struct {
time time.Duration
result promql.Vector
persistThisTime bool // If true, it means this 'time' is persisted for 'for'.
}{
{
time: 0,
result: append(promql.Vector{}, result[:2]...),
persistThisTime: true,
},
{
time: 5 * time.Minute,
result: append(promql.Vector{}, result[2:]...),
},
{
time: 10 * time.Minute,
result: append(promql.Vector{}, result[2:3]...),
},
{
time: 15 * time.Minute,
result: nil,
},
{
time: 20 * time.Minute,
result: nil,
},
{
time: 25 * time.Minute,
result: append(promql.Vector{}, result[:1]...),
persistThisTime: true,
},
{
time: 30 * time.Minute,
result: append(promql.Vector{}, result[2:3]...),
},
}
var forState float64
for i, test := range tests {
t.Logf("case %d", i)
evalTime := baseTime.Add(test.time)
if test.persistThisTime {
forState = float64(evalTime.Unix())
}
if test.result == nil {
forState = float64(value.StaleNaN)
}
res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS' samples.
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS_FOR_STATE" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'.
require.Equal(t, "ALERTS", smplName)
} }
}
for i := range test.result {
test.result[i].T = timestamp.FromTime(evalTime)
// Updating the expected 'for' state.
if test.result[i].F >= 0 {
test.result[i].F = forState
}
}
require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
sort.Slice(filteredRes, func(i, j int) bool { baseTime := time.Unix(0, 0)
return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
tests := []struct {
time time.Duration
result promql.Vector
persistThisTime bool // If true, it means this 'time' is persisted for 'for'.
}{
{
time: 0,
result: append(promql.Vector{}, result[:2]...),
persistThisTime: true,
},
{
time: 5 * time.Minute,
result: append(promql.Vector{}, result[2:]...),
},
{
time: 10 * time.Minute,
result: append(promql.Vector{}, result[2:3]...),
},
{
time: 15 * time.Minute,
result: nil,
},
{
time: 20 * time.Minute,
result: nil,
},
{
time: 25 * time.Minute,
result: append(promql.Vector{}, result[:1]...),
persistThisTime: true,
},
{
time: 30 * time.Minute,
result: append(promql.Vector{}, result[2:3]...),
},
}
var forState float64
for i, test := range tests {
t.Logf("case %d", i)
evalTime := baseTime.Add(test.time).Add(queryOffset)
if test.persistThisTime {
forState = float64(evalTime.Unix())
}
if test.result == nil {
forState = float64(value.StaleNaN)
}
res, err := rule.Eval(context.TODO(), queryOffset, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS' samples.
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS_FOR_STATE" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'.
require.Equal(t, "ALERTS", smplName)
}
}
for i := range test.result {
test.result[i].T = timestamp.FromTime(evalTime.Add(-queryOffset))
// Updating the expected 'for' state.
if test.result[i].F >= 0 {
test.result[i].F = forState
}
}
require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
sort.Slice(filteredRes, func(i, j int) bool {
return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
})
prom_testutil.RequireEqual(t, test.result, filteredRes)
for _, aa := range rule.ActiveAlerts() {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
}
}) })
prom_testutil.RequireEqual(t, test.result, filteredRes)
for _, aa := range rule.ActiveAlerts() {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
} }
} }
@ -348,243 +355,251 @@ func sortAlerts(items []*Alert) {
} }
func TestForStateRestore(t *testing.T) { func TestForStateRestore(t *testing.T) {
storage := promqltest.LoadedStorage(t, ` for _, queryOffset := range []time.Duration{0, time.Minute} {
t.Run(fmt.Sprintf("queryOffset %s", queryOffset.String()), func(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 5m load 5m
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 50 0 0 25 0 0 40 0 120 http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 50 0 0 25 0 0 40 0 120
http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 125 90 60 0 0 25 0 0 40 0 130 http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 125 90 60 0 0 25 0 0 40 0 130
`) `)
t.Cleanup(func() { storage.Close() }) t.Cleanup(func() { storage.Close() })
expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`) expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
require.NoError(t, err) require.NoError(t, err)
opts := &ManagerOptions{ opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(testEngine, storage), QueryFunc: EngineQueryFunc(testEngine, storage),
Appendable: storage, Appendable: storage,
Queryable: storage, Queryable: storage,
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: log.NewNopLogger(),
NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {}, NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {},
OutageTolerance: 30 * time.Minute, OutageTolerance: 30 * time.Minute,
ForGracePeriod: 10 * time.Minute, ForGracePeriod: 10 * time.Minute,
}
alertForDuration := 25 * time.Minute
// Initial run before prometheus goes down.
rule := NewAlertingRule(
"HTTPRequestRateLow",
expr,
alertForDuration,
0,
labels.FromStrings("severity", "critical"),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
)
group := NewGroup(GroupOptions{
Name: "default",
Interval: time.Second,
Rules: []Rule{rule},
ShouldRestore: true,
Opts: opts,
})
groups := make(map[string]*Group)
groups["default;"] = group
initialRuns := []time.Duration{0, 5 * time.Minute}
baseTime := time.Unix(0, 0)
for _, duration := range initialRuns {
evalTime := baseTime.Add(duration)
group.Eval(context.TODO(), evalTime)
}
// Prometheus goes down here. We create new rules and groups.
type testInput struct {
name string
restoreDuration time.Duration
expectedAlerts []*Alert
num int
noRestore bool
gracePeriod bool
downDuration time.Duration
before func()
}
tests := []testInput{
{
name: "normal restore (alerts were not firing)",
restoreDuration: 15 * time.Minute,
expectedAlerts: rule.ActiveAlerts(),
downDuration: 10 * time.Minute,
},
{
name: "outage tolerance",
restoreDuration: 40 * time.Minute,
noRestore: true,
num: 2,
},
{
name: "no active alerts",
restoreDuration: 50 * time.Minute,
expectedAlerts: []*Alert{},
},
{
name: "test the grace period",
restoreDuration: 25 * time.Minute,
expectedAlerts: []*Alert{},
gracePeriod: true,
before: func() {
for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} {
evalTime := baseTime.Add(duration)
group.Eval(context.TODO(), evalTime)
}
},
num: 2,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.before != nil {
tt.before()
} }
newRule := NewAlertingRule( alertForDuration := 25 * time.Minute
// Initial run before prometheus goes down.
rule := NewAlertingRule(
"HTTPRequestRateLow", "HTTPRequestRateLow",
expr, expr,
alertForDuration, alertForDuration,
0, 0,
labels.FromStrings("severity", "critical"), labels.FromStrings("severity", "critical"),
labels.EmptyLabels(), labels.EmptyLabels(), "", false, nil, labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
) )
newGroup := NewGroup(GroupOptions{
group := NewGroup(GroupOptions{
Name: "default", Name: "default",
Interval: time.Second, Interval: time.Second,
Rules: []Rule{newRule}, Rules: []Rule{rule},
ShouldRestore: true, ShouldRestore: true,
Opts: opts, Opts: opts,
}) })
groups := make(map[string]*Group)
groups["default;"] = group
newGroups := make(map[string]*Group) initialRuns := []time.Duration{0, 5 * time.Minute}
newGroups["default;"] = newGroup
restoreTime := baseTime.Add(tt.restoreDuration) baseTime := time.Unix(0, 0)
// First eval before restoration. for _, duration := range initialRuns {
newGroup.Eval(context.TODO(), restoreTime) evalTime := baseTime.Add(duration)
// Restore happens here. group.Eval(context.TODO(), evalTime)
newGroup.RestoreForState(restoreTime)
got := newRule.ActiveAlerts()
for _, aa := range got {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
} }
sort.Slice(got, func(i, j int) bool {
return labels.Compare(got[i].Labels, got[j].Labels) < 0
})
// In all cases, we expect the restoration process to have completed. // Prometheus goes down here. We create new rules and groups.
require.Truef(t, newRule.Restored(), "expected the rule restoration process to have completed") type testInput struct {
name string
restoreDuration time.Duration
expectedAlerts []*Alert
// Checking if we have restored it correctly. num int
switch { noRestore bool
case tt.noRestore: gracePeriod bool
require.Len(t, got, tt.num) downDuration time.Duration
for _, e := range got { before func()
require.Equal(t, e.ActiveAt, restoreTime) }
}
case tt.gracePeriod:
require.Len(t, got, tt.num) tests := []testInput{
for _, e := range got { {
require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime)) name: "normal restore (alerts were not firing)",
} restoreDuration: 15 * time.Minute,
default: expectedAlerts: rule.ActiveAlerts(),
exp := tt.expectedAlerts downDuration: 10 * time.Minute,
require.Equal(t, len(exp), len(got)) },
sortAlerts(exp) {
sortAlerts(got) name: "outage tolerance",
for i, e := range exp { restoreDuration: 40 * time.Minute,
require.Equal(t, e.Labels, got[i].Labels) noRestore: true,
num: 2,
},
{
name: "no active alerts",
restoreDuration: 50 * time.Minute,
expectedAlerts: []*Alert{},
},
{
name: "test the grace period",
restoreDuration: 25 * time.Minute,
expectedAlerts: []*Alert{},
gracePeriod: true,
before: func() {
for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} {
evalTime := baseTime.Add(duration)
group.Eval(context.TODO(), evalTime)
}
},
num: 2,
},
}
// Difference in time should be within 1e6 ns, i.e. 1ms for _, tt := range tests {
// (due to conversion between ns & ms, float64 & int64). t.Run(tt.name, func(t *testing.T) {
activeAtDiff := float64(e.ActiveAt.Unix() + int64(tt.downDuration/time.Second) - got[i].ActiveAt.Unix()) if tt.before != nil {
require.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong") tt.before()
} }
newRule := NewAlertingRule(
"HTTPRequestRateLow",
expr,
alertForDuration,
0,
labels.FromStrings("severity", "critical"),
labels.EmptyLabels(), labels.EmptyLabels(), "", false, nil,
)
newGroup := NewGroup(GroupOptions{
Name: "default",
Interval: time.Second,
Rules: []Rule{newRule},
ShouldRestore: true,
Opts: opts,
QueryOffset: &queryOffset,
})
newGroups := make(map[string]*Group)
newGroups["default;"] = newGroup
restoreTime := baseTime.Add(tt.restoreDuration).Add(queryOffset)
// First eval before restoration.
newGroup.Eval(context.TODO(), restoreTime)
// Restore happens here.
newGroup.RestoreForState(restoreTime)
got := newRule.ActiveAlerts()
for _, aa := range got {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
sort.Slice(got, func(i, j int) bool {
return labels.Compare(got[i].Labels, got[j].Labels) < 0
})
// In all cases, we expect the restoration process to have completed.
require.Truef(t, newRule.Restored(), "expected the rule restoration process to have completed")
// Checking if we have restored it correctly.
switch {
case tt.noRestore:
require.Len(t, got, tt.num)
for _, e := range got {
require.Equal(t, e.ActiveAt, restoreTime)
}
case tt.gracePeriod:
require.Len(t, got, tt.num)
for _, e := range got {
require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
}
default:
exp := tt.expectedAlerts
require.Equal(t, len(exp), len(got))
sortAlerts(exp)
sortAlerts(got)
for i, e := range exp {
require.Equal(t, e.Labels, got[i].Labels)
// Difference in time should be within 1e6 ns, i.e. 1ms
// (due to conversion between ns & ms, float64 & int64).
activeAtDiff := queryOffset.Seconds() + float64(e.ActiveAt.Unix()+int64(tt.downDuration/time.Second)-got[i].ActiveAt.Unix())
require.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong")
}
}
})
} }
}) })
} }
} }
func TestStaleness(t *testing.T) { func TestStaleness(t *testing.T) {
st := teststorage.New(t) for _, queryOffset := range []time.Duration{0, time.Minute} {
defer st.Close() st := teststorage.New(t)
engineOpts := promql.EngineOpts{ defer st.Close()
Logger: nil, engineOpts := promql.EngineOpts{
Reg: nil, Logger: nil,
MaxSamples: 10, Reg: nil,
Timeout: 10 * time.Second, MaxSamples: 10,
Timeout: 10 * time.Second,
}
engine := promql.NewEngine(engineOpts)
opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(engine, st),
Appendable: st,
Queryable: st,
Context: context.Background(),
Logger: log.NewNopLogger(),
}
expr, err := parser.ParseExpr("a + 1")
require.NoError(t, err)
rule := NewRecordingRule("a_plus_one", expr, labels.Labels{})
group := NewGroup(GroupOptions{
Name: "default",
Interval: time.Second,
Rules: []Rule{rule},
ShouldRestore: true,
Opts: opts,
QueryOffset: &queryOffset,
})
// A time series that has two samples and then goes stale.
app := st.Appender(context.Background())
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 0, 1)
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 2000, math.Float64frombits(value.StaleNaN))
err = app.Commit()
require.NoError(t, err)
ctx := context.Background()
// Execute 3 times, 1 second apart.
group.Eval(ctx, time.Unix(0, 0).Add(queryOffset))
group.Eval(ctx, time.Unix(1, 0).Add(queryOffset))
group.Eval(ctx, time.Unix(2, 0).Add(queryOffset))
querier, err := st.Querier(0, 2000)
require.NoError(t, err)
defer querier.Close()
matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one")
require.NoError(t, err)
set := querier.Select(ctx, false, nil, matcher)
samples, err := readSeriesSet(set)
require.NoError(t, err)
metric := labels.FromStrings(model.MetricNameLabel, "a_plus_one").String()
metricSample, ok := samples[metric]
require.True(t, ok, "Series %s not returned.", metric)
require.True(t, value.IsStaleNaN(metricSample[2].F), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].F))
metricSample[2].F = 42 // require.Equal cannot handle NaN.
want := map[string][]promql.FPoint{
metric: {{T: 0, F: 2}, {T: 1000, F: 3}, {T: 2000, F: 42}},
}
require.Equal(t, want, samples)
} }
engine := promql.NewEngine(engineOpts)
opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(engine, st),
Appendable: st,
Queryable: st,
Context: context.Background(),
Logger: log.NewNopLogger(),
}
expr, err := parser.ParseExpr("a + 1")
require.NoError(t, err)
rule := NewRecordingRule("a_plus_one", expr, labels.Labels{})
group := NewGroup(GroupOptions{
Name: "default",
Interval: time.Second,
Rules: []Rule{rule},
ShouldRestore: true,
Opts: opts,
})
// A time series that has two samples and then goes stale.
app := st.Appender(context.Background())
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 0, 1)
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 2000, math.Float64frombits(value.StaleNaN))
err = app.Commit()
require.NoError(t, err)
ctx := context.Background()
// Execute 3 times, 1 second apart.
group.Eval(ctx, time.Unix(0, 0))
group.Eval(ctx, time.Unix(1, 0))
group.Eval(ctx, time.Unix(2, 0))
querier, err := st.Querier(0, 2000)
require.NoError(t, err)
defer querier.Close()
matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one")
require.NoError(t, err)
set := querier.Select(ctx, false, nil, matcher)
samples, err := readSeriesSet(set)
require.NoError(t, err)
metric := labels.FromStrings(model.MetricNameLabel, "a_plus_one").String()
metricSample, ok := samples[metric]
require.True(t, ok, "Series %s not returned.", metric)
require.True(t, value.IsStaleNaN(metricSample[2].F), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].F))
metricSample[2].F = 42 // require.Equal cannot handle NaN.
want := map[string][]promql.FPoint{
metric: {{T: 0, F: 2}, {T: 1000, F: 3}, {T: 2000, F: 42}},
}
require.Equal(t, want, samples)
} }
// Convert a SeriesSet into a form usable with require.Equal. // Convert a SeriesSet into a form usable with require.Equal.
@ -608,6 +623,46 @@ func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.FPoint, error) {
return result, ss.Err() return result, ss.Err()
} }
func TestGroup_QueryOffset(t *testing.T) {
config := `
groups:
- name: group1
query_offset: 2m
- name: group2
query_offset: 0s
- name: group3
`
dir := t.TempDir()
fname := path.Join(dir, "rules.yaml")
err := os.WriteFile(fname, []byte(config), fs.ModePerm)
require.NoError(t, err)
m := NewManager(&ManagerOptions{
Logger: log.NewNopLogger(),
DefaultRuleQueryOffset: func() time.Duration {
return time.Minute
},
})
m.start()
err = m.Update(time.Second, []string{fname}, labels.EmptyLabels(), "", nil)
require.NoError(t, err)
rgs := m.RuleGroups()
sort.Slice(rgs, func(i, j int) bool {
return rgs[i].Name() < rgs[j].Name()
})
// From config.
require.Equal(t, 2*time.Minute, rgs[0].QueryOffset())
// Setting 0 in config is detected.
require.Equal(t, time.Duration(0), rgs[1].QueryOffset())
// Default when nothing is set.
require.Equal(t, time.Minute, rgs[2].QueryOffset())
m.Stop()
}
func TestCopyState(t *testing.T) { func TestCopyState(t *testing.T) {
oldGroup := &Group{ oldGroup := &Group{
rules: []Rule{ rules: []Rule{
@ -1361,7 +1416,7 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
ts := time.Now() ts := time.Now()
app := db.Appender(context.Background()) app := db.Appender(context.Background())
for i, h := range hists { for i, h := range hists {
l := labels.FromStrings("__name__", "histogram_metric", "idx", fmt.Sprintf("%d", i)) l := labels.FromStrings("__name__", "histogram_metric", "idx", strconv.Itoa(i))
_, err := app.AppendHistogram(0, l, ts.UnixMilli(), h.Copy(), nil) _, err := app.AppendHistogram(0, l, ts.UnixMilli(), h.Copy(), nil)
require.NoError(t, err) require.NoError(t, err)
} }
@ -2043,7 +2098,7 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount)) require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
} }
const artificialDelay = 10 * time.Millisecond const artificialDelay = 15 * time.Millisecond
func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.Int32, maxConcurrent int64) *ManagerOptions { func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.Int32, maxConcurrent int64) *ManagerOptions {
var inflightMu sync.Mutex var inflightMu sync.Mutex

View file

@ -31,7 +31,7 @@ type unknownRule struct{}
func (u unknownRule) Name() string { return "" } func (u unknownRule) Name() string { return "" }
func (u unknownRule) Labels() labels.Labels { return labels.EmptyLabels() } func (u unknownRule) Labels() labels.Labels { return labels.EmptyLabels() }
func (u unknownRule) Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) { func (u unknownRule) Eval(context.Context, time.Duration, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) {
return nil, nil return nil, nil
} }
func (u unknownRule) String() string { return "" } func (u unknownRule) String() string { return "" }

View file

@ -77,10 +77,9 @@ func (rule *RecordingRule) Labels() labels.Labels {
} }
// Eval evaluates the rule and then overrides the metric names and labels accordingly. // Eval evaluates the rule and then overrides the metric names and labels accordingly.
func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, _ *url.URL, limit int) (promql.Vector, error) { func (rule *RecordingRule) Eval(ctx context.Context, queryOffset time.Duration, ts time.Time, query QueryFunc, _ *url.URL, limit int) (promql.Vector, error) {
ctx = NewOriginContext(ctx, NewRuleDetail(rule)) ctx = NewOriginContext(ctx, NewRuleDetail(rule))
vector, err := query(ctx, rule.vector.String(), ts.Add(-queryOffset))
vector, err := query(ctx, rule.vector.String(), ts)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -126,7 +126,7 @@ func TestRuleEval(t *testing.T) {
for _, scenario := range ruleEvalTestScenarios { for _, scenario := range ruleEvalTestScenarios {
t.Run(scenario.name, func(t *testing.T) { t.Run(scenario.name, func(t *testing.T) {
rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels) rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels)
result, err := rule.Eval(context.TODO(), ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0) result, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
testutil.RequireEqual(t, scenario.expected, result) testutil.RequireEqual(t, scenario.expected, result)
}) })
@ -144,7 +144,7 @@ func BenchmarkRuleEval(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
_, err := rule.Eval(context.TODO(), ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0) _, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0)
if err != nil { if err != nil {
require.NoError(b, err) require.NoError(b, err)
} }
@ -173,7 +173,7 @@ func TestRuleEvalDuplicate(t *testing.T) {
expr, _ := parser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`) expr, _ := parser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`)
rule := NewRecordingRule("foo", expr, labels.FromStrings("test", "test")) rule := NewRecordingRule("foo", expr, labels.FromStrings("test", "test"))
_, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, 0) _, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0)
require.Error(t, err) require.Error(t, err)
require.EqualError(t, err, "vector contains metrics with the same labelset after applying rule labels") require.EqualError(t, err, "vector contains metrics with the same labelset after applying rule labels")
} }
@ -215,7 +215,7 @@ func TestRecordingRuleLimit(t *testing.T) {
evalTime := time.Unix(0, 0) evalTime := time.Unix(0, 0)
for _, test := range tests { for _, test := range tests {
switch _, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); { switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); {
case err != nil: case err != nil:
require.EqualError(t, err, test.err) require.EqualError(t, err, test.err)
case test.err != "": case test.err != "":
@ -243,7 +243,7 @@ func TestRecordingEvalWithOrigin(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
rule := NewRecordingRule(name, expr, lbs) rule := NewRecordingRule(name, expr, lbs)
_, err = rule.Eval(ctx, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) { _, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) {
detail = FromOriginContext(ctx) detail = FromOriginContext(ctx)
return nil, nil return nil, nil
}, nil, 0) }, nil, 0)

View file

@ -40,7 +40,7 @@ type Rule interface {
// Labels of the rule. // Labels of the rule.
Labels() labels.Labels Labels() labels.Labels
// Eval evaluates the rule, including any associated recording or alerting actions. // Eval evaluates the rule, including any associated recording or alerting actions.
Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) Eval(ctx context.Context, queryOffset time.Duration, evaluationTime time.Time, queryFunc QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error)
// String returns a human-readable string representation of the rule. // String returns a human-readable string representation of the rule.
String() string String() string
// Query returns the rule query expression. // Query returns the rule query expression.
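
For third-party Rule implementations the new parameter slots in as below; a hedged sketch showing only Eval, with the interface's other methods elided:

type constRule struct{}

func (constRule) Eval(ctx context.Context, queryOffset time.Duration, ts time.Time, query QueryFunc, _ *url.URL, _ int) (promql.Vector, error) {
	// Honor the offset the same way the built-in rules do: evaluate in the past.
	return query(ctx, "vector(1)", ts.Add(-queryOffset))
}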

View file

@ -1285,7 +1285,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes) s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes)
} }
w.Write([]byte(fmt.Sprintf(s + "&"))) w.Write([]byte(s + "&"))
} else { } else {
cancel() cancel()
} }

View file

@ -21,6 +21,7 @@ import (
"net/http/httptest" "net/http/httptest"
"net/url" "net/url"
"os" "os"
"strconv"
"strings" "strings"
"testing" "testing"
"time" "time"
@ -67,7 +68,7 @@ func TestTargetOffset(t *testing.T) {
// Calculate offsets for 10000 different targets. // Calculate offsets for 10000 different targets.
for i := range offsets { for i := range offsets {
target := newTestTarget("example.com:80", 0, labels.FromStrings( target := newTestTarget("example.com:80", 0, labels.FromStrings(
"label", fmt.Sprintf("%d", i), "label", strconv.Itoa(i),
)) ))
offsets[i] = target.offset(interval, offsetSeed) offsets[i] = target.offset(interval, offsetSeed)
} }
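
The recurring fmt.Sprintf("%d", i) -> strconv.Itoa(i) swaps in this commit follow standard lint advice (perfsprint/gocritic style); a minimal demonstration of the equivalence, assuming fmt and strconv are imported:

a := fmt.Sprintf("%d", 42) // generic formatting: interface boxing plus verb parsing
b := strconv.Itoa(42)      // direct integer conversion; same output, cheaper
fmt.Println(a == b)        // true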

View file

@ -24,8 +24,8 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: install Go - name: Install Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with: with:
go-version: 1.22.x go-version: 1.22.x
@ -33,6 +33,7 @@ jobs:
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter' if: github.repository == 'prometheus/snmp_exporter'
- name: Lint - name: Lint
uses: golangci/golangci-lint-action@9d1e0624a798bb64f6c3cea93db47765312263dc # v5.1.0 uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
with: with:
version: v1.56.2 args: --verbose
version: v1.59.0

View file

@ -75,7 +75,7 @@ type AzureADConfig struct { //nolint:revive // exported.
// OAuth is the oauth config that is being used to authenticate. // OAuth is the oauth config that is being used to authenticate.
OAuth *OAuthConfig `yaml:"oauth,omitempty"` OAuth *OAuthConfig `yaml:"oauth,omitempty"`
// OAuth is the oauth config that is being used to authenticate. // SDK is the SDK config that is being used to authenticate.
SDK *SDKConfig `yaml:"sdk,omitempty"` SDK *SDKConfig `yaml:"sdk,omitempty"`
// Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina. // Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina.

View file

@ -468,7 +468,7 @@ func TestReleaseNoninternedString(t *testing.T) {
m.StoreSeries([]record.RefSeries{ m.StoreSeries([]record.RefSeries{
{ {
Ref: chunks.HeadSeriesRef(i), Ref: chunks.HeadSeriesRef(i),
Labels: labels.FromStrings("asdf", fmt.Sprintf("%d", i)), Labels: labels.FromStrings("asdf", strconv.Itoa(i)),
}, },
}, 0) }, 0)
m.SeriesReset(1) m.SeriesReset(1)

View file

@ -14,8 +14,8 @@
package agent package agent
import ( import (
"fmt"
"math" "math"
"strconv"
"sync" "sync"
"testing" "testing"
"time" "time"
@ -53,7 +53,7 @@ func TestNoDeadlock(t *testing.T) {
series := &memSeries{ series := &memSeries{
ref: chunks.HeadSeriesRef(i), ref: chunks.HeadSeriesRef(i),
lset: labels.FromMap(map[string]string{ lset: labels.FromMap(map[string]string{
"id": fmt.Sprintf("%d", i), "id": strconv.Itoa(i),
}), }),
} }
stripeSeries.Set(series.lset.Hash(), series) stripeSeries.Set(series.lset.Hash(), series)

View file

@ -381,6 +381,33 @@ func listChunkFiles(dir string) (map[int]string, error) {
return res, nil return res, nil
} }
// HardLinkChunkFiles creates hardlinks for chunk files from src to dst.
// It does nothing if src doesn't exist and ensures dst is created if not.
func HardLinkChunkFiles(src, dst string) error {
_, err := os.Stat(src)
if os.IsNotExist(err) {
return nil
}
if err != nil {
return fmt.Errorf("check source chunks dir: %w", err)
}
if err := os.MkdirAll(dst, 0o777); err != nil {
return fmt.Errorf("set up destination chunks dir: %w", err)
}
files, err := listChunkFiles(src)
if err != nil {
return fmt.Errorf("list chunks: %w", err)
}
for _, filePath := range files {
_, fileName := filepath.Split(filePath)
err := os.Link(filepath.Join(src, fileName), filepath.Join(dst, fileName))
if err != nil {
return fmt.Errorf("hardlink a chunk: %w", err)
}
}
return nil
}
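
An illustrative call, mirroring how the read-only DB further down links head chunks into its sandbox before WAL replay; dbDir and sandboxDir are assumptions:

src := filepath.Join(dbDir, "chunks_head")
dst := filepath.Join(sandboxDir, "chunks_head")
if err := chunks.HardLinkChunkFiles(src, dst); err != nil {
	return fmt.Errorf("hardlink chunk files: %w", err)
}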
// repairLastChunkFile deletes the last file if it's empty. // repairLastChunkFile deletes the last file if it's empty.
// Because we don't fsync when creating these files, we could end // Because we don't fsync when creating these files, we could end
// up with an empty file at the end during an abrupt shutdown. // up with an empty file at the end during an abrupt shutdown.

View file

@ -55,7 +55,7 @@ func (q *writeJobQueue) assertInvariants(t *testing.T) {
require.Len(t, s.segment, s.nextWrite) require.Len(t, s.segment, s.nextWrite)
} }
// Last segment must have at least one element, or we wouldn't have created it. // Last segment must have at least one element, or we wouldn't have created it.
require.Greater(t, s.nextWrite, 0) require.Positive(t, s.nextWrite)
} }
require.Equal(t, q.size, totalSize) require.Equal(t, q.size, totalSize)

View file

@ -22,6 +22,7 @@ import (
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
"strconv"
"sync" "sync"
"testing" "testing"
"time" "time"
@ -1129,7 +1130,7 @@ func BenchmarkCompactionFromHead(b *testing.B) {
for ln := 0; ln < labelNames; ln++ { for ln := 0; ln < labelNames; ln++ {
app := h.Appender(context.Background()) app := h.Appender(context.Background())
for lv := 0; lv < labelValues; lv++ { for lv := 0; lv < labelValues; lv++ {
app.Append(0, labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0) app.Append(0, labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0)
} }
require.NoError(b, app.Commit()) require.NoError(b, app.Commit())
} }
@ -1161,7 +1162,7 @@ func BenchmarkCompactionFromOOOHead(b *testing.B) {
for ln := 0; ln < labelNames; ln++ { for ln := 0; ln < labelNames; ln++ {
app := h.Appender(context.Background()) app := h.Appender(context.Background())
for lv := 0; lv < labelValues; lv++ { for lv := 0; lv < labelValues; lv++ {
lbls := labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)) lbls := labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln))
_, err = app.Append(0, lbls, int64(totalSamples), 0) _, err = app.Append(0, lbls, int64(totalSamples), 0)
require.NoError(b, err) require.NoError(b, err)
for ts := 0; ts < totalSamples; ts++ { for ts := 0; ts < totalSamples; ts++ {
@ -1297,7 +1298,7 @@ func TestCancelCompactions(t *testing.T) {
// This checks that the `context.Canceled` error is properly checked at all levels: // This checks that the `context.Canceled` error is properly checked at all levels:
// - tsdb_errors.NewMulti() should have the Is() method implemented for correct checks. // - tsdb_errors.NewMulti() should have the Is() method implemented for correct checks.
// - callers should check with errors.Is() instead of ==. // - callers should check with errors.Is() instead of ==.
readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, log.NewNopLogger()) readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, "", log.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
blocks, err := readOnlyDB.Blocks() blocks, err := readOnlyDB.Blocks()
require.NoError(t, err) require.NoError(t, err)

View file

@ -383,26 +383,36 @@ var ErrClosed = errors.New("db already closed")
// Current implementation doesn't support concurrency so // Current implementation doesn't support concurrency so
// all API calls should happen in the same go routine. // all API calls should happen in the same go routine.
type DBReadOnly struct { type DBReadOnly struct {
logger log.Logger logger log.Logger
dir string dir string
closers []io.Closer sandboxDir string
closed chan struct{} closers []io.Closer
closed chan struct{}
} }
// OpenDBReadOnly opens DB in the given directory for read only operations. // OpenDBReadOnly opens DB in the given directory for read only operations.
func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) { func OpenDBReadOnly(dir, sandboxDirRoot string, l log.Logger) (*DBReadOnly, error) {
if _, err := os.Stat(dir); err != nil { if _, err := os.Stat(dir); err != nil {
return nil, fmt.Errorf("opening the db dir: %w", err) return nil, fmt.Errorf("opening the db dir: %w", err)
} }
if sandboxDirRoot == "" {
sandboxDirRoot = dir
}
sandboxDir, err := os.MkdirTemp(sandboxDirRoot, "tmp_dbro_sandbox")
if err != nil {
return nil, fmt.Errorf("setting up sandbox dir: %w", err)
}
if l == nil { if l == nil {
l = log.NewNopLogger() l = log.NewNopLogger()
} }
return &DBReadOnly{ return &DBReadOnly{
logger: l, logger: l,
dir: dir, dir: dir,
closed: make(chan struct{}), sandboxDir: sandboxDir,
closed: make(chan struct{}),
}, nil }, nil
} }
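
A hedged usage sketch of the new signature; the paths and logger are illustrative:

// Blocks and WAL under the data dir stay untouched; anything the replayed Head
// must write goes into a throwaway sandbox under the second argument (or under
// the data dir itself when "" is passed).
db, err := tsdb.OpenDBReadOnly("/var/lib/prometheus/data", os.TempDir(), logger)
if err != nil {
	return err
}
defer db.Close() // Close also removes the sandbox directory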
@ -491,7 +501,14 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue
} }
opts := DefaultHeadOptions() opts := DefaultHeadOptions()
opts.ChunkDirRoot = db.dir // Hard link the chunk files to a dir in db.sandboxDir in case the Head needs to truncate some of them
// or cut new ones while replaying the WAL.
// See https://github.com/prometheus/prometheus/issues/11618.
err = chunks.HardLinkChunkFiles(mmappedChunksDir(db.dir), mmappedChunksDir(db.sandboxDir))
if err != nil {
return nil, err
}
opts.ChunkDirRoot = db.sandboxDir
head, err := NewHead(nil, db.logger, nil, nil, opts, NewHeadStats()) head, err := NewHead(nil, db.logger, nil, nil, opts, NewHeadStats())
if err != nil { if err != nil {
return nil, err return nil, err
@ -519,7 +536,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue
} }
} }
opts := DefaultHeadOptions() opts := DefaultHeadOptions()
opts.ChunkDirRoot = db.dir opts.ChunkDirRoot = db.sandboxDir
head, err = NewHead(nil, db.logger, w, wbl, opts, NewHeadStats()) head, err = NewHead(nil, db.logger, w, wbl, opts, NewHeadStats())
if err != nil { if err != nil {
return nil, err return nil, err
@ -690,8 +707,14 @@ func (db *DBReadOnly) Block(blockID string) (BlockReader, error) {
return block, nil return block, nil
} }
// Close all block readers. // Close all block readers and delete the sandbox dir.
func (db *DBReadOnly) Close() error { func (db *DBReadOnly) Close() error {
defer func() {
// Delete the temporary sandbox directory that was created when opening the DB.
if err := os.RemoveAll(db.sandboxDir); err != nil {
level.Error(db.logger).Log("msg", "delete sandbox dir", "err", err)
}
}()
select { select {
case <-db.closed: case <-db.closed:
return ErrClosed return ErrClosed

View file

@ -25,6 +25,7 @@ import (
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
"runtime"
"sort" "sort"
"strconv" "strconv"
"sync" "sync"
@ -1065,7 +1066,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {
for i := int64(0); i < 155; i++ { for i := int64(0); i < 155; i++ {
app := db.Appender(context.Background()) app := db.Appender(context.Background())
ref, err := app.Append(0, labels.FromStrings("wal"+fmt.Sprintf("%d", i), "size"), i, rand.Float64()) ref, err := app.Append(0, labels.FromStrings("wal"+strconv.Itoa(int(i)), "size"), i, rand.Float64())
require.NoError(t, err) require.NoError(t, err)
for j := int64(1); j <= 78; j++ { for j := int64(1); j <= 78; j++ {
_, err := app.Append(ref, labels.EmptyLabels(), i+j, rand.Float64()) _, err := app.Append(ref, labels.EmptyLabels(), i+j, rand.Float64())
@@ -2494,7 +2495,7 @@ func TestDBReadOnly(t *testing.T) {
 	}
 	// Open a read only db and ensure that the API returns the same result as the normal DB.
-	dbReadOnly, err := OpenDBReadOnly(dbDir, logger)
+	dbReadOnly, err := OpenDBReadOnly(dbDir, "", logger)
 	require.NoError(t, err)
 	defer func() { require.NoError(t, dbReadOnly.Close()) }()
@@ -2548,10 +2549,14 @@ func TestDBReadOnly(t *testing.T) {
 // TestDBReadOnlyClosing ensures that after closing the db
 // all api methods return an ErrClosed.
 func TestDBReadOnlyClosing(t *testing.T) {
-	dbDir := t.TempDir()
-	db, err := OpenDBReadOnly(dbDir, log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)))
+	sandboxDir := t.TempDir()
+	db, err := OpenDBReadOnly(t.TempDir(), sandboxDir, log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)))
 	require.NoError(t, err)
+	// The sandboxDir was there.
+	require.DirExists(t, db.sandboxDir)
 	require.NoError(t, db.Close())
+	// The sandboxDir was deleted when closing.
+	require.NoDirExists(t, db.sandboxDir)
 	require.Equal(t, db.Close(), ErrClosed)
 	_, err = db.Blocks()
 	require.Equal(t, err, ErrClosed)
@@ -2587,7 +2592,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
 	}

 	// Flush WAL.
-	db, err := OpenDBReadOnly(dbDir, logger)
+	db, err := OpenDBReadOnly(dbDir, "", logger)
 	require.NoError(t, err)

 	flush := t.TempDir()
@@ -2595,7 +2600,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
 	require.NoError(t, db.Close())

 	// Reopen the DB from the flushed WAL block.
-	db, err = OpenDBReadOnly(flush, logger)
+	db, err = OpenDBReadOnly(flush, "", logger)
 	require.NoError(t, err)
 	defer func() { require.NoError(t, db.Close()) }()
 	blocks, err := db.Blocks()
@@ -2624,6 +2629,80 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
 	require.Equal(t, 1000.0, sum)
 }

+func TestDBReadOnly_Querier_NoAlteration(t *testing.T) {
+	countChunks := func(dir string) int {
+		files, err := os.ReadDir(mmappedChunksDir(dir))
+		require.NoError(t, err)
+		return len(files)
+	}
+
+	dirHash := func(dir string) (hash []byte) {
+		// Windows requires the DB to be closed: "xxx\lock: The process cannot access the file because it is being used by another process."
+		// But closing the DB alters the directory in this case (it'll cut a new chunk).
+		if runtime.GOOS != "windows" {
+			hash = testutil.DirHash(t, dir)
+		}
+		return
+	}
+
+	spinUpQuerierAndCheck := func(dir, sandboxDir string, chunksCount int) {
+		dBDirHash := dirHash(dir)
+		// Bootsrap a RO db from the same dir and set up a querier.
+		dbReadOnly, err := OpenDBReadOnly(dir, sandboxDir, nil)
+		require.NoError(t, err)
+		require.Equal(t, chunksCount, countChunks(dir))
+		q, err := dbReadOnly.Querier(math.MinInt, math.MaxInt)
+		require.NoError(t, err)
+		require.NoError(t, q.Close())
+		require.NoError(t, dbReadOnly.Close())
+		// The RO Head doesn't alter RW db chunks_head/.
+		require.Equal(t, chunksCount, countChunks(dir))
+		require.Equal(t, dirHash(dir), dBDirHash)
+	}
+
+	t.Run("doesn't cut chunks while replaying WAL", func(t *testing.T) {
+		db := openTestDB(t, nil, nil)
+		defer func() {
+			require.NoError(t, db.Close())
+		}()
+
+		// Append until the first mmaped head chunk.
+		for i := 0; i < 121; i++ {
+			app := db.Appender(context.Background())
+			_, err := app.Append(0, labels.FromStrings("foo", "bar"), int64(i), 0)
+			require.NoError(t, err)
+			require.NoError(t, app.Commit())
+		}
+
+		spinUpQuerierAndCheck(db.dir, t.TempDir(), 0)
+
+		// The RW Head should have no problem cutting its own chunk,
+		// this also proves that a chunk needed to be cut.
+		require.NotPanics(t, func() { db.ForceHeadMMap() })
+		require.Equal(t, 1, countChunks(db.dir))
+	})
+
+	t.Run("doesn't truncate corrupted chunks", func(t *testing.T) {
+		db := openTestDB(t, nil, nil)
+		require.NoError(t, db.Close())
+
+		// Simulate a corrupted chunk: without a header.
+		_, err := os.Create(path.Join(mmappedChunksDir(db.dir), "000001"))
+		require.NoError(t, err)
+
+		spinUpQuerierAndCheck(db.dir, t.TempDir(), 1)
+
+		// The RW Head should have no problem truncating its corrupted file:
+		// this proves that the chunk needed to be truncated.
+		db, err = Open(db.dir, nil, nil, nil, nil)
+		defer func() {
+			require.NoError(t, db.Close())
+		}()
+		require.NoError(t, err)
+		require.Equal(t, 0, countChunks(db.dir))
+	})
+}
 func TestDBCannotSeePartialCommits(t *testing.T) {
 	if defaultIsolationDisabled {
 		t.Skip("skipping test since tsdb isolation is disabled")
@@ -4495,7 +4574,7 @@ func TestOOOCompaction(t *testing.T) {
 		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
 		require.NoError(t, err)
 		require.False(t, created)
-		require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0)
+		require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
 		require.Len(t, ms.ooo.oooMmappedChunks, 14) // 7 original, 7 duplicate.
 	}
 	checkNonEmptyOOOChunk(series1)
@@ -4636,7 +4715,7 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
 		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
 		require.NoError(t, err)
 		require.False(t, created)
-		require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0)
+		require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
 	}

 	// If the normal Head is not compacted, the OOO head compaction does not take place.
@@ -4737,7 +4816,7 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
 		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
 		require.NoError(t, err)
 		require.False(t, created)
-		require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0)
+		require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
 	}

 	// If the normal Head is not compacted, the OOO head compaction does not take place.
@@ -5438,8 +5517,8 @@ func TestWBLAndMmapReplay(t *testing.T) {
 			addedRecs++
 			require.NoError(t, newWbl.Log(rec))
 		}
-		require.Greater(t, markers, 0)
-		require.Greater(t, addedRecs, 0)
+		require.Positive(t, markers)
+		require.Positive(t, addedRecs)
 		require.NoError(t, newWbl.Close())
 		require.NoError(t, sr.Close())
 		require.NoError(t, os.RemoveAll(wblDir))

View file

@@ -37,7 +37,7 @@ const (
 type CircularExemplarStorage struct {
 	lock      sync.RWMutex
-	exemplars []*circularBufferEntry
+	exemplars []circularBufferEntry
 	nextIndex int
 	metrics   *ExemplarMetrics
@@ -121,7 +121,7 @@ func NewCircularExemplarStorage(length int64, m *ExemplarMetrics) (ExemplarStorage, error) {
 		length = 0
 	}
 	c := &CircularExemplarStorage{
-		exemplars: make([]*circularBufferEntry, length),
+		exemplars: make([]circularBufferEntry, length),
 		index:     make(map[string]*indexEntry, length/estimatedExemplarsPerSeries),
 		metrics:   m,
 	}
@@ -214,12 +214,12 @@ func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar.Exemplar) error {
 	// Optimize by moving the lock to be per series (& benchmark it).
 	ce.lock.RLock()
 	defer ce.lock.RUnlock()
-	return ce.validateExemplar(seriesLabels, e, false)
+	return ce.validateExemplar(ce.index[string(seriesLabels)], e, false)
 }

 // Not thread safe. The appended parameters tells us whether this is an external validation, or internal
 // as a result of an AddExemplar call, in which case we should update any relevant metrics.
-func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, appended bool) error {
+func (ce *CircularExemplarStorage) validateExemplar(idx *indexEntry, e exemplar.Exemplar, appended bool) error {
 	if len(ce.exemplars) == 0 {
 		return storage.ErrExemplarsDisabled
 	}
@@ -239,8 +239,7 @@ func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, appended bool) error {
 		return err
 	}

-	idx, ok := ce.index[string(key)]
-	if !ok {
+	if idx == nil {
 		return nil
 	}
@@ -292,7 +291,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int {
 	oldBuffer := ce.exemplars
 	oldNextIndex := int64(ce.nextIndex)

-	ce.exemplars = make([]*circularBufferEntry, l)
+	ce.exemplars = make([]circularBufferEntry, l)
 	ce.index = make(map[string]*indexEntry, l/estimatedExemplarsPerSeries)
 	ce.nextIndex = 0
@@ -311,10 +310,11 @@ func (ce *CircularExemplarStorage) Resize(l int64) int {
 	// This way we don't migrate exemplars that would just be overwritten when migrating later exemplars.
 	startIndex := (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer))

+	var buf [1024]byte
 	for i := int64(0); i < count; i++ {
 		idx := (startIndex + i) % int64(len(oldBuffer))
-		if entry := oldBuffer[idx]; entry != nil {
-			ce.migrate(entry)
+		if oldBuffer[idx].ref != nil {
+			ce.migrate(&oldBuffer[idx], buf[:])
 			migrated++
 		}
 	}
@@ -328,9 +328,8 @@ func (ce *CircularExemplarStorage) Resize(l int64) int {
 // migrate is like AddExemplar but reuses existing structs. Expected to be called in batch and requires
 // external lock and does not compute metrics.
-func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry) {
-	var buf [1024]byte
-	seriesLabels := entry.ref.seriesLabels.Bytes(buf[:])
+func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry, buf []byte) {
+	seriesLabels := entry.ref.seriesLabels.Bytes(buf[:0])

 	idx, ok := ce.index[string(seriesLabels)]
 	if !ok {
@@ -344,7 +343,7 @@ func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry, buf []byte) {
 	idx.newest = ce.nextIndex

 	entry.next = noExemplar
-	ce.exemplars[ce.nextIndex] = entry
+	ce.exemplars[ce.nextIndex] = *entry

 	ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars)
 }
@@ -362,7 +361,8 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error {
 	ce.lock.Lock()
 	defer ce.lock.Unlock()

-	err := ce.validateExemplar(seriesLabels, e, true)
+	idx, ok := ce.index[string(seriesLabels)]
+	err := ce.validateExemplar(idx, e, true)
 	if err != nil {
 		if errors.Is(err, storage.ErrDuplicateExemplar) {
 			// Duplicate exemplar, noop.
@@ -371,25 +371,23 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error {
 		return err
 	}

-	_, ok := ce.index[string(seriesLabels)]
 	if !ok {
-		ce.index[string(seriesLabels)] = &indexEntry{oldest: ce.nextIndex, seriesLabels: l}
+		idx = &indexEntry{oldest: ce.nextIndex, seriesLabels: l}
+		ce.index[string(seriesLabels)] = idx
 	} else {
-		ce.exemplars[ce.index[string(seriesLabels)].newest].next = ce.nextIndex
+		ce.exemplars[idx.newest].next = ce.nextIndex
 	}

-	if prev := ce.exemplars[ce.nextIndex]; prev == nil {
-		ce.exemplars[ce.nextIndex] = &circularBufferEntry{}
-	} else {
+	if prev := &ce.exemplars[ce.nextIndex]; prev.ref != nil {
 		// There exists an exemplar already on this ce.nextIndex entry,
 		// drop it, to make place for others.
-		var buf [1024]byte
-		prevLabels := prev.ref.seriesLabels.Bytes(buf[:])
 		if prev.next == noExemplar {
 			// Last item for this series, remove index entry.
+			var buf [1024]byte
+			prevLabels := prev.ref.seriesLabels.Bytes(buf[:])
 			delete(ce.index, string(prevLabels))
 		} else {
-			ce.index[string(prevLabels)].oldest = prev.next
+			prev.ref.oldest = prev.next
 		}
 	}
@@ -397,8 +395,8 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error {
 	// since this is the first exemplar stored for this series.
 	ce.exemplars[ce.nextIndex].next = noExemplar
 	ce.exemplars[ce.nextIndex].exemplar = e
-	ce.exemplars[ce.nextIndex].ref = ce.index[string(seriesLabels)]
-	ce.index[string(seriesLabels)].newest = ce.nextIndex
+	ce.exemplars[ce.nextIndex].ref = idx
+	idx.newest = ce.nextIndex

 	ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars)
@@ -416,15 +414,15 @@ func (ce *CircularExemplarStorage) computeMetrics() {
 		return
 	}

-	if next := ce.exemplars[ce.nextIndex]; next != nil {
+	if ce.exemplars[ce.nextIndex].ref != nil {
 		ce.metrics.exemplarsInStorage.Set(float64(len(ce.exemplars)))
-		ce.metrics.lastExemplarsTs.Set(float64(next.exemplar.Ts) / 1000)
+		ce.metrics.lastExemplarsTs.Set(float64(ce.exemplars[ce.nextIndex].exemplar.Ts) / 1000)
 		return
 	}

 	// We did not yet fill the buffer.
 	ce.metrics.exemplarsInStorage.Set(float64(ce.nextIndex))
-	if ce.exemplars[0] != nil {
+	if ce.exemplars[0].ref != nil {
 		ce.metrics.lastExemplarsTs.Set(float64(ce.exemplars[0].exemplar.Ts) / 1000)
 	}
 }
@@ -438,7 +436,7 @@ func (ce *CircularExemplarStorage) IterateExemplars(f func(seriesLabels labels.Labels, e exemplar.Exemplar) error) error {
 	idx := ce.nextIndex
 	l := len(ce.exemplars)
 	for i := 0; i < l; i, idx = i+1, (idx+1)%l {
-		if ce.exemplars[idx] == nil {
+		if ce.exemplars[idx].ref == nil {
 			continue
 		}
 		err := f(ce.exemplars[idx].ref.seriesLabels, ce.exemplars[idx].exemplar)
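
The switch from `[]*circularBufferEntry` to `[]circularBufferEntry` stores slots by value, so the whole ring is one allocation and empty slots are detected via a nil interior pointer (`ref == nil`) instead of a nil slot pointer. A self-contained sketch of the pattern (simplified stand-in types, not the Prometheus ones):

```go
package main

import "fmt"

type entry struct {
	ref  *int // nil marks an empty slot, like circularBufferEntry.ref
	data string
}

type ring struct {
	slots []entry // one allocation for the whole buffer
	next  int
}

func (r *ring) add(ref *int, data string) {
	// Overwrite the slot in place: no per-entry heap allocation.
	r.slots[r.next] = entry{ref: ref, data: data}
	r.next = (r.next + 1) % len(r.slots)
}

func main() {
	r := &ring{slots: make([]entry, 4)}
	id := 7
	r.add(&id, "hello")
	for i := range r.slots {
		if r.slots[i].ref == nil {
			continue // empty slot, as in ce.exemplars[idx].ref == nil
		}
		fmt.Println(r.slots[i].data)
	}
}
```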

View file

@@ -415,27 +415,29 @@ func BenchmarkAddExemplar(b *testing.B) {
 	// before adding.
 	exLabels := labels.FromStrings("trace_id", "89620921")

-	for _, n := range []int{10000, 100000, 1000000} {
-		b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
-			for j := 0; j < b.N; j++ {
-				b.StopTimer()
-				exs, err := NewCircularExemplarStorage(int64(n), eMetrics)
-				require.NoError(b, err)
-				es := exs.(*CircularExemplarStorage)
-				var l labels.Labels
-				b.StartTimer()
+	for _, capacity := range []int{1000, 10000, 100000} {
+		for _, n := range []int{10000, 100000, 1000000} {
+			b.Run(fmt.Sprintf("%d/%d", n, capacity), func(b *testing.B) {
+				for j := 0; j < b.N; j++ {
+					b.StopTimer()
+					exs, err := NewCircularExemplarStorage(int64(capacity), eMetrics)
+					require.NoError(b, err)
+					es := exs.(*CircularExemplarStorage)
+					var l labels.Labels
+					b.StartTimer()

-				for i := 0; i < n; i++ {
-					if i%100 == 0 {
-						l = labels.FromStrings("service", strconv.Itoa(i))
+					for i := 0; i < n; i++ {
+						if i%100 == 0 {
+							l = labels.FromStrings("service", strconv.Itoa(i))
+						}
+						err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: int64(i), Labels: exLabels})
+						if err != nil {
+							require.NoError(b, err)
+						}
 					}
-					err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: int64(i), Labels: exLabels})
-					if err != nil {
-						require.NoError(b, err)
-					}
 				}
-			}
-		})
+			})
+		}
 	}
 }
@@ -480,8 +482,11 @@ func BenchmarkResizeExemplars(b *testing.B) {
 			require.NoError(b, err)
 			es := exs.(*CircularExemplarStorage)

+			var l labels.Labels
 			for i := 0; i < int(float64(tc.startSize)*float64(1.5)); i++ {
-				l := labels.FromStrings("service", strconv.Itoa(i))
+				if i%100 == 0 {
+					l = labels.FromStrings("service", strconv.Itoa(i))
+				}
 				err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: int64(i)})
 				if err != nil {

View file

@@ -310,12 +310,22 @@ func (h *Head) resetInMemoryState() error {
 		return err
 	}

+	if h.series != nil {
+		// reset the existing series to make sure we call the appropriated hooks
+		// and increment the series removed metrics
+		fs := h.series.iterForDeletion(func(_ int, _ uint64, s *memSeries, flushedForCallback map[chunks.HeadSeriesRef]labels.Labels) {
+			// All series should be flushed
+			flushedForCallback[s.ref] = s.lset
+		})
+		h.metrics.seriesRemoved.Add(float64(fs))
+	}
+	h.series = newStripeSeries(h.opts.StripeSize, h.opts.SeriesCallback)
 	h.iso = newIsolation(h.opts.IsolationDisabled)
 	h.oooIso = newOOOIsolation()
+	h.numSeries.Store(0)
 	h.exemplarMetrics = em
 	h.exemplars = es
-	h.series = newStripeSeries(h.opts.StripeSize, h.opts.SeriesCallback)
 	h.postings = index.NewUnorderedMemPostings()
 	h.tombstones = tombstones.NewMemTombstones()
 	h.deleted = map[chunks.HeadSeriesRef]int{}
@@ -1861,11 +1871,10 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *stripeSeries {
 // minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series.
 func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) {
 	var (
-		deleted               = map[storage.SeriesRef]struct{}{}
-		rmChunks              = 0
-		actualMint      int64 = math.MaxInt64
-		minOOOTime      int64 = math.MaxInt64
-		deletedFromPrevStripe = 0
+		deleted          = map[storage.SeriesRef]struct{}{}
+		rmChunks         = 0
+		actualMint int64 = math.MaxInt64
+		minOOOTime int64 = math.MaxInt64
 	)
 	minMmapFile = math.MaxInt32
@@ -1923,27 +1932,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) {
 		deletedForCallback[series.ref] = series.lset
 	}

-	// Run through all series shard by shard, checking which should be deleted.
-	for i := 0; i < s.size; i++ {
-		deletedForCallback := make(map[chunks.HeadSeriesRef]labels.Labels, deletedFromPrevStripe)
-		s.locks[i].Lock()
-
-		// Delete conflicts first so seriesHashmap.del doesn't move them to the `unique` field,
-		// after deleting `unique`.
-		for hash, all := range s.hashes[i].conflicts {
-			for _, series := range all {
-				check(i, hash, series, deletedForCallback)
-			}
-		}
-		for hash, series := range s.hashes[i].unique {
-			check(i, hash, series, deletedForCallback)
-		}
-
-		s.locks[i].Unlock()
-
-		s.seriesLifecycleCallback.PostDeletion(deletedForCallback)
-		deletedFromPrevStripe = len(deletedForCallback)
-	}
+	s.iterForDeletion(check)

 	if actualMint == math.MaxInt64 {
 		actualMint = mint
@@ -1952,6 +1941,35 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) {
 	return deleted, rmChunks, actualMint, minOOOTime, minMmapFile
 }
+// The iterForDeletion function iterates through all series, invoking the checkDeletedFunc for each.
+// The checkDeletedFunc takes a map as input and should add to it all series that were deleted and should be included
+// when invoking the PostDeletion hook.
+func (s *stripeSeries) iterForDeletion(checkDeletedFunc func(int, uint64, *memSeries, map[chunks.HeadSeriesRef]labels.Labels)) int {
+	seriesSetFromPrevStripe := 0
+	totalDeletedSeries := 0
+	// Run through all series shard by shard
+	for i := 0; i < s.size; i++ {
+		seriesSet := make(map[chunks.HeadSeriesRef]labels.Labels, seriesSetFromPrevStripe)
+		s.locks[i].Lock()
+		// Iterate conflicts first so f doesn't move them to the `unique` field,
+		// after deleting `unique`.
+		for hash, all := range s.hashes[i].conflicts {
+			for _, series := range all {
+				checkDeletedFunc(i, hash, series, seriesSet)
+			}
+		}
+		for hash, series := range s.hashes[i].unique {
+			checkDeletedFunc(i, hash, series, seriesSet)
+		}
+		s.locks[i].Unlock()
+		s.seriesLifecycleCallback.PostDeletion(seriesSet)
+		totalDeletedSeries += len(seriesSet)
+		seriesSetFromPrevStripe = len(seriesSet)
+	}
+	return totalDeletedSeries
+}
+
 func (s *stripeSeries) getByID(id chunks.HeadSeriesRef) *memSeries {
 	i := uint64(id) & uint64(s.size-1)
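
The refactor above extracts the shard-by-shard walk so both gc and resetInMemoryState can drive the PostDeletion hooks through one code path. A condensed sketch of the same iterator-with-callback pattern, using simplified stand-in types rather than the stripeSeries internals:

```go
package main

import (
	"fmt"
	"sync"
)

type shardedSet struct {
	locks  []sync.Mutex
	shards []map[uint64]string
}

// iter visits every entry shard by shard under that shard's lock and hands
// the callback a per-shard batch map, mirroring iterForDeletion's contract.
func (s *shardedSet) iter(visit func(shard int, id uint64, v string, batch map[uint64]string)) int {
	total := 0
	for i := range s.shards {
		batch := map[uint64]string{}
		s.locks[i].Lock()
		for id, v := range s.shards[i] {
			visit(i, id, v, batch)
		}
		s.locks[i].Unlock()
		total += len(batch) // a PostDeletion-style hook would fire here, per shard
	}
	return total
}

func main() {
	s := &shardedSet{
		locks:  make([]sync.Mutex, 2),
		shards: []map[uint64]string{{1: "a"}, {2: "b"}},
	}
	n := s.iter(func(_ int, id uint64, v string, batch map[uint64]string) {
		batch[id] = v // "flush" everything, as resetInMemoryState does
	})
	fmt.Println("reported:", n)
}
```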

View file

@@ -3383,7 +3383,7 @@ func TestWaitForPendingReadersInTimeRange(t *testing.T) {
 func TestAppendHistogram(t *testing.T) {
 	l := labels.FromStrings("a", "b")
 	for _, numHistograms := range []int{1, 10, 150, 200, 250, 300} {
-		t.Run(fmt.Sprintf("%d", numHistograms), func(t *testing.T) {
+		t.Run(strconv.Itoa(numHistograms), func(t *testing.T) {
 			head, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
 			t.Cleanup(func() {
 				require.NoError(t, head.Close())
@@ -3557,7 +3557,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
 			expMmapChunks = append(expMmapChunks, &cpy)
 		}
 		expHeadChunkSamples := ms.headChunks.chunk.NumSamples()
-		require.Greater(t, expHeadChunkSamples, 0)
+		require.Positive(t, expHeadChunkSamples)

 	// Series with mix of histograms and float.
 	s2 := labels.FromStrings("a", "b2")
@@ -3692,7 +3692,7 @@ func TestChunkSnapshot(t *testing.T) {
 			e := ex{
 				seriesLabels: lbls,
 				e: exemplar.Exemplar{
-					Labels: labels.FromStrings("trace_id", fmt.Sprintf("%d", rand.Int())),
+					Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
 					Value:  rand.Float64(),
 					Ts:     ts,
 				},
@@ -4007,6 +4007,9 @@ func TestSnapshotError(t *testing.T) {
 	require.NoError(t, err)
 	f, err := os.OpenFile(path.Join(snapDir, files[0].Name()), os.O_RDWR, 0)
 	require.NoError(t, err)
+	// Create snapshot backup to be restored on future test cases.
+	snapshotBackup, err := io.ReadAll(f)
+	require.NoError(t, err)
 	_, err = f.WriteAt([]byte{0b11111111}, 18)
 	require.NoError(t, err)
 	require.NoError(t, f.Close())
@@ -4021,10 +4024,44 @@ func TestSnapshotError(t *testing.T) {
 	// There should be no series in the memory after snapshot error since WAL was removed.
 	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
+	require.Equal(t, uint64(0), head.NumSeries())
 	require.Nil(t, head.series.getByHash(lbls.Hash(), lbls))
 	tm, err = head.tombstones.Get(1)
 	require.NoError(t, err)
 	require.Empty(t, tm)
+	require.NoError(t, head.Close())
+
+	// Test corruption in the middle of the snapshot.
+	f, err = os.OpenFile(path.Join(snapDir, files[0].Name()), os.O_RDWR, 0)
+	require.NoError(t, err)
+	_, err = f.WriteAt(snapshotBackup, 0)
+	require.NoError(t, err)
+	_, err = f.WriteAt([]byte{0b11111111}, 300)
+	require.NoError(t, err)
+	require.NoError(t, f.Close())
+
+	c := &countSeriesLifecycleCallback{}
+	opts := head.opts
+	opts.SeriesCallback = c
+
+	w, err = wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone)
+	require.NoError(t, err)
+	head, err = NewHead(prometheus.NewRegistry(), nil, w, nil, head.opts, nil)
+	require.NoError(t, err)
+	require.NoError(t, head.Init(math.MinInt64))
+
+	// There should be no series in the memory after snapshot error since WAL was removed.
+	require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
+	require.Nil(t, head.series.getByHash(lbls.Hash(), lbls))
+	require.Equal(t, uint64(0), head.NumSeries())
+
+	// Since the snapshot could replay certain series, we continue invoking the create hooks.
+	// In such instances, we need to ensure that we also trigger the delete hooks when resetting the memory.
+	require.Equal(t, int64(2), c.created.Load())
+	require.Equal(t, int64(2), c.deleted.Load())
+
+	require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.seriesRemoved))
+	require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.seriesCreated))
 }
 func TestHistogramMetrics(t *testing.T) {
@@ -4601,7 +4638,7 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {
 	require.NoError(t, err)
 	require.NotEqual(t, "", name)
 	require.Equal(t, 0, idx)
-	require.Greater(t, offset, 0)
+	require.Positive(t, offset)
 }

 // TestWBLReplay checks the replay at a low level.
@@ -5032,7 +5069,7 @@ func TestOOOAppendWithNoSeries(t *testing.T) {
 		require.Equal(t, expSamples, ms.headChunks.chunk.NumSamples())
 	}

-	newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", fmt.Sprintf("%d", idx)) }
+	newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", strconv.Itoa(idx)) }

 	s1 := newLabels(1)
 	appendSample(s1, 300) // At 300m.
@@ -5829,3 +5866,14 @@ func TestHeadCompactableDoesNotCompactEmptyHead(t *testing.T) {
 	require.False(t, head.compactable())
 }
+type countSeriesLifecycleCallback struct {
+	created atomic.Int64
+	deleted atomic.Int64
+}
+
+func (c *countSeriesLifecycleCallback) PreCreation(labels.Labels) error { return nil }
+func (c *countSeriesLifecycleCallback) PostCreation(labels.Labels)     { c.created.Inc() }
+func (c *countSeriesLifecycleCallback) PostDeletion(s map[chunks.HeadSeriesRef]labels.Labels) {
+	c.deleted.Add(int64(len(s)))
+}

View file

@@ -51,6 +51,9 @@ const (
 	indexFilename   = "index"
 	seriesByteAlign = 16

+	// checkContextEveryNIterations is used in some tight loops to check if the context is done.
+	checkContextEveryNIterations = 128
 )

 type indexWriterSeries struct {
@@ -1797,7 +1800,12 @@ func (r *Reader) postingsForLabelMatchingV1(ctx context.Context, name string, match func(string) bool) Postings {
 	}

 	var its []Postings
+	count := 1
 	for val, offset := range e {
+		if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
+			return ErrPostings(ctx.Err())
+		}
+		count++
 		if !match(val) {
 			continue
 		}
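
The pattern above amortizes cancellation checks: polling `ctx.Err()` only every N iterations keeps the hot path cheap while still bounding how long a tight loop can outlive a cancelled context. A minimal self-contained sketch of the same idea:

```go
package main

import (
	"context"
	"fmt"
)

const checkEvery = 128 // same role as checkContextEveryNIterations

func sumUntilCancelled(ctx context.Context, vals []int) (int, error) {
	total := 0
	count := 1
	for _, v := range vals {
		// Check the context only once every checkEvery iterations.
		if count%checkEvery == 0 && ctx.Err() != nil {
			return 0, ctx.Err()
		}
		count++
		total += v
	}
	return total, nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // already cancelled: the loop aborts at its first periodic check
	_, err := sumUntilCancelled(ctx, make([]int, 1000))
	fmt.Println(err) // context canceled
}
```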

View file

@@ -20,7 +20,9 @@ import (
 	"hash/crc32"
 	"os"
 	"path/filepath"
+	"slices"
 	"sort"
+	"strconv"
 	"testing"

 	"github.com/stretchr/testify/require"
@@ -160,39 +162,14 @@ func TestIndexRW_Create_Open(t *testing.T) {
 }

 func TestIndexRW_Postings(t *testing.T) {
-	dir := t.TempDir()
 	ctx := context.Background()
+	var input indexWriterSeriesSlice
+	for i := 1; i < 5; i++ {
+		input = append(input, &indexWriterSeries{
+			labels: labels.FromStrings("a", "1", "b", strconv.Itoa(i)),
+		})
+	}
+	ir, fn, _ := createFileReader(ctx, t, input)

-	fn := filepath.Join(dir, indexFilename)
-
-	iw, err := NewWriter(context.Background(), fn)
-	require.NoError(t, err)
-
-	series := []labels.Labels{
-		labels.FromStrings("a", "1", "b", "1"),
-		labels.FromStrings("a", "1", "b", "2"),
-		labels.FromStrings("a", "1", "b", "3"),
-		labels.FromStrings("a", "1", "b", "4"),
-	}
-
-	require.NoError(t, iw.AddSymbol("1"))
-	require.NoError(t, iw.AddSymbol("2"))
-	require.NoError(t, iw.AddSymbol("3"))
-	require.NoError(t, iw.AddSymbol("4"))
-	require.NoError(t, iw.AddSymbol("a"))
-	require.NoError(t, iw.AddSymbol("b"))
-
-	// Postings lists are only written if a series with the respective
-	// reference was added before.
-	require.NoError(t, iw.AddSeries(1, series[0]))
-	require.NoError(t, iw.AddSeries(2, series[1]))
-	require.NoError(t, iw.AddSeries(3, series[2]))
-	require.NoError(t, iw.AddSeries(4, series[3]))
-	require.NoError(t, iw.Close())
-
-	ir, err := NewFileReader(fn)
-	require.NoError(t, err)
-
 	p, err := ir.Postings(ctx, "a", "1")
 	require.NoError(t, err)
@@ -205,7 +182,7 @@ func TestIndexRW_Postings(t *testing.T) {
 		require.NoError(t, err)
 		require.Empty(t, c)
-		testutil.RequireEqual(t, series[i], builder.Labels())
+		testutil.RequireEqual(t, input[i].labels, builder.Labels())
 	}
 	require.NoError(t, p.Err())
@@ -240,8 +217,6 @@ func TestIndexRW_Postings(t *testing.T) {
 		"b": {"1", "2", "3", "4"},
 	}, labelIndices)

-	require.NoError(t, ir.Close())
-
 	t.Run("ShardedPostings()", func(t *testing.T) {
 		ir, err := NewFileReader(fn)
 		require.NoError(t, err)
@@ -296,42 +271,16 @@ func TestIndexRW_Postings(t *testing.T) {
 }

 func TestPostingsMany(t *testing.T) {
-	dir := t.TempDir()
 	ctx := context.Background()
-
-	fn := filepath.Join(dir, indexFilename)
-	iw, err := NewWriter(context.Background(), fn)
-	require.NoError(t, err)
-
 	// Create a label in the index which has 999 values.
-	symbols := map[string]struct{}{}
-	series := []labels.Labels{}
+	var input indexWriterSeriesSlice
 	for i := 1; i < 1000; i++ {
 		v := fmt.Sprintf("%03d", i)
-		series = append(series, labels.FromStrings("i", v, "foo", "bar"))
-		symbols[v] = struct{}{}
+		input = append(input, &indexWriterSeries{
+			labels: labels.FromStrings("i", v, "foo", "bar"),
+		})
 	}
-	symbols["i"] = struct{}{}
-	symbols["foo"] = struct{}{}
-	symbols["bar"] = struct{}{}
-
-	syms := []string{}
-	for s := range symbols {
-		syms = append(syms, s)
-	}
-	sort.Strings(syms)
-	for _, s := range syms {
-		require.NoError(t, iw.AddSymbol(s))
-	}
-
-	for i, s := range series {
-		require.NoError(t, iw.AddSeries(storage.SeriesRef(i), s))
-	}
-	require.NoError(t, iw.Close())
-
-	ir, err := NewFileReader(fn)
-	require.NoError(t, err)
-	defer func() { require.NoError(t, ir.Close()) }()
+	ir, _, symbols := createFileReader(ctx, t, input)

 	cases := []struct {
 		in []string
@@ -387,25 +336,13 @@ func TestPostingsMany(t *testing.T) {
 }

 func TestPersistence_index_e2e(t *testing.T) {
-	dir := t.TempDir()
 	ctx := context.Background()

 	lbls, err := labels.ReadLabels(filepath.Join("..", "testdata", "20kseries.json"), 20000)
 	require.NoError(t, err)
 	// Sort labels as the index writer expects series in sorted order.
 	sort.Sort(labels.Slice(lbls))

-	symbols := map[string]struct{}{}
-	for _, lset := range lbls {
-		lset.Range(func(l labels.Label) {
-			symbols[l.Name] = struct{}{}
-			symbols[l.Value] = struct{}{}
-		})
-	}
-
 	var input indexWriterSeriesSlice
 	ref := uint64(0)
 	// Generate ChunkMetas for every label set.
 	for i, lset := range lbls {
@@ -426,17 +363,7 @@ func TestPersistence_index_e2e(t *testing.T) {
 		})
 	}

-	iw, err := NewWriter(context.Background(), filepath.Join(dir, indexFilename))
-	require.NoError(t, err)
-
-	syms := []string{}
-	for s := range symbols {
-		syms = append(syms, s)
-	}
-	sort.Strings(syms)
-	for _, s := range syms {
-		require.NoError(t, iw.AddSymbol(s))
-	}
+	ir, _, _ := createFileReader(ctx, t, input)

 	// Population procedure as done by compaction.
 	var (
@@ -447,8 +374,6 @@ func TestPersistence_index_e2e(t *testing.T) {
 	mi := newMockIndex()

 	for i, s := range input {
-		err = iw.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...)
-		require.NoError(t, err)
 		require.NoError(t, mi.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...))

 		s.labels.Range(func(l labels.Label) {
@@ -462,12 +387,6 @@ func TestPersistence_index_e2e(t *testing.T) {
 		postings.Add(storage.SeriesRef(i), s.labels)
 	}

-	err = iw.Close()
-	require.NoError(t, err)
-
-	ir, err := NewFileReader(filepath.Join(dir, indexFilename))
-	require.NoError(t, err)
-
 	for p := range mi.postings {
 		gotp, err := ir.Postings(ctx, p.Name, p.Value)
 		require.NoError(t, err)
@@ -523,8 +442,6 @@ func TestPersistence_index_e2e(t *testing.T) {
 	}
 	sort.Strings(expSymbols)
 	require.Equal(t, expSymbols, gotSymbols)
-
-	require.NoError(t, ir.Close())
 }
 func TestWriter_ShouldReturnErrorOnSeriesWithDuplicatedLabelNames(t *testing.T) {
@@ -624,39 +541,14 @@ func BenchmarkReader_ShardedPostings(b *testing.B) {
 		numShards = 16
 	)

-	dir, err := os.MkdirTemp("", "benchmark_reader_sharded_postings")
-	require.NoError(b, err)
-	defer func() {
-		require.NoError(b, os.RemoveAll(dir))
-	}()
-
 	ctx := context.Background()
+	var input indexWriterSeriesSlice

-	// Generate an index.
-	fn := filepath.Join(dir, indexFilename)
-
-	iw, err := NewWriter(ctx, fn)
-	require.NoError(b, err)
-
 	for i := 1; i <= numSeries; i++ {
-		require.NoError(b, iw.AddSymbol(fmt.Sprintf("%10d", i)))
+		input = append(input, &indexWriterSeries{
+			labels: labels.FromStrings("const", fmt.Sprintf("%10d", 1), "unique", fmt.Sprintf("%10d", i)),
+		})
 	}
-	require.NoError(b, iw.AddSymbol("const"))
-	require.NoError(b, iw.AddSymbol("unique"))
-
-	for i := 1; i <= numSeries; i++ {
-		require.NoError(b, iw.AddSeries(storage.SeriesRef(i),
-			labels.FromStrings("const", fmt.Sprintf("%10d", 1), "unique", fmt.Sprintf("%10d", i))))
-	}
-	require.NoError(b, iw.Close())
+	ir, _, _ := createFileReader(ctx, b, input)

-	b.ResetTimer()
-
-	// Create a reader to read back all postings from the index.
-	ir, err := NewFileReader(fn)
-	require.NoError(b, err)
-
 	b.ResetTimer()

 	for n := 0; n < b.N; n++ {
@@ -719,3 +611,64 @@ func TestChunksTimeOrdering(t *testing.T) {
 	require.NoError(t, idx.Close())
 }

+func TestReader_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) {
+	const seriesCount = 1000
+	var input indexWriterSeriesSlice
+	for i := 1; i < seriesCount; i++ {
+		input = append(input, &indexWriterSeries{
+			labels: labels.FromStrings("__name__", fmt.Sprintf("%4d", i)),
+			chunks: []chunks.Meta{
+				{Ref: 1, MinTime: 0, MaxTime: 10},
+			},
+		})
+	}
+	ir, _, _ := createFileReader(context.Background(), t, input)
+
+	failAfter := uint64(seriesCount / 2) // Fail after processing half of the series.
+	ctx := &testutil.MockContextErrAfter{FailAfter: failAfter}
+	p := ir.PostingsForLabelMatching(ctx, "__name__", func(string) bool {
+		return true
+	})
+	require.Error(t, p.Err())
+	require.Equal(t, failAfter, ctx.Count())
+}
+
+// createFileReader creates a temporary index file. It writes the provided input to this file.
+// It returns a Reader for this file, the file's name, and the symbol map.
+func createFileReader(ctx context.Context, tb testing.TB, input indexWriterSeriesSlice) (*Reader, string, map[string]struct{}) {
+	tb.Helper()
+
+	fn := filepath.Join(tb.TempDir(), indexFilename)
+
+	iw, err := NewWriter(ctx, fn)
+	require.NoError(tb, err)
+
+	symbols := map[string]struct{}{}
+	for _, s := range input {
+		s.labels.Range(func(l labels.Label) {
+			symbols[l.Name] = struct{}{}
+			symbols[l.Value] = struct{}{}
+		})
+	}
+
+	syms := []string{}
+	for s := range symbols {
+		syms = append(syms, s)
+	}
+	slices.Sort(syms)
+	for _, s := range syms {
+		require.NoError(tb, iw.AddSymbol(s))
+	}
+	for i, s := range input {
+		require.NoError(tb, iw.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...))
+	}
+	require.NoError(tb, iw.Close())
+
+	ir, err := NewFileReader(fn)
+	require.NoError(tb, err)
+	tb.Cleanup(func() {
+		require.NoError(tb, ir.Close())
+	})
+	return ir, fn, symbols
+}

View file

@@ -416,7 +416,13 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) Postings {
 	}

 	var its []Postings
+	count := 1
 	for _, v := range vals {
+		if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
+			p.mtx.RUnlock()
+			return ErrPostings(ctx.Err())
+		}
+		count++
 		if match(v) {
 			its = append(its, NewListPostings(e[v]))
 		}

View file

@@ -22,12 +22,15 @@ import (
 	"math/rand"
 	"sort"
 	"strconv"
+	"strings"
 	"testing"

+	"github.com/grafana/regexp"
 	"github.com/stretchr/testify/require"

 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/testutil"
 )

 func TestMemPostings_addFor(t *testing.T) {
@@ -49,7 +52,7 @@ func TestMemPostings_ensureOrder(t *testing.T) {
 			for j := range l {
 				l[j] = storage.SeriesRef(rand.Uint64())
 			}
-			v := fmt.Sprintf("%d", i)
+			v := strconv.Itoa(i)
 			p.m["a"][v] = l
 		}
@@ -390,7 +393,7 @@ func BenchmarkMerge(t *testing.B) {
 	its := make([]Postings, len(refs))
 	for _, nSeries := range []int{1, 10, 100, 1000, 10000, 100000} {
-		t.Run(fmt.Sprint(nSeries), func(bench *testing.B) {
+		t.Run(strconv.Itoa(nSeries), func(bench *testing.B) {
 			ctx := context.Background()
 			for i := 0; i < bench.N; i++ {
 				// Reset the ListPostings to their original values each time round the loop.
@@ -1282,3 +1285,71 @@ func BenchmarkListPostings(b *testing.B) {
 		})
 	}
 }

+func slowRegexpString() string {
+	nums := map[int]struct{}{}
+	for i := 10_000; i < 20_000; i++ {
+		if i%3 == 0 {
+			nums[i] = struct{}{}
+		}
+	}
+
+	var sb strings.Builder
+	sb.WriteString(".*(9999")
+	for i := range nums {
+		sb.WriteString("|")
+		sb.WriteString(strconv.Itoa(i))
+	}
+	sb.WriteString(").*")
+	return sb.String()
+}
+
+func BenchmarkMemPostings_PostingsForLabelMatching(b *testing.B) {
+	fast := regexp.MustCompile("^(100|200)$")
+	slowRegexp := "^" + slowRegexpString() + "$"
+	b.Logf("Slow regexp length = %d", len(slowRegexp))
+	slow := regexp.MustCompile(slowRegexp)
+
+	for _, labelValueCount := range []int{1_000, 10_000, 100_000} {
+		b.Run(fmt.Sprintf("labels=%d", labelValueCount), func(b *testing.B) {
+			mp := NewMemPostings()
+			for i := 0; i < labelValueCount; i++ {
+				mp.Add(storage.SeriesRef(i), labels.FromStrings("label", strconv.Itoa(i)))
+			}
+
+			fp, err := ExpandPostings(mp.PostingsForLabelMatching(context.Background(), "label", fast.MatchString))
+			require.NoError(b, err)
+			b.Logf("Fast matcher matches %d series", len(fp))
+			b.Run("matcher=fast", func(b *testing.B) {
+				for i := 0; i < b.N; i++ {
+					mp.PostingsForLabelMatching(context.Background(), "label", fast.MatchString).Next()
+				}
+			})
+
+			sp, err := ExpandPostings(mp.PostingsForLabelMatching(context.Background(), "label", slow.MatchString))
+			require.NoError(b, err)
+			b.Logf("Slow matcher matches %d series", len(sp))
+			b.Run("matcher=slow", func(b *testing.B) {
+				for i := 0; i < b.N; i++ {
+					mp.PostingsForLabelMatching(context.Background(), "label", slow.MatchString).Next()
+				}
+			})
+		})
+	}
+}
+
+func TestMemPostings_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) {
+	memP := NewMemPostings()
+	seriesCount := 10 * checkContextEveryNIterations
+	for i := 1; i <= seriesCount; i++ {
+		memP.Add(storage.SeriesRef(i), labels.FromStrings("__name__", fmt.Sprintf("%4d", i)))
+	}
+
+	failAfter := uint64(seriesCount / 2 / checkContextEveryNIterations)
+	ctx := &testutil.MockContextErrAfter{FailAfter: failAfter}
+	p := memP.PostingsForLabelMatching(ctx, "__name__", func(string) bool {
+		return true
+	})
+	require.Error(t, p.Err())
+	require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result.
+}

View file

@@ -33,6 +33,9 @@ import (
 	"github.com/prometheus/prometheus/util/annotations"
 )

+// checkContextEveryNIterations is used in some tight loops to check if the context is done.
+const checkContextEveryNIterations = 100
+
 type blockBaseQuerier struct {
 	blockID ulid.ULID
 	index   IndexReader
@@ -354,11 +357,16 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher) (index.Postings, error) {
 	}
 	res := vals[:0]
-	// If the inverse match is ="", we just want all the values.
-	if m.Type == labels.MatchEqual && m.Value == "" {
+	// If the match before inversion was !="" or !~"", we just want all the values.
+	if m.Value == "" && (m.Type == labels.MatchRegexp || m.Type == labels.MatchEqual) {
 		res = vals
 	} else {
+		count := 1
 		for _, val := range vals {
+			if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
+				return nil, ctx.Err()
+			}
+			count++
 			if !m.Matches(val) {
 				res = append(res, val)
 			}
@@ -387,7 +395,12 @@ func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, matchers ...*labels.Matcher) ([]string, error) {
 		// re-use the allValues slice to avoid allocations
 		// this is safe because the iteration is always ahead of the append
 		filteredValues := allValues[:0]
+		count := 1
 		for _, v := range allValues {
+			if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
+				return nil, ctx.Err()
+			}
+			count++
 			if m.Matches(v) {
 				filteredValues = append(filteredValues, v)
 			}

View file

@@ -38,6 +38,7 @@ import (
 	"github.com/prometheus/prometheus/tsdb/tombstones"
 	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 	"github.com/prometheus/prometheus/util/annotations"
+	"github.com/prometheus/prometheus/util/testutil"
 )

 // TODO(bwplotka): Replace those mocks with remote.concreteSeriesSet.
@@ -2807,6 +2808,13 @@ func TestPostingsForMatchers(t *testing.T) {
 			},
 		},
 		// Not regex.
+		{
+			matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "i", "")},
+			exp: []labels.Labels{
+				labels.FromStrings("n", "1", "i", "a"),
+				labels.FromStrings("n", "1", "i", "b"),
+			},
+		},
 		{
 			matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "^1$")},
 			exp: []labels.Labels{
@@ -3638,3 +3646,77 @@ func TestQueryWithOneChunkCompletelyDeleted(t *testing.T) {
 	require.NoError(t, css.Err())
 	require.Equal(t, 1, seriesCount)
 }

+func TestReader_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) {
+	ir := mockReaderOfLabels{}
+
+	failAfter := uint64(mockReaderOfLabelsSeriesCount / 2 / checkContextEveryNIterations)
+	ctx := &testutil.MockContextErrAfter{FailAfter: failAfter}
+	_, err := labelValuesWithMatchers(ctx, ir, "__name__", labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".+"))
+
+	require.Error(t, err)
+	require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result.
+}
+
+func TestReader_InversePostingsForMatcherHonorsContextCancel(t *testing.T) {
+	ir := mockReaderOfLabels{}
+
+	failAfter := uint64(mockReaderOfLabelsSeriesCount / 2 / checkContextEveryNIterations)
+	ctx := &testutil.MockContextErrAfter{FailAfter: failAfter}
+	_, err := inversePostingsForMatcher(ctx, ir, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))

+	require.Error(t, err)
+	require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result.
+}
+
+type mockReaderOfLabels struct{}
+
+const mockReaderOfLabelsSeriesCount = checkContextEveryNIterations * 10
+
+func (m mockReaderOfLabels) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) {
+	return make([]string, mockReaderOfLabelsSeriesCount), nil
+}
+
+func (m mockReaderOfLabels) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
+	panic("LabelValueFor called")
+}
+
+func (m mockReaderOfLabels) SortedLabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) {
+	panic("SortedLabelValues called")
+}
+
+func (m mockReaderOfLabels) Close() error {
+	return nil
+}
+
+func (m mockReaderOfLabels) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) {
+	panic("LabelNames called")
+}
+
+func (m mockReaderOfLabels) LabelNamesFor(context.Context, ...storage.SeriesRef) ([]string, error) {
+	panic("LabelNamesFor called")
+}
+
+func (m mockReaderOfLabels) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings {
+	panic("PostingsForLabelMatching called")
+}
+
+func (m mockReaderOfLabels) Postings(context.Context, string, ...string) (index.Postings, error) {
+	panic("Postings called")
+}
+
+func (m mockReaderOfLabels) ShardedPostings(index.Postings, uint64, uint64) index.Postings {
+	panic("Postings called")
+}
+
+func (m mockReaderOfLabels) SortedPostings(index.Postings) index.Postings {
+	panic("SortedPostings called")
+}
+
+func (m mockReaderOfLabels) Series(storage.SeriesRef, *labels.ScratchBuilder, *[]chunks.Meta) error {
+	panic("Series called")
+}
+
+func (m mockReaderOfLabels) Symbols() index.StringIter {
+	panic("Series called")
+}

View file

@@ -19,6 +19,7 @@ import (
 	"os"
 	"path/filepath"
 	"sort"
+	"strconv"
 	"strings"
 	"testing"
@@ -232,10 +233,10 @@ func TestCheckpoint(t *testing.T) {
 		// Write changing metadata for each series. In the end, only the latest
 		// version should end up in the checkpoint.
 		b = enc.Metadata([]record.RefMetadata{
-			{Ref: 0, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
-			{Ref: 1, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
-			{Ref: 2, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
-			{Ref: 3, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
+			{Ref: 0, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
+			{Ref: 1, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
+			{Ref: 2, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
+			{Ref: 3, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
 		}, nil)
 		require.NoError(t, w.Log(b))
@@ -250,7 +251,7 @@ func TestCheckpoint(t *testing.T) {
 	require.NoError(t, w.Truncate(107))
 	require.NoError(t, DeleteCheckpoints(w.Dir(), 106))
 	require.Equal(t, histogramsInWAL+floatHistogramsInWAL+samplesInWAL, stats.TotalSamples)
-	require.Greater(t, stats.DroppedSamples, 0)
+	require.Positive(t, stats.DroppedSamples)

 	// Only the new checkpoint should be left.
 	files, err := os.ReadDir(dir)
@@ -324,8 +325,8 @@ func TestCheckpoint(t *testing.T) {
 	testutil.RequireEqual(t, expectedRefSeries, series)

 	expectedRefMetadata := []record.RefMetadata{
-		{Ref: 0, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
-		{Ref: 2, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
+		{Ref: 0, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
+		{Ref: 2, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
 		{Ref: 4, Unit: "unit", Help: "help"},
 	}
 	sort.Slice(metadata, func(i, j int) bool { return metadata[i].Ref < metadata[j].Ref })

View file

@@ -13,7 +13,12 @@
 package testutil

-import "time"
+import (
+	"context"
+	"time"
+
+	"go.uber.org/atomic"
+)

 // A MockContext provides a simple stub implementation of a Context.
 type MockContext struct {
@@ -40,3 +45,23 @@ func (c *MockContext) Err() error {
 func (c *MockContext) Value(interface{}) interface{} {
 	return nil
 }

+// MockContextErrAfter is a MockContext that will return an error after a certain
+// number of calls to Err().
+type MockContextErrAfter struct {
+	MockContext
+	count     atomic.Uint64
+	FailAfter uint64
+}
+
+func (c *MockContextErrAfter) Err() error {
+	c.count.Inc()
+	if c.count.Load() >= c.FailAfter {
+		return context.Canceled
+	}
+	return c.MockContext.Err()
+}
+
+func (c *MockContextErrAfter) Count() uint64 {
+	return c.count.Load()
+}
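
MockContextErrAfter is what lets the new tests assert exactly how often a loop polled its context. A hypothetical test sketch (not from the commit) showing how it pairs with the periodic-check pattern:

```go
package testutil

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestLoopPollsContext(t *testing.T) {
	// Err() starts returning context.Canceled on its third call.
	ctx := &MockContextErrAfter{FailAfter: 3}

	// Stand-in for a tight loop that checks the context on every pass.
	for i := 0; i < 10; i++ {
		if ctx.Err() != nil {
			break
		}
	}

	// The loop made exactly three Err() calls before it stopped.
	require.Equal(t, uint64(3), ctx.Count())
}
```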

View file

@@ -116,9 +116,11 @@ type RulesRetriever interface {
 	AlertingRules() []*rules.AlertingRule
 }

+// StatsRenderer converts engine statistics into a format suitable for the API.
 type StatsRenderer func(context.Context, *stats.Statistics, string) stats.QueryStats

-func defaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats {
+// DefaultStatsRenderer is the default stats renderer for the API.
+func DefaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats {
 	if param != "" {
 		return stats.NewQueryStats(s)
 	}
@@ -272,7 +274,7 @@ func NewAPI(
 		buildInfo:         buildInfo,
 		gatherer:          gatherer,
 		isAgent:           isAgent,
-		statsRenderer:     defaultStatsRenderer,
+		statsRenderer:     DefaultStatsRenderer,
 		remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame),
 	}
@@ -461,7 +463,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) {
 	// Optional stats field in response if parameter "stats" is not empty.
 	sr := api.statsRenderer
 	if sr == nil {
-		sr = defaultStatsRenderer
+		sr = DefaultStatsRenderer
 	}
 	qs := sr(ctx, qry.Stats(), r.FormValue("stats"))
@@ -563,7 +565,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) {
 	// Optional stats field in response if parameter "stats" is not empty.
 	sr := api.statsRenderer
 	if sr == nil {
-		sr = defaultStatsRenderer
+		sr = DefaultStatsRenderer
 	}
 	qs := sr(ctx, qry.Stats(), r.FormValue("stats"))
@@ -702,7 +704,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
 		names = []string{}
 	}

-	if len(names) >= limit {
+	if len(names) > limit {
 		names = names[:limit]
 		warnings = warnings.Add(errors.New("results truncated due to limit"))
 	}
@@ -791,7 +793,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
 	slices.Sort(vals)

-	if len(vals) >= limit {
+	if len(vals) > limit {
 		vals = vals[:limit]
 		warnings = warnings.Add(errors.New("results truncated due to limit"))
 	}
@@ -887,7 +889,8 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
 		}
 		metrics = append(metrics, set.At().Labels())

-		if len(metrics) >= limit {
+		if len(metrics) > limit {
+			metrics = metrics[:limit]
 			warnings.Add(errors.New("results truncated due to limit"))
 			return apiFuncResult{metrics, nil, warnings, closer}
 		}
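
The `>=` to `>` change fixes an off-by-one in the truncation warning: previously a result set that exactly filled the limit was still flagged as truncated. A small sketch of the corrected boundary behavior (illustrative helper, not the API's actual function):

```go
package main

import "fmt"

// truncate returns at most limit names and whether anything was dropped.
func truncate(names []string, limit int) ([]string, bool) {
	if len(names) > limit { // was `>=`, which also warned on an exact fit
		return names[:limit], true // caller adds "results truncated due to limit"
	}
	return names, false
}

func main() {
	names := []string{"a", "b"}
	_, warned := truncate(names, 2)
	fmt.Println(warned) // false: exactly at the limit, nothing dropped
	_, warned = truncate(append(names, "c"), 2)
	fmt.Println(warned) // true: one result was dropped
}
```

This matches the new test cases above, which expect warningsCount of 0 when the limit is not exceeded and 1 when it is.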

View file

@@ -25,6 +25,7 @@ import (
 	"reflect"
 	"runtime"
 	"sort"
+	"strconv"
 	"strings"
 	"testing"
 	"time"
@@ -1059,6 +1060,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.ExemplarStorage, testLabelAPI bool) {
 		responseLen           int // If nonzero, check only the length; `response` is ignored.
 		responseMetadataTotal int
 		responseAsJSON        string
+		warningsCount         int
 		errType               errorType
 		sorter                func(interface{})
 		metadata              []targetMetadata
@@ -1416,7 +1418,17 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.ExemplarStorage, testLabelAPI bool) {
 				"match[]": []string{"test_metric1"},
 				"limit":   []string{"1"},
 			},
-			responseLen: 1, // API does not specify which particular value will come back.
+			responseLen:   1, // API does not specify which particular value will come back.
+			warningsCount: 1,
+		},
+		{
+			endpoint: api.series,
+			query: url.Values{
+				"match[]": []string{"test_metric1"},
+				"limit":   []string{"2"},
+			},
+			responseLen:   2, // API does not specify which particular value will come back.
+			warningsCount: 0, // No warnings if limit isn't exceeded.
 		},
 		// Missing match[] query params in series requests.
 		{
@@ -2699,7 +2711,19 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.ExemplarStorage, testLabelAPI bool) {
 				query: url.Values{
 					"limit": []string{"2"},
 				},
-				responseLen: 2, // API does not specify which particular values will come back.
+				responseLen:   2, // API does not specify which particular values will come back.
+				warningsCount: 1,
+			},
+			{
+				endpoint: api.labelValues,
+				params: map[string]string{
+					"name": "__name__",
+				},
+				query: url.Values{
+					"limit": []string{"4"},
+				},
+				responseLen:   4, // API does not specify which particular values will come back.
+				warningsCount: 0, // No warnings if limit isn't exceeded.
 			},
 			// Label names.
 			{
@@ -2846,7 +2870,16 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.ExemplarStorage, testLabelAPI bool) {
 				query: url.Values{
 					"limit": []string{"2"},
 				},
-				responseLen: 2, // API does not specify which particular values will come back.
+				responseLen:   2, // API does not specify which particular values will come back.
+				warningsCount: 1,
+			},
+			{
+				endpoint: api.labelNames,
+				query: url.Values{
+					"limit": []string{"3"},
+				},
+				responseLen:   3, // API does not specify which particular values will come back.
+				warningsCount: 0, // No warnings if limit isn't exceeded.
 			},
 		}...)
 	}
@@ -2923,6 +2956,8 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.ExemplarStorage, testLabelAPI bool) {
 				require.NoError(t, err)
 				require.JSONEq(t, test.responseAsJSON, string(s))
 			}
+
+			require.Len(t, res.warnings, test.warningsCount)
 		})
 	}
 })
@@ -2938,8 +2973,10 @@ func assertAPIError(t *testing.T, got *apiError, exp errorType) {
 	t.Helper()

 	if exp == errorNone {
+		//nolint:testifylint
 		require.Nil(t, got)
 	} else {
+		//nolint:testifylint
 		require.NotNil(t, got)
 		require.Equal(t, exp, got.typ, "(%q)", got)
 	}
@@ -3544,7 +3581,7 @@ func TestTSDBStatus(t *testing.T) {
 		},
 	} {
 		tc := tc
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			api := &API{db: tc.db, gatherer: prometheus.DefaultGatherer}
 			endpoint := tc.endpoint(api)
 			req, err := http.NewRequest(tc.method, fmt.Sprintf("?%s", tc.values.Encode()), nil)

View file

@@ -22,6 +22,7 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"sort"
+	"strconv"
 	"strings"
 	"testing"
 	"time"
@@ -341,8 +342,8 @@ func TestFederationWithNativeHistograms(t *testing.T) {
 	}
 	app := db.Appender(context.Background())
 	for i := 0; i < 6; i++ {
-		l := labels.FromStrings("__name__", "test_metric", "foo", fmt.Sprintf("%d", i))
-		expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", fmt.Sprintf("%d", i))
+		l := labels.FromStrings("__name__", "test_metric", "foo", strconv.Itoa(i))
+		expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", strconv.Itoa(i))
 		var err error
 		switch i {
 		case 0, 3:
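
A recurring change throughout this commit is replacing `fmt.Sprintf("%d", i)` with `strconv.Itoa(i)`, which skips fmt's format-string parsing and interface boxing. A rough micro-benchmark sketch of the difference (illustrative only; exact numbers depend on the machine and Go version):

```go
package main

import (
	"fmt"
	"strconv"
	"testing"
)

func BenchmarkSprintf(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = fmt.Sprintf("%d", i) // format parsing + interface boxing per call
	}
}

func BenchmarkItoa(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = strconv.Itoa(i) // direct integer-to-string conversion
	}
}

func main() {
	fmt.Println("Itoa:   ", testing.Benchmark(BenchmarkItoa))
	fmt.Println("Sprintf:", testing.Benchmark(BenchmarkSprintf))
}
```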