Merge remote-tracking branch 'prometheus/main' into arve/wlog-histograms

Commit: 90145fa884
.gitpod.Dockerfile (vendored), 32 changes:

@@ -1,15 +1,33 @@
 FROM gitpod/workspace-full
 
+# Set Node.js version as an environment variable.
 ENV CUSTOM_NODE_VERSION=16
-ENV CUSTOM_GO_VERSION=1.19
-ENV GOPATH=$HOME/go-packages
-ENV GOROOT=$HOME/go
-ENV PATH=$GOROOT/bin:$GOPATH/bin:$PATH
 
+# Install and use the specified Node.js version via nvm.
 RUN bash -c ". .nvm/nvm.sh && nvm install ${CUSTOM_NODE_VERSION} && nvm use ${CUSTOM_NODE_VERSION} && nvm alias default ${CUSTOM_NODE_VERSION}"
 
+# Ensure nvm uses the default Node.js version in all new shells.
 RUN echo "nvm use default &>/dev/null" >> ~/.bashrc.d/51-nvm-fix
-RUN curl -fsSL https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz | tar xzs \
-    && printf '%s\n' 'export GOPATH=/workspace/go' \
-    'export PATH=$GOPATH/bin:$PATH' > $HOME/.bashrc.d/300-go
+
+# Remove any existing Go installation in $HOME path.
+RUN rm -rf $HOME/go $HOME/go-packages
+
+# Export go environment variables.
+RUN echo "export GOPATH=/workspace/go" >> ~/.bashrc.d/300-go && \
+    echo "export GOBIN=\$GOPATH/bin" >> ~/.bashrc.d/300-go && \
+    echo "export GOROOT=${HOME}/go" >> ~/.bashrc.d/300-go && \
+    echo "export PATH=\$GOROOT/bin:\$GOBIN:\$PATH" >> ~/.bashrc
+
+# Reload the environment variables to ensure go environment variables are
+# available in subsequent commands.
+RUN bash -c "source ~/.bashrc && source ~/.bashrc.d/300-go"
+
+# Fetch the Go version dynamically from the Prometheus go.mod file and Install Go in $HOME path.
+RUN export CUSTOM_GO_VERSION=$(curl -sSL "https://raw.githubusercontent.com/prometheus/prometheus/main/go.mod" | awk '/^go/{print $2".0"}') && \
+    curl -fsSL "https://dl.google.com/go/go${CUSTOM_GO_VERSION}.linux-amd64.tar.gz" | \
+    tar -xz -C $HOME
+
+# Fetch the goyacc parser version dynamically from the Prometheus Makefile
+# and install it globally in $GOBIN path.
+RUN GOYACC_VERSION=$(curl -fsSL "https://raw.githubusercontent.com/prometheus/prometheus/main/Makefile" | awk -F'=' '/GOYACC_VERSION \?=/{gsub(/ /, "", $2); print $2}') && \
+    go install "golang.org/x/tools/cmd/goyacc@${GOYACC_VERSION}"
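The version pinning above lives entirely in the `awk` one-liner: it scans the fetched `go.mod` for the line beginning with `go` and appends `.0` to turn the directive (e.g. `1.19`) into a full release number (`1.19.0`). A minimal Go sketch of the same extraction, purely illustrative and assuming the `go.mod` contents are already in memory:

```go
package main

import (
	"fmt"
	"strings"
)

// goVersionFromMod mimics the awk script in the Dockerfile: it finds the
// line starting with "go " in a go.mod file and appends ".0" to the version.
func goVersionFromMod(gomod string) string {
	for _, line := range strings.Split(gomod, "\n") {
		if strings.HasPrefix(line, "go ") {
			return strings.TrimSpace(strings.TrimPrefix(line, "go ")) + ".0"
		}
	}
	return ""
}

func main() {
	fmt.Println(goVersionFromMod("module example\n\ngo 1.19\n")) // prints 1.19.0
}
```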
@@ -21,6 +21,7 @@ linters:
     - goimports
     - misspell
     - nolintlint
+    - perfsprint
     - predeclared
     - revive
     - testifylint
@@ -44,7 +45,9 @@ issues:
     - linters:
         - godot
       source: "^// ==="
+    - linters:
+        - perfsprint
+      text: "fmt.Sprintf can be replaced with string addition"
 linters-settings:
   depguard:
     rules:
@@ -85,6 +88,9 @@ linters-settings:
     local-prefixes: github.com/prometheus/prometheus
   gofumpt:
     extra-rules: true
+  perfsprint:
+    # Optimizes `fmt.Errorf`.
+    errorf: false
   revive:
     # By default, revive will enable only the linting rules that are named in the configuration file.
     # So, it's needed to explicitly set in configuration all required rules.
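`perfsprint` reports `fmt.Sprintf` calls whose result can be produced by a cheaper, allocation-friendlier `strconv` call or plain string concatenation; that is the mechanical change applied across most of the Go hunks below. A small sketch of the kind of rewrite it suggests:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	i, b := 42, true

	// Flagged by perfsprint: fmt.Sprintf goes through reflection-based
	// formatting for what is a simple conversion.
	_ = fmt.Sprintf("%d", i)
	_ = fmt.Sprintf("%t", b)

	// Preferred: direct strconv conversions, as applied throughout this diff.
	_ = strconv.Itoa(i)
	_ = strconv.FormatBool(b)
}
```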
@@ -42,7 +42,12 @@ go build ./cmd/prometheus/
 make test # Make sure all the tests pass before you commit and push :)
 ```
 
-We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action.
+To run a collection of Go linters through [`golangci-lint`](https://github.com/golangci/golangci-lint), do:
+
+```bash
+make lint
+```
+
+If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. See [this section of the golangci-lint documentation](https://golangci-lint.run/usage/false-positives/#nolint-directive) for more information.
 
 All our issues are regularly tagged so that you can also filter down the issues involving the components you want to work on. For our labeling policy refer [the wiki page](https://github.com/prometheus/prometheus/wiki/Label-Names-and-Descriptions).
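For illustration, a `//nolint` directive goes directly above (or on) the offending line and names only the linters being silenced. A minimal, hypothetical sketch:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Suppress only the named linter on this line, ideally with a reason.
	//nolint:errcheck // best-effort write; the error is intentionally ignored
	fmt.Fprintln(os.Stderr, "example")
}
```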
@@ -418,7 +418,7 @@ func main() {
 	serverOnlyFlag(a, "rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
 		Default("1m").SetValue(&cfg.resendDelay)
 
-	serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently.").
+	serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently. When set, \"query.max-concurrency\" may need to be adjusted accordingly.").
 		Default("4").Int64Var(&cfg.maxConcurrentEvals)
 
 	a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
@@ -24,6 +24,7 @@ import (
 	"os/exec"
 	"path/filepath"
 	"runtime"
+	"strconv"
 	"strings"
 	"syscall"
 	"testing"
@@ -189,7 +190,7 @@ func TestSendAlerts(t *testing.T) {
 
 	for i, tc := range testCases {
 		tc := tc
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
 				require.NotEmpty(t, tc.in, "sender called with 0 alert")
 				require.Equal(t, tc.exp, alerts)
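The swap from `fmt.Sprintf("%d", i)` to `strconv.Itoa(i)` keeps the generated subtest names byte-for-byte identical while satisfying `perfsprint`. The same pattern in isolation, as a sketch:

```go
package demo

import (
	"strconv"
	"testing"
)

func TestCases(t *testing.T) {
	cases := []int{1, 2, 3}
	for i, c := range cases {
		// strconv.Itoa yields the same subtest name as
		// fmt.Sprintf("%d", i) without the formatting machinery.
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			if c <= 0 {
				t.Fatalf("case %d: want positive, got %d", i, c)
			}
		})
	}
}
```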
@@ -88,7 +88,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 	blockDuration := getCompatibleBlockDuration(maxBlockDuration)
 	mint = blockDuration * (mint / blockDuration)
 
-	db, err := tsdb.OpenDBReadOnly(outputDir, nil)
+	db, err := tsdb.OpenDBReadOnly(outputDir, "", nil)
 	if err != nil {
 		return err
 	}
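Judging from this hunk, `tsdb.OpenDBReadOnly` gained a second parameter for the sandbox directory root, and existing callers pass an empty string to keep the default behaviour. A hedged caller sketch based only on the signature visible in this diff (the `Close` call and import path are assumptions from the surrounding code):

```go
package main

import (
	"log"

	"github.com/prometheus/prometheus/tsdb"
)

func openReadOnly(dir, sandboxDirRoot string) (*tsdb.DBReadOnly, error) {
	// The second argument is the new sandbox directory root; callers in
	// this diff pass "" to keep the previous default.
	return tsdb.OpenDBReadOnly(dir, sandboxDirRoot, nil)
}

func main() {
	db, err := openReadOnly("data/", "")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```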
@@ -235,12 +235,14 @@ func main() {
 
 	tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
 	dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
+	dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
 	dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
 	dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
 	dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
 
-	tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics format. Native histograms are not dumped.")
+	tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
 	dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
+	dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
 	dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
 	dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
 	dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
@@ -396,9 +398,9 @@ func main() {
 		os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
 
 	case tsdbDumpCmd.FullCommand():
-		os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
+		os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
 	case tsdbDumpOpenMetricsCmd.FullCommand():
-		os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
+		os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
 	// TODO(aSquare14): Work on adding support for custom block size.
 	case openMetricsImportCmd.FullCommand():
 		os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
@@ -25,6 +25,7 @@ import (
 	"os/exec"
 	"path/filepath"
 	"runtime"
+	"strconv"
 	"strings"
 	"syscall"
 	"testing"
@@ -410,7 +411,7 @@ func TestExitCodes(t *testing.T) {
 	} {
 		t.Run(c.file, func(t *testing.T) {
 			for _, lintFatal := range []bool{true, false} {
-				t.Run(fmt.Sprintf("%t", lintFatal), func(t *testing.T) {
+				t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) {
 					args := []string{"-test.main", "check", "config", "testdata/" + c.file}
 					if lintFatal {
 						args = append(args, "--lint-fatal")
@@ -338,7 +338,7 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
 }
 
 func listBlocks(path string, humanReadable bool) error {
-	db, err := tsdb.OpenDBReadOnly(path, nil)
+	db, err := tsdb.OpenDBReadOnly(path, "", nil)
 	if err != nil {
 		return err
 	}
@@ -393,7 +393,7 @@ func getFormatedBytes(bytes int64, humanReadable bool) string {
 }
 
 func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) {
-	db, err := tsdb.OpenDBReadOnly(path, nil)
+	db, err := tsdb.OpenDBReadOnly(path, "", nil)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -708,8 +708,8 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
 
 type SeriesSetFormatter func(series storage.SeriesSet) error
 
-func dumpSamples(ctx context.Context, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
-	db, err := tsdb.OpenDBReadOnly(path, nil)
+func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
+	db, err := tsdb.OpenDBReadOnly(dbDir, sandboxDirRoot, nil)
 	if err != nil {
 		return err
 	}
@@ -856,9 +856,9 @@ func displayHistogram(dataType string, datas []int, total int) {
 	}
 	avg := sum / len(datas)
 	fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1])
-	maxLeftLen := strconv.Itoa(len(fmt.Sprintf("%d", end)))
-	maxRightLen := strconv.Itoa(len(fmt.Sprintf("%d", end+step)))
-	maxCountLen := strconv.Itoa(len(fmt.Sprintf("%d", maxCount)))
+	maxLeftLen := strconv.Itoa(len(strconv.Itoa(end)))
+	maxRightLen := strconv.Itoa(len(strconv.Itoa(end + step)))
+	maxCountLen := strconv.Itoa(len(strconv.Itoa(maxCount)))
 	for bucket, count := range buckets {
 		percentage := 100.0 * count / total
 		fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage))
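The histogram rows above build their printf verbs at runtime: the column width is the digit count of the largest value, `len(strconv.Itoa(end))`, converted back to a string and spliced into the verb. The trick in isolation:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	end := 10000
	// Width = number of digits in the largest value to be printed.
	width := strconv.Itoa(len(strconv.Itoa(end)))

	// Builds the verb "%5d" at runtime, so the columns line up.
	for _, v := range []int{7, 42, 10000} {
		fmt.Printf("[%"+width+"d]\n", v)
	}
}
```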
@@ -64,6 +64,7 @@ func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []strin
 	err := dumpSamples(
 		context.Background(),
 		path,
+		t.TempDir(),
 		mint,
 		maxt,
 		match,
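The test supplies `t.TempDir()` as the sandbox root; `t.TempDir` hands each (sub)test a fresh directory that the testing framework deletes automatically, so the test needs no manual cleanup. A minimal sketch:

```go
package demo

import (
	"os"
	"path/filepath"
	"testing"
)

func TestSandbox(t *testing.T) {
	// A unique directory per test, removed automatically when the test ends.
	dir := t.TempDir()

	path := filepath.Join(dir, "scratch.txt")
	if err := os.WriteFile(path, []byte("x"), 0o600); err != nil {
		t.Fatal(err)
	}
}
```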
@@ -573,7 +573,7 @@ func (la labelsAndAnnotations) String() string {
 	}
 	s := "[\n0:" + indentLines("\n"+la[0].String(), " ")
 	for i, l := range la[1:] {
-		s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), " ")
+		s += ",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), " ")
 	}
 	s += "\n]"
@@ -18,6 +18,7 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"strconv"
 	"strings"
 	"time"
 
@@ -279,7 +280,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 		if inst.PrivateDnsName != nil {
 			labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName)
 		}
-		addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
+		addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
 		labels[model.AddressLabel] = model.LabelValue(addr)
 
 		if inst.Platform != nil {
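`net.JoinHostPort` takes the port as a string so it can also bracket IPv6 hosts, which is why every discovery module converts the numeric port first; the change merely swaps the conversion from `fmt.Sprintf` to `strconv.Itoa`. A sketch:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	port := 9100

	// JoinHostPort wants a string port and adds brackets for IPv6 hosts.
	fmt.Println(net.JoinHostPort("10.0.0.1", strconv.Itoa(port)))    // 10.0.0.1:9100
	fmt.Println(net.JoinHostPort("2001:db8::1", strconv.Itoa(port))) // [2001:db8::1]:9100
}
```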
@@ -18,6 +18,7 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"strconv"
 	"strings"
 	"time"
 
@@ -229,7 +230,7 @@ func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 			lightsailLabelRegion: model.LabelValue(d.cfg.Region),
 		}
 
-		addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
+		addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
 		labels[model.AddressLabel] = model.LabelValue(addr)
 
 		if inst.PublicIpAddress != nil {
@@ -20,6 +20,7 @@ import (
 	"math/rand"
 	"net"
 	"net/http"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -492,7 +493,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
 		}
 		if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
 			labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
-			address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
+			address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, strconv.Itoa(d.port))
 			labels[model.AddressLabel] = model.LabelValue(address)
 			return labels, nil
 		}
@@ -539,9 +539,9 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
 		// since the service may be registered remotely through a different node.
 		var addr string
 		if serviceNode.Service.Address != "" {
-			addr = net.JoinHostPort(serviceNode.Service.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
+			addr = net.JoinHostPort(serviceNode.Service.Address, strconv.Itoa(serviceNode.Service.Port))
 		} else {
-			addr = net.JoinHostPort(serviceNode.Node.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
+			addr = net.JoinHostPort(serviceNode.Node.Address, strconv.Itoa(serviceNode.Service.Port))
 		}
 
 		labels := model.LabelSet{
@@ -177,7 +177,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 		}
 
 		labels := model.LabelSet{
-			doLabelID:        model.LabelValue(fmt.Sprintf("%d", droplet.ID)),
+			doLabelID:        model.LabelValue(strconv.Itoa(droplet.ID)),
 			doLabelName:      model.LabelValue(droplet.Name),
 			doLabelImage:     model.LabelValue(droplet.Image.Slug),
 			doLabelImageName: model.LabelValue(droplet.Image.Name),
@@ -18,6 +18,7 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -200,7 +201,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 
 	tg := &targetgroup.Group{}
 	hostPort := func(a string, p int) model.LabelValue {
-		return model.LabelValue(net.JoinHostPort(a, fmt.Sprintf("%d", p)))
+		return model.LabelValue(net.JoinHostPort(a, strconv.Itoa(p)))
 	}
 
 	for _, record := range response.Answer {
@@ -209,7 +210,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 		switch addr := record.(type) {
 		case *dns.SRV:
 			dnsSrvRecordTarget = model.LabelValue(addr.Target)
-			dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port))
+			dnsSrvRecordPort = model.LabelValue(strconv.Itoa(int(addr.Port)))
 
 			// Remove the final dot from rooted DNS names to make them look more usual.
 			addr.Target = strings.TrimRight(addr.Target, ".")
@@ -15,7 +15,6 @@ package hetzner
 
 import (
 	"context"
-	"fmt"
 	"net"
 	"net/http"
 	"strconv"
@@ -92,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 	for i, server := range servers {
 		labels := model.LabelSet{
 			hetznerLabelRole:       model.LabelValue(HetznerRoleHcloud),
-			hetznerLabelServerID:   model.LabelValue(fmt.Sprintf("%d", server.ID)),
+			hetznerLabelServerID:   model.LabelValue(strconv.FormatInt(server.ID, 10)),
 			hetznerLabelServerName: model.LabelValue(server.Name),
 			hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
 			hetznerLabelPublicIPv4: model.LabelValue(server.PublicNet.IPv4.IP.String()),
@@ -102,10 +101,10 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 			hetznerLabelHcloudDatacenterLocation:            model.LabelValue(server.Datacenter.Location.Name),
 			hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone),
 			hetznerLabelHcloudType:                          model.LabelValue(server.ServerType.Name),
-			hetznerLabelHcloudCPUCores:                      model.LabelValue(fmt.Sprintf("%d", server.ServerType.Cores)),
+			hetznerLabelHcloudCPUCores:                      model.LabelValue(strconv.Itoa(server.ServerType.Cores)),
 			hetznerLabelHcloudCPUType:                       model.LabelValue(server.ServerType.CPUType),
-			hetznerLabelHcloudMemoryGB:                      model.LabelValue(fmt.Sprintf("%d", int(server.ServerType.Memory))),
-			hetznerLabelHcloudDiskGB:                        model.LabelValue(fmt.Sprintf("%d", server.ServerType.Disk)),
+			hetznerLabelHcloudMemoryGB:                      model.LabelValue(strconv.Itoa(int(server.ServerType.Memory))),
+			hetznerLabelHcloudDiskGB:                        model.LabelValue(strconv.Itoa(server.ServerType.Disk)),
 
 			model.AddressLabel: model.LabelValue(net.JoinHostPort(server.PublicNet.IPv4.IP.String(), strconv.FormatUint(uint64(d.port), 10))),
 		}
@@ -112,7 +112,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
 			hetznerLabelPublicIPv4:     model.LabelValue(server.Server.ServerIP),
 			hetznerLabelServerStatus:   model.LabelValue(server.Server.Status),
 			hetznerLabelRobotProduct:   model.LabelValue(server.Server.Product),
-			hetznerLabelRobotCancelled: model.LabelValue(fmt.Sprintf("%t", server.Server.Canceled)),
+			hetznerLabelRobotCancelled: model.LabelValue(strconv.FormatBool(server.Server.Canceled)),
 
 			model.AddressLabel: model.LabelValue(net.JoinHostPort(server.Server.ServerIP, strconv.FormatUint(uint64(d.port), 10))),
 		}
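The Hetzner hunks show that the replacement depends on the integer's type: `strconv.Itoa` accepts only `int`, so `int64` server IDs use `strconv.FormatInt`, unsigned ports use `strconv.FormatUint`, and booleans use `strconv.FormatBool`, each spelling out base 10 where applicable. Illustrated in isolation:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	var id int64 = 4711  // e.g. a server ID
	var port uint = 9100 // e.g. a discovery port
	cores := 8
	canceled := false

	fmt.Println(strconv.FormatInt(id, 10))            // int64 -> "4711"
	fmt.Println(strconv.FormatUint(uint64(port), 10)) // uint  -> "9100"
	fmt.Println(strconv.Itoa(cores))                  // int   -> "8"
	fmt.Println(strconv.FormatBool(canceled))         // bool  -> "false"
}
```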
@@ -720,7 +720,7 @@ func staticConfig(addrs ...string) discovery.StaticConfig {
 	var cfg discovery.StaticConfig
 	for i, addr := range addrs {
 		cfg = append(cfg, &targetgroup.Group{
-			Source: fmt.Sprint(i),
+			Source: strconv.Itoa(i),
 			Targets: []model.LabelSet{
 				{model.AddressLabel: model.LabelValue(addr)},
 			},
@@ -325,7 +325,7 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
 		}
 
 		labels := model.LabelSet{
-			linodeLabelID:          model.LabelValue(fmt.Sprintf("%d", instance.ID)),
+			linodeLabelID:          model.LabelValue(strconv.Itoa(instance.ID)),
 			linodeLabelName:        model.LabelValue(instance.Label),
 			linodeLabelImage:       model.LabelValue(instance.Image),
 			linodeLabelPrivateIPv4: model.LabelValue(privateIPv4),
@@ -338,13 +338,13 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
 			linodeLabelType:               model.LabelValue(instance.Type),
 			linodeLabelStatus:             model.LabelValue(instance.Status),
 			linodeLabelGroup:              model.LabelValue(instance.Group),
-			linodeLabelGPUs:               model.LabelValue(fmt.Sprintf("%d", instance.Specs.GPUs)),
+			linodeLabelGPUs:               model.LabelValue(strconv.Itoa(instance.Specs.GPUs)),
 			linodeLabelHypervisor:         model.LabelValue(instance.Hypervisor),
 			linodeLabelBackups:            model.LabelValue(backupsStatus),
-			linodeLabelSpecsDiskBytes:     model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)),
-			linodeLabelSpecsMemoryBytes:   model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)),
-			linodeLabelSpecsVCPUs:         model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)),
-			linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)),
+			linodeLabelSpecsDiskBytes:     model.LabelValue(strconv.FormatInt(int64(instance.Specs.Disk)<<20, 10)),
+			linodeLabelSpecsMemoryBytes:   model.LabelValue(strconv.FormatInt(int64(instance.Specs.Memory)<<20, 10)),
+			linodeLabelSpecsVCPUs:         model.LabelValue(strconv.Itoa(instance.Specs.VCPUs)),
+			linodeLabelSpecsTransferBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Transfer)<<20, 10)),
 		}
 
 		addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10))
@@ -720,7 +720,7 @@ func staticConfig(addrs ...string) StaticConfig {
 	var cfg StaticConfig
 	for i, addr := range addrs {
 		cfg = append(cfg, &targetgroup.Group{
-			Source: fmt.Sprint(i),
+			Source: strconv.Itoa(i),
 			Targets: []model.LabelSet{
 				{model.AddressLabel: model.LabelValue(addr)},
 			},
@@ -505,7 +505,7 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string {
 		host = task.Host
 	}
 
-	return net.JoinHostPort(host, fmt.Sprintf("%d", port))
+	return net.JoinHostPort(host, strconv.Itoa(int(port)))
 }
 
 // Get a list of ports and a list of labels from a PortMapping.
@@ -15,7 +15,7 @@ package moby
 
 import (
 	"context"
-	"fmt"
+	"strconv"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
@@ -44,8 +44,8 @@ func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix s
 			labelPrefix + labelNetworkID:       network.ID,
 			labelPrefix + labelNetworkName:     network.Name,
 			labelPrefix + labelNetworkScope:    network.Scope,
-			labelPrefix + labelNetworkInternal: fmt.Sprintf("%t", network.Internal),
-			labelPrefix + labelNetworkIngress:  fmt.Sprintf("%t", network.Ingress),
+			labelPrefix + labelNetworkInternal: strconv.FormatBool(network.Internal),
+			labelPrefix + labelNetworkIngress:  strconv.FormatBool(network.Ingress),
 		}
 		for k, v := range network.Labels {
 			ln := strutil.SanitizeLabelName(k)
@@ -66,7 +66,7 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err
 			swarmLabelNodeAddress: model.LabelValue(n.Status.Addr),
 		}
 		if n.ManagerStatus != nil {
-			labels[swarmLabelNodeManagerLeader] = model.LabelValue(fmt.Sprintf("%t", n.ManagerStatus.Leader))
+			labels[swarmLabelNodeManagerLeader] = model.LabelValue(strconv.FormatBool(n.ManagerStatus.Leader))
 			labels[swarmLabelNodeManagerReachability] = model.LabelValue(n.ManagerStatus.Reachability)
 			labels[swarmLabelNodeManagerAddr] = model.LabelValue(n.ManagerStatus.Addr)
 		}
@@ -116,7 +116,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group,
 				labels[model.LabelName(k)] = model.LabelValue(v)
 			}
 
-			addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
+			addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port))
 			labels[model.AddressLabel] = model.LabelValue(addr)
 
 			tg.Targets = append(tg.Targets, labels)
@@ -150,7 +150,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
 				labels[model.LabelName(k)] = model.LabelValue(v)
 			}
 
-			addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
+			addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port))
 			labels[model.AddressLabel] = model.LabelValue(addr)
 
 			tg.Targets = append(tg.Targets, labels)
@@ -17,6 +17,7 @@ import (
 	"context"
 	"fmt"
 	"net"
+	"strconv"
 
 	"github.com/go-kit/log"
 	"github.com/gophercloud/gophercloud"
@@ -72,7 +73,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
 	}
 
 	tg := &targetgroup.Group{
-		Source: fmt.Sprintf("OS_" + h.region),
+		Source: "OS_" + h.region,
 	}
 	// OpenStack API reference
 	// https://developer.openstack.org/api-ref/compute/#list-hypervisors-details
@@ -84,7 +85,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
 	}
 	for _, hypervisor := range hypervisorList {
 		labels := model.LabelSet{}
-		addr := net.JoinHostPort(hypervisor.HostIP, fmt.Sprintf("%d", h.port))
+		addr := net.JoinHostPort(hypervisor.HostIP, strconv.Itoa(h.port))
 		labels[model.AddressLabel] = model.LabelValue(addr)
 		labels[openstackLabelHypervisorID] = model.LabelValue(hypervisor.ID)
 		labels[openstackLabelHypervisorHostName] = model.LabelValue(hypervisor.HypervisorHostname)
@@ -17,6 +17,7 @@ import (
 	"context"
 	"fmt"
 	"net"
+	"strconv"
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
@@ -120,7 +121,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 	}
 	pager := servers.List(client, opts)
 	tg := &targetgroup.Group{
-		Source: fmt.Sprintf("OS_" + i.region),
+		Source: "OS_" + i.region,
 	}
 	err = pager.EachPage(func(page pagination.Page) (bool, error) {
 		if ctx.Err() != nil {
@@ -194,7 +195,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 			if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok {
 				lbls[openstackLabelPublicIP] = model.LabelValue(val)
 			}
-			addr = net.JoinHostPort(addr, fmt.Sprintf("%d", i.port))
+			addr = net.JoinHostPort(addr, strconv.Itoa(i.port))
 			lbls[model.AddressLabel] = model.LabelValue(addr)
 
 			tg.Targets = append(tg.Targets, lbls)
@@ -144,12 +144,12 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou
 			model.InstanceLabel:                             model.LabelValue(server.Name),
 			dedicatedServerLabelPrefix + "state":            model.LabelValue(server.State),
 			dedicatedServerLabelPrefix + "commercial_range": model.LabelValue(server.CommercialRange),
-			dedicatedServerLabelPrefix + "link_speed":       model.LabelValue(fmt.Sprintf("%d", server.LinkSpeed)),
+			dedicatedServerLabelPrefix + "link_speed":       model.LabelValue(strconv.Itoa(server.LinkSpeed)),
 			dedicatedServerLabelPrefix + "rack":             model.LabelValue(server.Rack),
 			dedicatedServerLabelPrefix + "no_intervention":  model.LabelValue(strconv.FormatBool(server.NoIntervention)),
 			dedicatedServerLabelPrefix + "os":               model.LabelValue(server.Os),
 			dedicatedServerLabelPrefix + "support_level":    model.LabelValue(server.SupportLevel),
-			dedicatedServerLabelPrefix + "server_id":        model.LabelValue(fmt.Sprintf("%d", server.ServerID)),
+			dedicatedServerLabelPrefix + "server_id":        model.LabelValue(strconv.FormatInt(server.ServerID, 10)),
 			dedicatedServerLabelPrefix + "reverse":          model.LabelValue(server.Reverse),
 			dedicatedServerLabelPrefix + "datacenter":       model.LabelValue(server.Datacenter),
 			dedicatedServerLabelPrefix + "name":             model.LabelValue(server.Name),
@@ -19,6 +19,7 @@ import (
 	"net/netip"
 	"net/url"
 	"path"
+	"strconv"
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
@@ -161,21 +162,21 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 			model.InstanceLabel:                      model.LabelValue(server.Name),
 			vpsLabelPrefix + "offer":                 model.LabelValue(server.Model.Offer),
 			vpsLabelPrefix + "datacenter":            model.LabelValue(fmt.Sprintf("%+v", server.Model.Datacenter)),
-			vpsLabelPrefix + "model_vcore":           model.LabelValue(fmt.Sprintf("%d", server.Model.Vcore)),
-			vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(fmt.Sprintf("%d", server.Model.MaximumAdditionalIP)),
+			vpsLabelPrefix + "model_vcore":           model.LabelValue(strconv.Itoa(server.Model.Vcore)),
+			vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(strconv.Itoa(server.Model.MaximumAdditionalIP)),
 			vpsLabelPrefix + "version":               model.LabelValue(server.Model.Version),
 			vpsLabelPrefix + "model_name":            model.LabelValue(server.Model.Name),
-			vpsLabelPrefix + "disk":                  model.LabelValue(fmt.Sprintf("%d", server.Model.Disk)),
-			vpsLabelPrefix + "memory":                model.LabelValue(fmt.Sprintf("%d", server.Model.Memory)),
+			vpsLabelPrefix + "disk":                  model.LabelValue(strconv.Itoa(server.Model.Disk)),
+			vpsLabelPrefix + "memory":                model.LabelValue(strconv.Itoa(server.Model.Memory)),
 			vpsLabelPrefix + "zone":                  model.LabelValue(server.Zone),
 			vpsLabelPrefix + "display_name":          model.LabelValue(server.DisplayName),
 			vpsLabelPrefix + "cluster":               model.LabelValue(server.Cluster),
 			vpsLabelPrefix + "state":                 model.LabelValue(server.State),
 			vpsLabelPrefix + "name":                  model.LabelValue(server.Name),
 			vpsLabelPrefix + "netboot_mode":          model.LabelValue(server.NetbootMode),
-			vpsLabelPrefix + "memory_limit":          model.LabelValue(fmt.Sprintf("%d", server.MemoryLimit)),
+			vpsLabelPrefix + "memory_limit":          model.LabelValue(strconv.Itoa(server.MemoryLimit)),
 			vpsLabelPrefix + "offer_type":            model.LabelValue(server.OfferType),
-			vpsLabelPrefix + "vcore":                 model.LabelValue(fmt.Sprintf("%d", server.Vcore)),
+			vpsLabelPrefix + "vcore":                 model.LabelValue(strconv.Itoa(server.Vcore)),
 			vpsLabelPrefix + "ipv4":                  model.LabelValue(ipv4),
 			vpsLabelPrefix + "ipv6":                  model.LabelValue(ipv6),
 		}
@@ -237,7 +237,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 			pdbLabelResource:    model.LabelValue(resource.Resource),
 			pdbLabelType:        model.LabelValue(resource.Type),
 			pdbLabelTitle:       model.LabelValue(resource.Title),
-			pdbLabelExported:    model.LabelValue(fmt.Sprintf("%t", resource.Exported)),
+			pdbLabelExported:    model.LabelValue(strconv.FormatBool(resource.Exported)),
 			pdbLabelFile:        model.LabelValue(resource.File),
 			pdbLabelEnvironment: model.LabelValue(resource.Environment),
 		}
@@ -20,6 +20,7 @@ import (
 	"net/http"
 	"net/url"
 	"path"
+	"strconv"
 	"strings"
 	"time"
 
@@ -269,7 +270,7 @@ func (d *Discovery) getEndpointLabels(
 		model.AddressLabel:       model.LabelValue(addr),
 		uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname),
 		uyuniLabelPrimaryFQDN:    model.LabelValue(networkInfo.PrimaryFQDN),
-		uyuniLablelSystemID:      model.LabelValue(fmt.Sprintf("%d", endpoint.SystemID)),
+		uyuniLablelSystemID:      model.LabelValue(strconv.Itoa(endpoint.SystemID)),
 		uyuniLablelGroups:        model.LabelValue(strings.Join(managedGroupNames, d.separator)),
 		uyuniLablelEndpointName:  model.LabelValue(endpoint.EndpointName),
 		uyuniLablelExporter:      model.LabelValue(endpoint.ExporterName),
@@ -280,17 +280,17 @@ func parseServersetMember(data []byte, path string) (model.LabelSet, error) {
 	labels := model.LabelSet{}
 	labels[serversetPathLabel] = model.LabelValue(path)
 	labels[model.AddressLabel] = model.LabelValue(
-		net.JoinHostPort(member.ServiceEndpoint.Host, fmt.Sprintf("%d", member.ServiceEndpoint.Port)))
+		net.JoinHostPort(member.ServiceEndpoint.Host, strconv.Itoa(member.ServiceEndpoint.Port)))
 
 	labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host)
-	labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.ServiceEndpoint.Port))
+	labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.ServiceEndpoint.Port))
 
 	for name, endpoint := range member.AdditionalEndpoints {
 		cleanName := model.LabelName(strutil.SanitizeLabelName(name))
 		labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue(
 			endpoint.Host)
 		labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue(
-			fmt.Sprintf("%d", endpoint.Port))
+			strconv.Itoa(endpoint.Port))
 	}
 
 	labels[serversetStatusLabel] = model.LabelValue(member.Status)
@@ -321,10 +321,10 @@ func parseNerveMember(data []byte, path string) (model.LabelSet, error) {
 	labels := model.LabelSet{}
 	labels[nervePathLabel] = model.LabelValue(path)
 	labels[model.AddressLabel] = model.LabelValue(
-		net.JoinHostPort(member.Host, fmt.Sprintf("%d", member.Port)))
+		net.JoinHostPort(member.Host, strconv.Itoa(member.Port)))
 
 	labels[nerveEndpointLabelPrefix+"_host"] = model.LabelValue(member.Host)
-	labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.Port))
+	labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.Port))
 	labels[nerveEndpointLabelPrefix+"_name"] = model.LabelValue(member.Name)
 
 	return labels, nil
@@ -48,7 +48,7 @@ The Prometheus monitoring server
 | <code class="text-nowrap">--rules.alert.for-outage-tolerance</code> | Max time to tolerate prometheus outage for restoring "for" state of alert. Use with server mode only. | `1h` |
 | <code class="text-nowrap">--rules.alert.for-grace-period</code> | Minimum duration between alert and restored "for" state. This is maintained only for alerts with configured "for" time greater than grace period. Use with server mode only. | `10m` |
 | <code class="text-nowrap">--rules.alert.resend-delay</code> | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` |
-| <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. Use with server mode only. | `4` |
+| <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. When set, "query.max-concurrency" may need to be adjusted accordingly. Use with server mode only. | `4` |
 | <code class="text-nowrap">--alertmanager.notification-queue-capacity</code> | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` |
 | <code class="text-nowrap">--query.lookback-delta</code> | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` |
 | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
@@ -566,6 +566,7 @@ Dump samples from a TSDB.
 
 | Flag | Description | Default |
 | --- | --- | --- |
+| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
 | <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
 | <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
 | <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
@@ -584,7 +585,7 @@ Dump samples from a TSDB.
 
 ##### `promtool tsdb dump-openmetrics`
 
-[Experimental] Dump samples from a TSDB into OpenMetrics format. Native histograms are not dumped.
+[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.
 
@@ -592,6 +593,7 @@ Dump samples from a TSDB.
 
 | Flag | Description | Default |
 | --- | --- | --- |
+| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
 | <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
 | <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
 | <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
@@ -1467,6 +1467,7 @@ For OVHcloud's [public cloud instances](https://www.ovhcloud.com/en/public-cloud
 * `__meta_ovhcloud_dedicated_server_ipv6`: the IPv6 of the server
 * `__meta_ovhcloud_dedicated_server_link_speed`: the link speed of the server
 * `__meta_ovhcloud_dedicated_server_name`: the name of the server
+* `__meta_ovhcloud_dedicated_server_no_intervention`: whether datacenter intervention is disabled for the server
 * `__meta_ovhcloud_dedicated_server_os`: the operating system of the server
 * `__meta_ovhcloud_dedicated_server_rack`: the rack of the server
 * `__meta_ovhcloud_dedicated_server_reverse`: the reverse DNS name of the server
@@ -3673,7 +3674,8 @@ queue_config:
   [ min_shards: <int> | default = 1 ]
   # Maximum number of samples per send.
   [ max_samples_per_send: <int> | default = 2000]
-  # Maximum time a sample will wait in buffer.
+  # Maximum time a sample will wait for a send. The sample might wait less
+  # if the buffer is full. Further time might pass due to potential retries.
   [ batch_send_deadline: <duration> | default = 5s ]
   # Initial retry delay. Gets doubled for every retry.
   [ min_backoff: <duration> | default = 30ms ]
@@ -197,6 +197,9 @@ or time-series database to Prometheus. To do so, the user must first convert the
 source data into [OpenMetrics](https://openmetrics.io/) format, which is the
 input format for the backfilling as described below.

+Note that native histograms and staleness markers are not supported by this
+procedure, as they cannot be represented in the OpenMetrics format.
+
 ### Usage

 Backfilling can be used via the Promtool command line. Promtool will write the blocks
@@ -127,9 +127,9 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target
 	// since the service may be registered remotely through a different node.
 	var addr string
 	if node.ServiceAddress != "" {
-		addr = net.JoinHostPort(node.ServiceAddress, fmt.Sprintf("%d", node.ServicePort))
+		addr = net.JoinHostPort(node.ServiceAddress, strconv.Itoa(node.ServicePort))
 	} else {
-		addr = net.JoinHostPort(node.Address, fmt.Sprintf("%d", node.ServicePort))
+		addr = net.JoinHostPort(node.Address, strconv.Itoa(node.ServicePort))
 	}

 	target := model.LabelSet{model.AddressLabel: model.LabelValue(addr)}
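A recurring change in this commit is replacing `fmt.Sprintf("%d", n)` with `strconv.Itoa(n)`: `strconv` formats the integer directly, while `fmt.Sprintf` boxes the argument into an `interface{}` and runs the general formatting machinery, which costs an allocation and reflection-style dispatch. A minimal standalone sketch of the pattern (the host name and port here are made up for illustration):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	port := 9090

	// fmt.Sprintf routes the int through an interface{} and the fmt
	// state machine; strconv.Itoa formats the integer directly.
	slow := net.JoinHostPort("localhost", fmt.Sprintf("%d", port))
	fast := net.JoinHostPort("localhost", strconv.Itoa(port))

	fmt.Println(slow == fast, fast) // true localhost:9090
}
```

The same substitution shows up throughout the test files below, e.g. for subtest names (`t.Run(strconv.Itoa(i), ...)`).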
@@ -44,5 +44,10 @@
       // The default refresh time for all dashboards, default to 60s
       refresh: '60s',
     },
+
+    // Opt-out of multi-cluster dashboards by overriding this.
+    showMultiCluster: true,
+    // The cluster label to infer the cluster name from.
+    clusterLabel: 'cluster',
   },
 }
@@ -10,21 +10,32 @@ local template = grafana.template;
 {
   grafanaDashboards+:: {
     'prometheus.json':
-      g.dashboard(
+      local showMultiCluster = $._config.showMultiCluster;
+      local dashboard = g.dashboard(
         '%(prefix)sOverview' % $._config.grafanaPrometheus
-      )
-      .addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'cluster')
-      .addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job')
-      .addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance')
+      );
+      local templatedDashboard = if showMultiCluster then
+        dashboard
+        .addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, $._config.clusterLabel)
+        .addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job')
+        .addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance')
+      else
+        dashboard
+        .addMultiTemplate('job', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'job')
+        .addMultiTemplate('instance', 'prometheus_build_info{job=~"$job"}', 'instance');
+      templatedDashboard
       .addRow(
         g.row('Prometheus Stats')
         .addPanel(
           g.panel('Prometheus Stats') +
-          g.tablePanel([
+          g.tablePanel(if showMultiCluster then [
             'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
             'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
+          ] else [
+            'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})',
+            'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})',
           ], {
-            cluster: { alias: 'Cluster' },
+            cluster: { alias: if showMultiCluster then 'Cluster' else '' },
             job: { alias: 'Job' },
             instance: { alias: 'Instance' },
             version: { alias: 'Version' },
@@ -37,12 +48,18 @@ local template = grafana.template;
         g.row('Discovery')
         .addPanel(
           g.panel('Target Sync') +
-          g.queryPanel('sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3', '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}') +
+          g.queryPanel(if showMultiCluster then 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3'
+                       else 'sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3',
+                       if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}'
+                       else '{{scrape_job}}') +
           { yaxes: g.yaxes('ms') }
         )
         .addPanel(
           g.panel('Targets') +
-          g.queryPanel('sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})', '{{cluster}}:{{job}}:{{instance}}') +
+          g.queryPanel(if showMultiCluster then 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})'
+                       else 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})',
+                       if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}'
+                       else 'Targets') +
           g.stack
         )
       )
@@ -50,29 +67,47 @@ local template = grafana.template;
         g.row('Retrieval')
         .addPanel(
           g.panel('Average Scrape Interval Duration') +
-          g.queryPanel('rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3', '{{cluster}}:{{job}}:{{instance}} {{interval}} configured') +
+          g.queryPanel(if showMultiCluster then 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3'
+                       else 'rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3',
+                       if showMultiCluster then '{{cluster}}:{{job}}:{{instance}} {{interval}} configured'
+                       else '{{interval}} configured') +
           { yaxes: g.yaxes('ms') }
         )
         .addPanel(
           g.panel('Scrape failures') +
-          g.queryPanel([
+          g.queryPanel(if showMultiCluster then [
             'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
             'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
             'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
             'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
             'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
-          ], [
+          ] else [
+            'sum by (job) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total[1m]))',
+            'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))',
+            'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))',
+            'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))',
+            'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))',
+          ], if showMultiCluster then [
             'exceeded body size limit: {{cluster}} {{job}} {{instance}}',
             'exceeded sample limit: {{cluster}} {{job}} {{instance}}',
             'duplicate timestamp: {{cluster}} {{job}} {{instance}}',
             'out of bounds: {{cluster}} {{job}} {{instance}}',
             'out of order: {{cluster}} {{job}} {{instance}}',
+          ] else [
+            'exceeded body size limit: {{job}}',
+            'exceeded sample limit: {{job}}',
+            'duplicate timestamp: {{job}}',
+            'out of bounds: {{job}}',
+            'out of order: {{job}}',
           ]) +
           g.stack
         )
         .addPanel(
           g.panel('Appended Samples') +
-          g.queryPanel('rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])', '{{cluster}} {{job}} {{instance}}') +
+          g.queryPanel(if showMultiCluster then 'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])'
+                       else 'rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])',
+                       if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
+                       else '{{job}} {{instance}}') +
           g.stack
         )
       )
@@ -80,12 +115,18 @@ local template = grafana.template;
         g.row('Storage')
         .addPanel(
           g.panel('Head Series') +
-          g.queryPanel('prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}', '{{cluster}} {{job}} {{instance}} head series') +
+          g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
+                       else 'prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}',
+                       if showMultiCluster then '{{cluster}} {{job}} {{instance}} head series'
+                       else '{{job}} {{instance}} head series') +
           g.stack
         )
         .addPanel(
           g.panel('Head Chunks') +
-          g.queryPanel('prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}', '{{cluster}} {{job}} {{instance}} head chunks') +
+          g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
+                       else 'prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}',
+                       if showMultiCluster then '{{cluster}} {{job}} {{instance}} head chunks'
+                       else '{{job}} {{instance}} head chunks') +
           g.stack
         )
       )
@@ -93,12 +134,18 @@ local template = grafana.template;
         g.row('Query')
         .addPanel(
           g.panel('Query Rate') +
-          g.queryPanel('rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])', '{{cluster}} {{job}} {{instance}}') +
+          g.queryPanel(if showMultiCluster then 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])'
+                       else 'rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])',
+                       if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
+                       else '{{job}} {{instance}}') +
           g.stack,
         )
         .addPanel(
           g.panel('Stage Duration') +
-          g.queryPanel('max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3', '{{slice}}') +
+          g.queryPanel(if showMultiCluster then 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3'
+                       else 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3',
+                       if showMultiCluster then '{{slice}}'
+                       else '{{slice}}') +
           { yaxes: g.yaxes('ms') } +
           g.stack,
         )
go.mod (2 changes)
@@ -60,7 +60,6 @@ require (
 	github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
 	github.com/stretchr/testify v1.9.0
 	github.com/vultr/govultr/v2 v2.17.2
-	go.opentelemetry.io/collector/featuregate v1.5.0
 	go.opentelemetry.io/collector/pdata v1.5.0
 	go.opentelemetry.io/collector/semconv v0.98.0
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0
@@ -151,7 +150,6 @@ require (
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
 	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
-	github.com/hashicorp/go-version v1.6.0 // indirect
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
 	github.com/hashicorp/serf v0.10.1 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
go.sum (2 changes)
@@ -722,8 +722,6 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM=
-go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
 go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE=
 go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw=
 go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY=
@@ -14,9 +14,9 @@
 package histogram

 import (
-	"fmt"
 	"math"
 	"math/rand"
+	"strconv"
 	"testing"

 	"github.com/stretchr/testify/require"
@@ -2134,7 +2134,7 @@ func TestAllFloatBucketIterator(t *testing.T) {
 	}

 	for i, c := range cases {
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			var expBuckets, actBuckets []Bucket[float64]

 			if c.includeNeg {
@@ -2360,7 +2360,7 @@ func TestAllReverseFloatBucketIterator(t *testing.T) {
 	}

 	for i, c := range cases {
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			var expBuckets, actBuckets []Bucket[float64]

 			if c.includePos {
@@ -14,8 +14,8 @@
 package histogram

 import (
-	"fmt"
 	"math"
+	"strconv"
 	"testing"

 	"github.com/stretchr/testify/require"
@@ -72,7 +72,7 @@ func TestHistogramString(t *testing.T) {
 	}

 	for i, c := range cases {
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			actualString := c.histogram.String()
 			require.Equal(t, c.expectedString, actualString)
 		})
@@ -211,7 +211,7 @@ func TestCumulativeBucketIterator(t *testing.T) {
 	}

 	for i, c := range cases {
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			it := c.histogram.CumulativeBucketIterator()
 			actualBuckets := make([]Bucket[uint64], 0, len(c.expectedBuckets))
 			for it.Next() {
@@ -371,7 +371,7 @@ func TestRegularBucketIterator(t *testing.T) {
 	}

 	for i, c := range cases {
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			it := c.histogram.PositiveBucketIterator()
 			actualPositiveBuckets := make([]Bucket[uint64], 0, len(c.expectedPositiveBuckets))
 			for it.Next() {
@@ -17,6 +17,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
+	"strconv"
 	"strings"
 	"testing"

@@ -732,7 +733,7 @@ func TestScratchBuilder(t *testing.T) {
 			want: FromStrings("ddd", "444"),
 		},
 	} {
-		t.Run(fmt.Sprint(i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			b := NewScratchBuilder(len(tcase.add))
 			for _, lbl := range tcase.add {
 				b.Add(lbl.Name, lbl.Value)
@@ -14,7 +14,8 @@
 package labels

 import (
-	"fmt"
+	"bytes"
+	"strconv"
 )

 // MatchType is an enum for label matching types.
@@ -78,7 +79,29 @@ func MustNewMatcher(mt MatchType, name, val string) *Matcher {
 }

 func (m *Matcher) String() string {
-	return fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value)
+	// Start a buffer with a pre-allocated size on stack to cover most needs.
+	var bytea [1024]byte
+	b := bytes.NewBuffer(bytea[:0])
+
+	if m.shouldQuoteName() {
+		b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Name))
+	} else {
+		b.WriteString(m.Name)
+	}
+	b.WriteString(m.Type.String())
+	b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Value))
+
+	return b.String()
+}
+
+func (m *Matcher) shouldQuoteName() bool {
+	for i, c := range m.Name {
+		if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (i > 0 && c >= '0' && c <= '9') {
+			continue
+		}
+		return true
+	}
+	return false
 }

 // Matches returns whether the matcher matches the given string value.
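The rewritten `String()` quotes a label name only when it is not a valid bare identifier, so ordinary matchers print exactly as before while dotted or numeric names gain quotes. A small usage sketch of the expected output, assuming the `model/labels` package API shown above (not part of the commit):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// A plain identifier is printed bare; a dotted name needs quoting.
	fmt.Println(labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))  // foo="bar"
	fmt.Println(labels.MustNewMatcher(labels.MatchRegexp, "a.b", "c|d")) // "a.b"=~"c|d"
}
```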
@@ -15,6 +15,7 @@ package labels

 import (
 	"fmt"
+	"math/rand"
 	"testing"

 	"github.com/stretchr/testify/require"
@@ -225,3 +226,128 @@ func BenchmarkNewMatcher(b *testing.B) {
 		}
 	})
 }
+
+func BenchmarkMatcher_String(b *testing.B) {
+	type benchCase struct {
+		name     string
+		matchers []*Matcher
+	}
+	cases := []benchCase{
+		{
+			name: "short name equal",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchEqual, "foo", "bar"),
+				MustNewMatcher(MatchEqual, "bar", "baz"),
+				MustNewMatcher(MatchEqual, "abc", "def"),
+				MustNewMatcher(MatchEqual, "ghi", "klm"),
+				MustNewMatcher(MatchEqual, "nop", "qrs"),
+			},
+		},
+		{
+			name: "short quoted name not equal",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchEqual, "f.o", "bar"),
+				MustNewMatcher(MatchEqual, "b.r", "baz"),
+				MustNewMatcher(MatchEqual, "a.c", "def"),
+				MustNewMatcher(MatchEqual, "g.i", "klm"),
+				MustNewMatcher(MatchEqual, "n.p", "qrs"),
+			},
+		},
+		{
+			name: "short quoted name with quotes not equal",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchEqual, `"foo"`, "bar"),
+				MustNewMatcher(MatchEqual, `"foo"`, "baz"),
+				MustNewMatcher(MatchEqual, `"foo"`, "def"),
+				MustNewMatcher(MatchEqual, `"foo"`, "klm"),
+				MustNewMatcher(MatchEqual, `"foo"`, "qrs"),
+			},
+		},
+		{
+			name: "short name value with quotes equal",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchEqual, "foo", `"bar"`),
+				MustNewMatcher(MatchEqual, "bar", `"baz"`),
+				MustNewMatcher(MatchEqual, "abc", `"def"`),
+				MustNewMatcher(MatchEqual, "ghi", `"klm"`),
+				MustNewMatcher(MatchEqual, "nop", `"qrs"`),
+			},
+		},
+		{
+			name: "short name and long value regexp",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchRegexp, "foo", "five_six_seven_eight_nine_ten_one_two_three_four"),
+				MustNewMatcher(MatchRegexp, "bar", "one_two_three_four_five_six_seven_eight_nine_ten"),
+				MustNewMatcher(MatchRegexp, "abc", "two_three_four_five_six_seven_eight_nine_ten_one"),
+				MustNewMatcher(MatchRegexp, "ghi", "three_four_five_six_seven_eight_nine_ten_one_two"),
+				MustNewMatcher(MatchRegexp, "nop", "four_five_six_seven_eight_nine_ten_one_two_three"),
+			},
+		},
+		{
+			name: "short name and long value with quotes equal",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchEqual, "foo", `five_six_seven_eight_nine_ten_"one"_two_three_four`),
+				MustNewMatcher(MatchEqual, "bar", `one_two_three_four_five_six_"seven"_eight_nine_ten`),
+				MustNewMatcher(MatchEqual, "abc", `two_three_four_five_six_seven_"eight"_nine_ten_one`),
+				MustNewMatcher(MatchEqual, "ghi", `three_four_five_six_seven_eight_"nine"_ten_one_two`),
+				MustNewMatcher(MatchEqual, "nop", `four_five_six_seven_eight_nine_"ten"_one_two_three`),
+			},
+		},
+		{
+			name: "long name regexp",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchRegexp, "one_two_three_four_five_six_seven_eight_nine_ten", "val"),
+				MustNewMatcher(MatchRegexp, "two_three_four_five_six_seven_eight_nine_ten_one", "val"),
+				MustNewMatcher(MatchRegexp, "three_four_five_six_seven_eight_nine_ten_one_two", "val"),
+				MustNewMatcher(MatchRegexp, "four_five_six_seven_eight_nine_ten_one_two_three", "val"),
+				MustNewMatcher(MatchRegexp, "five_six_seven_eight_nine_ten_one_two_three_four", "val"),
+			},
+		},
+		{
+			name: "long quoted name regexp",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchRegexp, "one.two.three.four.five.six.seven.eight.nine.ten", "val"),
+				MustNewMatcher(MatchRegexp, "two.three.four.five.six.seven.eight.nine.ten.one", "val"),
+				MustNewMatcher(MatchRegexp, "three.four.five.six.seven.eight.nine.ten.one.two", "val"),
+				MustNewMatcher(MatchRegexp, "four.five.six.seven.eight.nine.ten.one.two.three", "val"),
+				MustNewMatcher(MatchRegexp, "five.six.seven.eight.nine.ten.one.two.three.four", "val"),
+			},
+		},
+		{
+			name: "long name and long value regexp",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchRegexp, "one_two_three_four_five_six_seven_eight_nine_ten", "five_six_seven_eight_nine_ten_one_two_three_four"),
+				MustNewMatcher(MatchRegexp, "two_three_four_five_six_seven_eight_nine_ten_one", "one_two_three_four_five_six_seven_eight_nine_ten"),
+				MustNewMatcher(MatchRegexp, "three_four_five_six_seven_eight_nine_ten_one_two", "two_three_four_five_six_seven_eight_nine_ten_one"),
+				MustNewMatcher(MatchRegexp, "four_five_six_seven_eight_nine_ten_one_two_three", "three_four_five_six_seven_eight_nine_ten_one_two"),
+				MustNewMatcher(MatchRegexp, "five_six_seven_eight_nine_ten_one_two_three_four", "four_five_six_seven_eight_nine_ten_one_two_three"),
+			},
+		},
+		{
+			name: "long quoted name and long value regexp",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchRegexp, "one.two.three.four.five.six.seven.eight.nine.ten", "five.six.seven.eight.nine.ten.one.two.three.four"),
+				MustNewMatcher(MatchRegexp, "two.three.four.five.six.seven.eight.nine.ten.one", "one.two.three.four.five.six.seven.eight.nine.ten"),
+				MustNewMatcher(MatchRegexp, "three.four.five.six.seven.eight.nine.ten.one.two", "two.three.four.five.six.seven.eight.nine.ten.one"),
+				MustNewMatcher(MatchRegexp, "four.five.six.seven.eight.nine.ten.one.two.three", "three.four.five.six.seven.eight.nine.ten.one.two"),
+				MustNewMatcher(MatchRegexp, "five.six.seven.eight.nine.ten.one.two.three.four", "four.five.six.seven.eight.nine.ten.one.two.three"),
+			},
+		},
+	}
+
+	var mixed []*Matcher
+	for _, bc := range cases {
+		mixed = append(mixed, bc.matchers...)
+	}
+	rand.Shuffle(len(mixed), func(i, j int) { mixed[i], mixed[j] = mixed[j], mixed[i] })
+	cases = append(cases, benchCase{name: "mixed", matchers: mixed})
+
+	for _, bc := range cases {
+		b.Run(bc.name, func(b *testing.B) {
+			for i := 0; i <= b.N; i++ {
+				m := bc.matchers[i%len(bc.matchers)]
+				_ = m.String()
+			}
+		})
+	}
+}
@@ -828,7 +828,12 @@ type zeroOrOneCharacterStringMatcher struct {
 }

 func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
-	if moreThanOneRune(s) {
+	// If there's more than one rune in the string, then it can't match.
+	if r, size := utf8.DecodeRuneInString(s); r == utf8.RuneError {
+		// Size is 0 for empty strings, 1 for invalid rune.
+		// Empty string matches, invalid rune matches if there isn't anything else.
+		return size == len(s)
+	} else if size < len(s) {
 		return false
 	}

@@ -840,27 +845,6 @@ func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
 	return s[0] != '\n'
 }

-// moreThanOneRune returns true if there are more than one runes in the string.
-// It doesn't check whether the string is valid UTF-8.
-// The return value should be always equal to utf8.RuneCountInString(s) > 1,
-// but the function is optimized for the common case where the string prefix is ASCII.
-func moreThanOneRune(s string) bool {
-	// If len(s) is exactly one or zero, there can't be more than one rune.
-	// Exit through this path quickly.
-	if len(s) <= 1 {
-		return false
-	}
-
-	// There's one or more bytes:
-	// If first byte is ASCII then there are multiple runes if there are more bytes after that.
-	if s[0] < utf8.RuneSelf {
-		return len(s) > 1
-	}
-
-	// Less common case: first is a multibyte rune.
-	return utf8.RuneCountInString(s) > 1
-}
-
 // trueMatcher is a stringMatcher which matches any string (always returns true).
 type trueMatcher struct{}
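The rewrite above matters for invalid UTF-8: `utf8.DecodeRuneInString` returns `utf8.RuneError` with size 0 for an empty string and size 1 for an invalid byte, which the new code uses so that a lone invalid byte matches `.?` just as Go's regexp engine would. A quick standalone sketch of that behavior:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	for _, s := range []string{"", "x", "😀", "\xff", "\xff\xfe"} {
		r, size := utf8.DecodeRuneInString(s)
		// For "" the size is 0; for an invalid byte like "\xff" the rune is
		// utf8.RuneError with size 1, so "\xff" alone still matches `.?`,
		// while "\xff\xfe" (two invalid bytes) does not.
		fmt.Printf("%q -> rune=%q size=%d invalid=%v\n", s, r, size, r == utf8.RuneError)
	}
}
```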
@@ -19,6 +19,7 @@ import (
 	"strings"
 	"testing"
 	"time"
+	"unicode/utf8"

 	"github.com/grafana/regexp"
 	"github.com/grafana/regexp/syntax"
@@ -36,6 +37,7 @@ var (
 	".*foo",
 	"^.*foo$",
 	"^.+foo$",
+	".?",
 	".*",
 	".+",
 	"foo.+",
@@ -88,6 +90,12 @@ var (

 	// Values matching / not matching the test regexps on long alternations.
 	"zQPbMkNO", "zQPbMkNo", "jyyfj00j0061", "jyyfj00j006", "jyyfj00j00612", "NNSPdvMi", "NNSPdvMiXXX", "NNSPdvMixxx", "nnSPdvMi", "nnSPdvMiXXX",
+
+	// Invalid utf8
+	"\xfefoo",
+	"foo\xfe",
+	"\xfd",
+	"\xff\xff",
 }
 )

@@ -926,19 +934,91 @@ func BenchmarkOptimizeEqualStringMatchers(b *testing.B) {
 }

 func TestZeroOrOneCharacterStringMatcher(t *testing.T) {
-	matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
-	require.True(t, matcher.Matches(""))
-	require.True(t, matcher.Matches("x"))
-	require.True(t, matcher.Matches("\n"))
-	require.False(t, matcher.Matches("xx"))
-	require.False(t, matcher.Matches("\n\n"))
+	t.Run("match newline", func(t *testing.T) {
+		matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
+		require.True(t, matcher.Matches(""))
+		require.True(t, matcher.Matches("x"))
+		require.True(t, matcher.Matches("\n"))
+		require.False(t, matcher.Matches("xx"))
+		require.False(t, matcher.Matches("\n\n"))
+	})

-	matcher = &zeroOrOneCharacterStringMatcher{matchNL: false}
-	require.True(t, matcher.Matches(""))
-	require.True(t, matcher.Matches("x"))
-	require.False(t, matcher.Matches("\n"))
-	require.False(t, matcher.Matches("xx"))
-	require.False(t, matcher.Matches("\n\n"))
+	t.Run("do not match newline", func(t *testing.T) {
+		matcher := &zeroOrOneCharacterStringMatcher{matchNL: false}
+		require.True(t, matcher.Matches(""))
+		require.True(t, matcher.Matches("x"))
+		require.False(t, matcher.Matches("\n"))
+		require.False(t, matcher.Matches("xx"))
+		require.False(t, matcher.Matches("\n\n"))
+	})
+
+	t.Run("unicode", func(t *testing.T) {
+		// Just for documentation purposes, emoji1 is 1 rune, emoji2 is 2 runes.
+		// Having this in mind, will make future readers fixing tests easier.
+		emoji1 := "😀"
+		emoji2 := "❤️"
+		require.Equal(t, 1, utf8.RuneCountInString(emoji1))
+		require.Equal(t, 2, utf8.RuneCountInString(emoji2))
+
+		matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
+		require.True(t, matcher.Matches(emoji1))
+		require.False(t, matcher.Matches(emoji2))
+		require.False(t, matcher.Matches(emoji1+emoji1))
+		require.False(t, matcher.Matches("x"+emoji1))
+		require.False(t, matcher.Matches(emoji1+"x"))
+		require.False(t, matcher.Matches(emoji1+emoji2))
+	})
+
+	t.Run("invalid unicode", func(t *testing.T) {
+		// Just for reference, we also compare to what `^.?$` regular expression matches.
+		re := regexp.MustCompile("^.?$")
+		matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
+
+		requireMatches := func(s string, expected bool) {
+			t.Helper()
+			require.Equal(t, expected, matcher.Matches(s))
+			require.Equal(t, re.MatchString(s), matcher.Matches(s))
+		}
+
+		requireMatches("\xff", true)
+		requireMatches("x\xff", false)
+		requireMatches("\xffx", false)
+		requireMatches("\xff\xfe", false)
+	})
+}
+
+func BenchmarkZeroOrOneCharacterStringMatcher(b *testing.B) {
+	type benchCase struct {
+		str     string
+		matches bool
+	}
+	emoji1 := "😀"
+	emoji2 := "❤️"
+	cases := []benchCase{
+		{"", true},
+		{"x", true},
+		{"\n", true},
+		{"xx", false},
+		{"\n\n", false},
+		{emoji1, true},
+		{emoji2, false},
+		{emoji1 + emoji1, false},
+		{strings.Repeat("x", 100), false},
+		{strings.Repeat(emoji1, 100), false},
+		{strings.Repeat(emoji2, 100), false},
+	}
+
+	matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
+	b.ResetTimer()
+
+	for n := 0; n < b.N; n++ {
+		c := cases[n%len(cases)]
+		got := matcher.Matches(c.str)
+		if got != c.matches {
+			b.Fatalf("unexpected result for %q: got %t, want %t", c.str, got, c.matches)
+		}
+	}
+}

 func TestLiteralPrefixStringMatcher(t *testing.T) {
@@ -17,6 +17,7 @@ import (
 	"crypto/md5"
 	"encoding/binary"
 	"fmt"
+	"strconv"
 	"strings"

 	"github.com/grafana/regexp"
@@ -290,7 +291,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) {
 		hash := md5.Sum([]byte(val))
 		// Use only the last 8 bytes of the hash to give the same result as earlier versions of this code.
 		mod := binary.BigEndian.Uint64(hash[8:]) % cfg.Modulus
-		lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod))
+		lb.Set(cfg.TargetLabel, strconv.FormatUint(mod, 10))
 	case LabelMap:
 		lb.Range(func(l labels.Label) {
 			if cfg.Regex.MatchString(l.Name) {
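Here `mod` is a `uint64`, so the replacement uses `strconv.FormatUint` rather than `strconv.Itoa`. A standalone sketch of the hashmod computation performed above (the label value `"some-target"` and modulus 8 are made up for illustration):

```go
package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
	"strconv"
)

func main() {
	val, modulus := "some-target", uint64(8)

	// Hash the concatenated source label values, then keep only the last
	// 8 bytes of the MD5 sum, exactly as the relabel code above does.
	hash := md5.Sum([]byte(val))
	mod := binary.BigEndian.Uint64(hash[8:]) % modulus

	fmt.Println(strconv.FormatUint(mod, 10)) // e.g. "3"
}
```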
@@ -14,7 +14,7 @@
 package relabel

 import (
-	"fmt"
+	"strconv"
 	"testing"

 	"github.com/prometheus/common/model"
@@ -657,7 +657,7 @@ func TestRelabelValidate(t *testing.T) {
 		},
 	}
 	for i, test := range tests {
-		t.Run(fmt.Sprint(i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			err := test.config.Validate()
 			if test.expected == "" {
 				require.NoError(t, err)
@@ -74,7 +74,7 @@ func TestHandlerNextBatch(t *testing.T) {

 	for i := range make([]struct{}, 2*maxBatchSize+1) {
 		h.queue = append(h.queue, &Alert{
-			Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+			Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
 		})
 	}

@@ -186,10 +186,10 @@ func TestHandlerSendAll(t *testing.T) {

 	for i := range make([]struct{}, maxBatchSize) {
 		h.queue = append(h.queue, &Alert{
-			Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+			Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
 		})
 		expected = append(expected, &Alert{
-			Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+			Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
 		})
 	}

@@ -297,23 +297,23 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) {
 	for i := range make([]struct{}, maxBatchSize/2) {
 		h.queue = append(h.queue,
 			&Alert{
-				Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+				Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
 			},
 			&Alert{
-				Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)),
+				Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)),
 			},
 		)

 		expected1 = append(expected1,
 			&Alert{
-				Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+				Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
 			}, &Alert{
-				Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)),
+				Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)),
 			},
 		)

 		expected2 = append(expected2, &Alert{
-			Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+			Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
 		})
 	}

@@ -502,7 +502,7 @@ func TestHandlerQueuing(t *testing.T) {
 	var alerts []*Alert
 	for i := range make([]struct{}, 20*maxBatchSize) {
 		alerts = append(alerts, &Alert{
-			Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+			Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
 		})
 	}

@@ -762,7 +762,7 @@ func TestHangingNotifier(t *testing.T) {
 	var alerts []*Alert
 	for i := range make([]struct{}, 20*maxBatchSize) {
 		alerts = append(alerts, &Alert{
-			Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+			Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
 		})
 	}
|
File diff suppressed because it is too large
Load diff
|
@@ -138,6 +138,16 @@ func TestExprString(t *testing.T) {
 		{
 			in: `{__name__="",a="x"}`,
 		},
+		{
+			in: `{"a.b"="c"}`,
+		},
+		{
+			in: `{"0"="1"}`,
+		},
+		{
+			in:  `{"_0"="1"}`,
+			out: `{_0="1"}`,
+		},
 	}

 	for _, test := range inputs {
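These new cases cover quoted label names in vector selectors: a quoted name that happens to be a valid identifier (`_0`) is printed bare again, while names like `a.b` and `0` keep their quotes. A sketch of the same round trip through the public parser API (this assumes a Prometheus version whose PromQL parser accepts quoted label names, as these tests do):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	for _, in := range []string{`{"a.b"="c"}`, `{"_0"="1"}`} {
		expr, err := parser.ParseExpr(in)
		if err != nil {
			panic(err)
		}
		// Printing re-quotes only the names that need it:
		// {"a.b"="c"} stays quoted, {"_0"="1"} becomes {_0="1"}.
		fmt.Println(expr.String())
	}
}
```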
promql/promqltest/testdata/native_histograms.test (vendored, 445 changes)
@@ -269,3 +269,448 @@ eval instant at 50m histogram_sum(sum(incr_sum_histogram))

 eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m])))
   {} 30
+
+# Apply rate function to histogram.
+load 15s
+  histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100
+
+eval instant at 5m rate(histogram_rate[45s])
+  {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}
+
+eval range from 5m to 5m30s step 30s rate(histogram_rate[45s])
+  {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1
+
+# Apply count and sum function to histogram.
+load 10m
+  histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
+
+eval instant at 10m histogram_count(histogram_count_sum_2)
+  {} 24
+
+eval instant at 10m histogram_sum(histogram_count_sum_2)
+  {} 100
+
+# Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res).
+load 10m
+  histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1
+
+eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1)
+  {} 1.0787993180043811
+
+eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1)
+  {} 1.163807968526718
+
+# Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res).
+load 10m
+  histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1
+
+eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2)
+  {} 0.0048960313898237465
+
+eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2)
+  {} 2.3971123370139447e-05
+
+# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}.
+load 10m
+  histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
+
+eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3)
+  {} 42.947236400258
+
+eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3)
+  {} 1844.4651144196398
+
+# Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}.
+load 10m
+  histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1
+
+eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4)
+  {} 27556.344499842
+
+eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4)
+  {} 759352122.1939945
+
+# Apply stddev and stdvar function to histogram with {-10x10}.
+load 10m
+  histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1
+
+eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5)
+  {} 1.3137084989848
+
+eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5)
+  {} 1.725830020304794
+
+# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}.
+load 10m
+  histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
+
+eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6)
+  {} NaN
+
+eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6)
+  {} NaN
+
+# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}.
+load 10m
+  histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
+
+eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7)
+  {} NaN
+
+eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7)
+  {} NaN
+
+# Apply quantile function to histogram with all positive buckets with zero bucket.
+load 10m
+  histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
+
+eval instant at 10m histogram_quantile(1.001, histogram_quantile_1)
+  {} Inf
+
+eval instant at 10m histogram_quantile(1, histogram_quantile_1)
+  {} 16
+
+eval instant at 10m histogram_quantile(0.99, histogram_quantile_1)
+  {} 15.759999999999998
+
+eval instant at 10m histogram_quantile(0.9, histogram_quantile_1)
+  {} 13.600000000000001
+
+eval instant at 10m histogram_quantile(0.6, histogram_quantile_1)
+  {} 4.799999999999997
+
+eval instant at 10m histogram_quantile(0.5, histogram_quantile_1)
+  {} 1.6666666666666665
+
+eval instant at 10m histogram_quantile(0.1, histogram_quantile_1)
+  {} 0.0006000000000000001
+
+eval instant at 10m histogram_quantile(0, histogram_quantile_1)
+  {} 0
+
+eval instant at 10m histogram_quantile(-1, histogram_quantile_1)
+  {} -Inf
+
+# Apply quantile function to histogram with all negative buckets with zero bucket.
+load 10m
+  histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1
+
+eval instant at 10m histogram_quantile(1.001, histogram_quantile_2)
+  {} Inf
+
+eval instant at 10m histogram_quantile(1, histogram_quantile_2)
+  {} 0
+
+eval instant at 10m histogram_quantile(0.99, histogram_quantile_2)
+  {} -6.000000000000048e-05
+
+eval instant at 10m histogram_quantile(0.9, histogram_quantile_2)
+  {} -0.0005999999999999996
+
+eval instant at 10m histogram_quantile(0.5, histogram_quantile_2)
+  {} -1.6666666666666667
+
+eval instant at 10m histogram_quantile(0.1, histogram_quantile_2)
+  {} -13.6
+
+eval instant at 10m histogram_quantile(0, histogram_quantile_2)
+  {} -16
+
+eval instant at 10m histogram_quantile(-1, histogram_quantile_2)
+  {} -Inf
+
+# Apply quantile function to histogram with both positive and negative buckets with zero bucket.
+load 10m
+  histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
+
+eval instant at 10m histogram_quantile(1.001, histogram_quantile_3)
+  {} Inf
+
+eval instant at 10m histogram_quantile(1, histogram_quantile_3)
+  {} 16
+
+eval instant at 10m histogram_quantile(0.99, histogram_quantile_3)
+  {} 15.519999999999996
+
+eval instant at 10m histogram_quantile(0.9, histogram_quantile_3)
+  {} 11.200000000000003
+
+eval instant at 10m histogram_quantile(0.7, histogram_quantile_3)
+  {} 1.2666666666666657
+
+eval instant at 10m histogram_quantile(0.55, histogram_quantile_3)
+  {} 0.0006000000000000005
+
+eval instant at 10m histogram_quantile(0.5, histogram_quantile_3)
+  {} 0
+
+eval instant at 10m histogram_quantile(0.45, histogram_quantile_3)
+  {} -0.0005999999999999996
+
+eval instant at 10m histogram_quantile(0.3, histogram_quantile_3)
+  {} -1.266666666666667
+
+eval instant at 10m histogram_quantile(0.1, histogram_quantile_3)
+  {} -11.2
+
+eval instant at 10m histogram_quantile(0.01, histogram_quantile_3)
+  {} -15.52
+
+eval instant at 10m histogram_quantile(0, histogram_quantile_3)
+  {} -16
+
+eval instant at 10m histogram_quantile(-1, histogram_quantile_3)
+  {} -Inf
+
+# Apply fraction function to empty histogram.
+load 10m
+  histogram_fraction_1 {{}}x1
+
+eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1)
+  {} NaN
+
+# Apply fraction function to histogram with positive and zero buckets.
+load 10m
+  histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
+
+eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_2)
+  {} 1
+
+eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_2)
+  {} 0.16666666666666666
+
+eval instant at 10m histogram_fraction(0, 0.0005, histogram_fraction_2)
+  {} 0.08333333333333333
+
+eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2)
+  {} 0.8333333333333334
+
+eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(1, 2, histogram_fraction_2)
+  {} 0.25
+
+eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_2)
+  {} 0.125
+
+eval instant at 10m histogram_fraction(1, 8, histogram_fraction_2)
+  {} 0.3333333333333333
+
+eval instant at 10m histogram_fraction(1, 6, histogram_fraction_2)
+  {} 0.2916666666666667
+
+eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_2)
+  {} 0.16666666666666666
+
+eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(0, 0, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(42, 42, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_2)
+  {} 0
+
+eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_2)
+  {} NaN
+
+eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_2)
+  {} NaN
+
+eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_2)
+  {} NaN
+
+eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_2)
+  {} 1
# Apply fraction function to histogram with negative and zero buckets.
|
||||||
|
load 10m
|
||||||
|
histogram_fraction_3 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_3)
|
||||||
|
{} 1
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_3)
|
||||||
|
{} 0.16666666666666666
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(-0.0005, 0, histogram_fraction_3)
|
||||||
|
{} 0.08333333333333333
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_3)
|
||||||
|
{} 0.8333333333333334
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(1, 2, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(1, 8, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(1, 6, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_3)
|
||||||
|
{} 0.25
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_3)
|
||||||
|
{} 0.125
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_3)
|
||||||
|
{} 0.3333333333333333
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_3)
|
||||||
|
{} 0.2916666666666667
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_3)
|
||||||
|
{} 0.16666666666666666
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(0, 0, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(42, 42, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_3)
|
||||||
|
{} 0
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_3)
|
||||||
|
{} NaN
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_3)
|
||||||
|
{} NaN
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3)
|
||||||
|
{} NaN
|
||||||
|
|
||||||
|
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3)
|
||||||
|
{} 1
|
||||||
|
|
||||||
|
# Apply fraction function to a histogram with positive, negative, and zero buckets.
load 10m
    histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1

eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_4)
    {} 0.5

eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_4)
    {} 0.5

eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_4)
    {} 0.08333333333333333

eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_4)
    {} 0.08333333333333333

eval instant at 10m histogram_fraction(-0.0005, 0.0005, histogram_fraction_4)
    {} 0.08333333333333333

eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_4)
    {} 0.4166666666666667

eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_4)
    {} 0.4166666666666667

eval instant at 10m histogram_fraction(1, 2, histogram_fraction_4)
    {} 0.125

eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_4)
    {} 0.0625

eval instant at 10m histogram_fraction(1, 8, histogram_fraction_4)
    {} 0.16666666666666666

eval instant at 10m histogram_fraction(1, 6, histogram_fraction_4)
    {} 0.14583333333333334

eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_4)
    {} 0.08333333333333333

eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_4)
    {} 0.125

eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_4)
    {} 0.0625

eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_4)
    {} 0.16666666666666666

eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_4)
    {} 0.14583333333333334

eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_4)
    {} 0.08333333333333333

eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_4)
    {} 0

eval instant at 10m histogram_fraction(0, 0, histogram_fraction_4)
    {} 0

eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_4)
    {} 0

eval instant at 10m histogram_fraction(42, 42, histogram_fraction_4)
    {} 0

eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_4)
    {} 0

eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_4)
    {} NaN

eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_4)
    {} NaN

eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_4)
    {} NaN

eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4)
    {} 1
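The expectations above can be checked by hand. With schema 0, histogram_fraction_2's positive buckets decode to (0.5,1]:2, (1,2]:3, (2,4]:0, (4,8]:1, (8,16]:4 out of a total count of 12, and histogram_fraction interpolates linearly inside a bucket. A minimal Go sketch of that arithmetic (an illustration only, not the actual PromQL implementation; needs Go 1.21+ for the min/max builtins):

package main

import "fmt"

// bucket is an explicit (lo, hi] bucket with its observation count.
type bucket struct{ lo, hi, count float64 }

// fraction estimates the share of observations falling into [lo, hi],
// assuming observations are spread uniformly within each bucket.
func fraction(lo, hi float64, buckets []bucket, total float64) float64 {
    sum := 0.0
    for _, b := range buckets {
        l, h := max(lo, b.lo), min(hi, b.hi)
        if h > l {
            sum += b.count * (h - l) / (b.hi - b.lo)
        }
    }
    return sum / total
}

func main() {
    // Positive buckets of histogram_fraction_2 (schema 0).
    bs := []bucket{{0.5, 1, 2}, {1, 2, 3}, {2, 4, 0}, {4, 8, 1}, {8, 16, 4}}
    // All of (1,2], none of (2,4], half of (4,8]: (3+0+0.5)/12.
    fmt.Println(fraction(1, 6, bs, 12)) // 0.2916666666666667, matching the test above
}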
@@ -19,6 +19,7 @@ import (
     "math"
     "os"
     "sort"
+    "strconv"
     "sync"
     "testing"
     "time"

@@ -1361,7 +1362,7 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
     ts := time.Now()
     app := db.Appender(context.Background())
     for i, h := range hists {
-        l := labels.FromStrings("__name__", "histogram_metric", "idx", fmt.Sprintf("%d", i))
+        l := labels.FromStrings("__name__", "histogram_metric", "idx", strconv.Itoa(i))
         _, err := app.AppendHistogram(0, l, ts.UnixMilli(), h.Copy(), nil)
         require.NoError(t, err)
     }

@@ -2043,7 +2044,7 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
     require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
 }

-const artificialDelay = 10 * time.Millisecond
+const artificialDelay = 15 * time.Millisecond

 func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.Int32, maxConcurrent int64) *ManagerOptions {
     var inflightMu sync.Mutex
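A recurring change across these test hunks swaps fmt.Sprintf("%d", i) for strconv.Itoa(i). Sprintf has to parse its format string and box the argument into an interface value, while Itoa converts directly. A self-contained comparison sketch (illustrative; run it to get numbers for your machine):

package main

import (
    "fmt"
    "strconv"
    "testing"
)

func main() {
    sprintf := testing.Benchmark(func(b *testing.B) {
        for i := 0; i < b.N; i++ {
            _ = fmt.Sprintf("%d", i) // parses the format string, boxes i into an interface
        }
    })
    itoa := testing.Benchmark(func(b *testing.B) {
        for i := 0; i < b.N; i++ {
            _ = strconv.Itoa(i) // direct integer-to-string conversion
        }
    })
    fmt.Println("Sprintf:", sprintf)
    fmt.Println("Itoa:   ", itoa)
}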
@@ -1285,7 +1285,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
             for i := 0; i < 500; i++ {
                 s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes)
             }
-            w.Write([]byte(fmt.Sprintf(s + "&")))
+            w.Write([]byte(s + "&"))
         } else {
             cancel()
         }
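Besides being redundant, the removed fmt.Sprintf(s + "&") passed a runtime-built string as the format: any '%' that the generated payload happened to contain would be parsed as a formatting verb. A small sketch of the failure mode (data invented for the example):

package main

import "fmt"

func main() {
    // Hypothetical payload that happens to contain a '%'.
    s := "metric_disk_used{mount=\"/\"} 42 # 85% full\n"
    fmt.Print(fmt.Sprintf(s + "&")) // '%' is treated as a verb; fmt injects an error marker into the output
    fmt.Print(s + "&")              // plain concatenation leaves the data untouched
}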
@@ -21,6 +21,7 @@ import (
     "net/http/httptest"
     "net/url"
     "os"
+    "strconv"
     "strings"
     "testing"
     "time"

@@ -67,7 +68,7 @@ func TestTargetOffset(t *testing.T) {
     // Calculate offsets for 10000 different targets.
     for i := range offsets {
         target := newTestTarget("example.com:80", 0, labels.FromStrings(
-            "label", fmt.Sprintf("%d", i),
+            "label", strconv.Itoa(i),
         ))
         offsets[i] = target.offset(interval, offsetSeed)
     }
@@ -75,7 +75,7 @@ type AzureADConfig struct { //nolint:revive // exported.
     // OAuth is the oauth config that is being used to authenticate.
     OAuth *OAuthConfig `yaml:"oauth,omitempty"`

-    // OAuth is the oauth config that is being used to authenticate.
+    // SDK is the SDK config that is being used to authenticate.
     SDK *SDKConfig `yaml:"sdk,omitempty"`

     // Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina.
@@ -19,15 +19,6 @@ package prometheus
 import (
     "strings"
     "unicode"
-
-    "go.opentelemetry.io/collector/featuregate"
 )
-
-var dropSanitizationGate = featuregate.GlobalRegistry().MustRegister(
-    "pkg.translator.prometheus.PermissiveLabelSanitization",
-    featuregate.StageAlpha,
-    featuregate.WithRegisterDescription("Controls whether to change labels starting with '_' to 'key_'."),
-    featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8950"),
-)

 // Normalizes the specified label to follow Prometheus label names standard

@@ -50,7 +41,7 @@ func NormalizeLabel(label string) string {
     // If label starts with a number, prepend with "key_"
     if unicode.IsDigit(rune(label[0])) {
         label = "key_" + label
-    } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") && !dropSanitizationGate.IsEnabled() {
+    } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") {
         label = "key" + label
     }
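With the feature gate gone, leading-underscore labels are now always prefixed. A minimal re-implementation of just the two rules visible in this hunk, for illustration only (the real NormalizeLabel also handles other sanitization cases):

package main

import (
    "fmt"
    "strings"
    "unicode"
)

func normalizeLabelSketch(label string) string {
    if label == "" {
        return label
    }
    if unicode.IsDigit(rune(label[0])) {
        return "key_" + label // digit-leading names get an underscore separator
    }
    if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") {
        return "key" + label // single leading underscore is prefixed without one
    }
    return label
}

func main() {
    fmt.Println(normalizeLabelSketch("0count"))   // key_0count
    fmt.Println(normalizeLabelSketch("_private")) // key_private
    fmt.Println(normalizeLabelSketch("__meta__")) // unchanged: double underscore is reserved
}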
@@ -20,7 +20,6 @@ import (
     "strings"
     "unicode"

-    "go.opentelemetry.io/collector/featuregate"
     "go.opentelemetry.io/collector/pdata/pmetric"
 )

@@ -78,13 +77,6 @@ var perUnitMap = map[string]string{
     "y": "year",
 }

-var normalizeNameGate = featuregate.GlobalRegistry().MustRegister(
-    "pkg.translator.prometheus.NormalizeName",
-    featuregate.StageBeta,
-    featuregate.WithRegisterDescription("Controls whether metrics names are automatically normalized to follow Prometheus naming convention"),
-    featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8950"),
-)

 // BuildCompliantName builds a Prometheus-compliant metric name for the specified metric
 //
 // Metric name is prefixed with specified namespace and underscore (if any).

@@ -97,7 +89,7 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes
     var metricName string

     // Full normalization following standard Prometheus naming conventions
-    if addMetricSuffixes && normalizeNameGate.IsEnabled() {
+    if addMetricSuffixes {
         return normalizeName(metric, namespace)
     }
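After this change the addMetricSuffixes argument alone decides whether normalizeName runs; there is no global feature gate left to flip. A hedged usage sketch (the import path is assumed from the repo layout, and the resulting name depends on the unit/type suffix rules):

package main

import (
    "fmt"

    "go.opentelemetry.io/collector/pdata/pmetric"

    // Assumed path of the translator package inside this repo.
    prom "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)

func main() {
    m := pmetric.NewMetric()
    m.SetName("request.duration")
    m.SetUnit("s")
    m.SetEmptyGauge()
    fmt.Println(prom.BuildCompliantName(m, "myapp", true)) // e.g. "myapp_request_duration_seconds"
}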
@@ -468,7 +468,7 @@ func TestReleaseNoninternedString(t *testing.T) {
     m.StoreSeries([]record.RefSeries{
         {
             Ref:    chunks.HeadSeriesRef(i),
-            Labels: labels.FromStrings("asdf", fmt.Sprintf("%d", i)),
+            Labels: labels.FromStrings("asdf", strconv.Itoa(i)),
         },
     }, 0)
     m.SeriesReset(1)
@@ -14,8 +14,8 @@
 package agent

 import (
-    "fmt"
     "math"
+    "strconv"
     "sync"
     "testing"
     "time"

@@ -53,7 +53,7 @@ func TestNoDeadlock(t *testing.T) {
     series := &memSeries{
         ref: chunks.HeadSeriesRef(i),
         lset: labels.FromMap(map[string]string{
-            "id": fmt.Sprintf("%d", i),
+            "id": strconv.Itoa(i),
         }),
     }
     stripeSeries.Set(series.lset.Hash(), series)
@@ -381,6 +381,33 @@ func listChunkFiles(dir string) (map[int]string, error) {
     return res, nil
 }

+// HardLinkChunkFiles creates hardlinks for chunk files from src to dst.
+// It does nothing if src doesn't exist and ensures dst is created if not.
+func HardLinkChunkFiles(src, dst string) error {
+    _, err := os.Stat(src)
+    if os.IsNotExist(err) {
+        return nil
+    }
+    if err != nil {
+        return fmt.Errorf("check source chunks dir: %w", err)
+    }
+    if err := os.MkdirAll(dst, 0o777); err != nil {
+        return fmt.Errorf("set up destination chunks dir: %w", err)
+    }
+    files, err := listChunkFiles(src)
+    if err != nil {
+        return fmt.Errorf("list chunks: %w", err)
+    }
+    for _, filePath := range files {
+        _, fileName := filepath.Split(filePath)
+        err := os.Link(filepath.Join(src, fileName), filepath.Join(dst, fileName))
+        if err != nil {
+            return fmt.Errorf("hardlink a chunk: %w", err)
+        }
+    }
+    return nil
+}

 // repairLastChunkFile deletes the last file if it's empty.
 // Because we don't fsync when creating these files, we could end
 // up with an empty file at the end during an abrupt shutdown.
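Hard links make the sandbox cheap: each chunk file gets a second directory entry pointing at the same inode, so nothing is copied, and removing an entry on the sandbox side leaves the source entry intact. A hypothetical caller (paths invented for the example):

package main

import (
    "log"
    "path/filepath"

    "github.com/prometheus/prometheus/tsdb/chunks"
)

func main() {
    src := filepath.Join("/data/prometheus", "chunks_head")
    dst := filepath.Join("/tmp/ro-sandbox", "chunks_head")
    if err := chunks.HardLinkChunkFiles(src, dst); err != nil {
        log.Fatalf("hardlink chunk files: %v", err)
    }
    // Deleting or replacing files under dst later only touches the dst
    // directory entries; the originals under src are untouched.
}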
@@ -22,6 +22,7 @@ import (
     "os"
     "path"
     "path/filepath"
+    "strconv"
     "sync"
     "testing"
     "time"

@@ -1129,7 +1130,7 @@ func BenchmarkCompactionFromHead(b *testing.B) {
     for ln := 0; ln < labelNames; ln++ {
         app := h.Appender(context.Background())
         for lv := 0; lv < labelValues; lv++ {
-            app.Append(0, labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0)
+            app.Append(0, labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0)
         }
         require.NoError(b, app.Commit())
     }

@@ -1161,7 +1162,7 @@ func BenchmarkCompactionFromOOOHead(b *testing.B) {
     for ln := 0; ln < labelNames; ln++ {
         app := h.Appender(context.Background())
         for lv := 0; lv < labelValues; lv++ {
-            lbls := labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln))
+            lbls := labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln))
             _, err = app.Append(0, lbls, int64(totalSamples), 0)
             require.NoError(b, err)
             for ts := 0; ts < totalSamples; ts++ {

@@ -1297,7 +1298,7 @@ func TestCancelCompactions(t *testing.T) {
     // This checks that the `context.Canceled` error is properly checked at all levels:
     // - tsdb_errors.NewMulti() should have the Is() method implemented for correct checks.
     // - callers should check with errors.Is() instead of ==.
-    readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, log.NewNopLogger())
+    readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, "", log.NewNopLogger())
     require.NoError(t, err)
     blocks, err := readOnlyDB.Blocks()
     require.NoError(t, err)
tsdb/db.go (45 changes)
@@ -383,26 +383,36 @@ var ErrClosed = errors.New("db already closed")
 // Current implementation doesn't support concurrency so
 // all API calls should happen in the same go routine.
 type DBReadOnly struct {
     logger     log.Logger
     dir        string
+    sandboxDir string
     closers    []io.Closer
     closed     chan struct{}
 }

 // OpenDBReadOnly opens DB in the given directory for read only operations.
-func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) {
+func OpenDBReadOnly(dir, sandboxDirRoot string, l log.Logger) (*DBReadOnly, error) {
     if _, err := os.Stat(dir); err != nil {
         return nil, fmt.Errorf("opening the db dir: %w", err)
     }

+    if sandboxDirRoot == "" {
+        sandboxDirRoot = dir
+    }
+    sandboxDir, err := os.MkdirTemp(sandboxDirRoot, "tmp_dbro_sandbox")
+    if err != nil {
+        return nil, fmt.Errorf("setting up sandbox dir: %w", err)
+    }
+
     if l == nil {
         l = log.NewNopLogger()
     }

     return &DBReadOnly{
         logger:     l,
         dir:        dir,
+        sandboxDir: sandboxDir,
         closed:     make(chan struct{}),
     }, nil
 }

@@ -491,7 +501,14 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQueryable, error) {
     }

     opts := DefaultHeadOptions()
-    opts.ChunkDirRoot = db.dir
+    // Hard link the chunk files to a dir in db.sandboxDir in case the Head needs to truncate some of them
+    // or cut new ones while replaying the WAL.
+    // See https://github.com/prometheus/prometheus/issues/11618.
+    err = chunks.HardLinkChunkFiles(mmappedChunksDir(db.dir), mmappedChunksDir(db.sandboxDir))
+    if err != nil {
+        return nil, err
+    }
+    opts.ChunkDirRoot = db.sandboxDir
     head, err := NewHead(nil, db.logger, nil, nil, opts, NewHeadStats())
     if err != nil {
         return nil, err

@@ -519,7 +536,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQueryable, error) {
         }
     }
     opts := DefaultHeadOptions()
-    opts.ChunkDirRoot = db.dir
+    opts.ChunkDirRoot = db.sandboxDir
     head, err = NewHead(nil, db.logger, w, wbl, opts, NewHeadStats())
     if err != nil {
         return nil, err

@@ -690,8 +707,14 @@ func (db *DBReadOnly) Block(blockID string) (BlockReader, error) {
     return block, nil
 }

-// Close all block readers.
+// Close all block readers and delete the sandbox dir.
 func (db *DBReadOnly) Close() error {
+    defer func() {
+        // Delete the temporary sandbox directory that was created when opening the DB.
+        if err := os.RemoveAll(db.sandboxDir); err != nil {
+            level.Error(db.logger).Log("msg", "delete sandbox dir", "err", err)
+        }
+    }()
     select {
     case <-db.closed:
         return ErrClosed
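Callers now choose where the sandbox lives via the second argument; an empty string falls back to the data dir itself. A hedged sketch of the new call shape (paths and the scratch root are made up for the example):

package main

import (
    stdlog "log"

    "github.com/go-kit/log"
    "github.com/prometheus/prometheus/tsdb"
)

func main() {
    db, err := tsdb.OpenDBReadOnly("/data/prometheus", "/scratch", log.NewNopLogger())
    if err != nil {
        stdlog.Fatal(err)
    }
    defer db.Close() // also removes the tmp_dbro_sandbox dir

    blocks, err := db.Blocks()
    if err != nil {
        stdlog.Fatal(err)
    }
    stdlog.Printf("read-only view over %d blocks", len(blocks))
}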
@@ -25,6 +25,7 @@ import (
     "os"
     "path"
     "path/filepath"
+    "runtime"
     "sort"
     "strconv"
     "sync"

@@ -1065,7 +1066,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {
     for i := int64(0); i < 155; i++ {
         app := db.Appender(context.Background())
-        ref, err := app.Append(0, labels.FromStrings("wal"+fmt.Sprintf("%d", i), "size"), i, rand.Float64())
+        ref, err := app.Append(0, labels.FromStrings("wal"+strconv.Itoa(int(i)), "size"), i, rand.Float64())
         require.NoError(t, err)
         for j := int64(1); j <= 78; j++ {
             _, err := app.Append(ref, labels.EmptyLabels(), i+j, rand.Float64())

@@ -2494,7 +2495,7 @@ func TestDBReadOnly(t *testing.T) {
     }

     // Open a read only db and ensure that the API returns the same result as the normal DB.
-    dbReadOnly, err := OpenDBReadOnly(dbDir, logger)
+    dbReadOnly, err := OpenDBReadOnly(dbDir, "", logger)
     require.NoError(t, err)
     defer func() { require.NoError(t, dbReadOnly.Close()) }()

@@ -2548,10 +2549,14 @@ func TestDBReadOnly(t *testing.T) {
 // TestDBReadOnlyClosing ensures that after closing the db
 // all api methods return an ErrClosed.
 func TestDBReadOnlyClosing(t *testing.T) {
-    dbDir := t.TempDir()
-    db, err := OpenDBReadOnly(dbDir, log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)))
+    sandboxDir := t.TempDir()
+    db, err := OpenDBReadOnly(t.TempDir(), sandboxDir, log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)))
     require.NoError(t, err)
+    // The sandboxDir was there.
+    require.DirExists(t, db.sandboxDir)
     require.NoError(t, db.Close())
+    // The sandboxDir was deleted when closing.
+    require.NoDirExists(t, db.sandboxDir)
     require.Equal(t, db.Close(), ErrClosed)
     _, err = db.Blocks()
     require.Equal(t, err, ErrClosed)

@@ -2587,7 +2592,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
     }

     // Flush WAL.
-    db, err := OpenDBReadOnly(dbDir, logger)
+    db, err := OpenDBReadOnly(dbDir, "", logger)
     require.NoError(t, err)

     flush := t.TempDir()

@@ -2595,7 +2600,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
     require.NoError(t, db.Close())

     // Reopen the DB from the flushed WAL block.
-    db, err = OpenDBReadOnly(flush, logger)
+    db, err = OpenDBReadOnly(flush, "", logger)
     require.NoError(t, err)
     defer func() { require.NoError(t, db.Close()) }()
     blocks, err := db.Blocks()

@@ -2624,6 +2629,80 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
     require.Equal(t, 1000.0, sum)
 }

+func TestDBReadOnly_Querier_NoAlteration(t *testing.T) {
+    countChunks := func(dir string) int {
+        files, err := os.ReadDir(mmappedChunksDir(dir))
+        require.NoError(t, err)
+        return len(files)
+    }
+
+    dirHash := func(dir string) (hash []byte) {
+        // Windows requires the DB to be closed: "xxx\lock: The process cannot access the file because it is being used by another process."
+        // But closing the DB alters the directory in this case (it'll cut a new chunk).
+        if runtime.GOOS != "windows" {
+            hash = testutil.DirHash(t, dir)
+        }
+        return
+    }
+
+    spinUpQuerierAndCheck := func(dir, sandboxDir string, chunksCount int) {
+        dBDirHash := dirHash(dir)
+        // Bootstrap a RO db from the same dir and set up a querier.
+        dbReadOnly, err := OpenDBReadOnly(dir, sandboxDir, nil)
+        require.NoError(t, err)
+        require.Equal(t, chunksCount, countChunks(dir))
+        q, err := dbReadOnly.Querier(math.MinInt, math.MaxInt)
+        require.NoError(t, err)
+        require.NoError(t, q.Close())
+        require.NoError(t, dbReadOnly.Close())
+        // The RO Head doesn't alter RW db chunks_head/.
+        require.Equal(t, chunksCount, countChunks(dir))
+        require.Equal(t, dirHash(dir), dBDirHash)
+    }
+
+    t.Run("doesn't cut chunks while replaying WAL", func(t *testing.T) {
+        db := openTestDB(t, nil, nil)
+        defer func() {
+            require.NoError(t, db.Close())
+        }()
+
+        // Append until the first mmapped head chunk.
+        for i := 0; i < 121; i++ {
+            app := db.Appender(context.Background())
+            _, err := app.Append(0, labels.FromStrings("foo", "bar"), int64(i), 0)
+            require.NoError(t, err)
+            require.NoError(t, app.Commit())
+        }
+
+        spinUpQuerierAndCheck(db.dir, t.TempDir(), 0)
+
+        // The RW Head should have no problem cutting its own chunk,
+        // this also proves that a chunk needed to be cut.
+        require.NotPanics(t, func() { db.ForceHeadMMap() })
+        require.Equal(t, 1, countChunks(db.dir))
+    })
+
+    t.Run("doesn't truncate corrupted chunks", func(t *testing.T) {
+        db := openTestDB(t, nil, nil)
+        require.NoError(t, db.Close())
+
+        // Simulate a corrupted chunk: without a header.
+        _, err := os.Create(path.Join(mmappedChunksDir(db.dir), "000001"))
+        require.NoError(t, err)
+
+        spinUpQuerierAndCheck(db.dir, t.TempDir(), 1)
+
+        // The RW Head should have no problem truncating its corrupted file:
+        // this proves that the chunk needed to be truncated.
+        db, err = Open(db.dir, nil, nil, nil, nil)
+        defer func() {
+            require.NoError(t, db.Close())
+        }()
+        require.NoError(t, err)
+        require.Equal(t, 0, countChunks(db.dir))
+    })
+}
+
 func TestDBCannotSeePartialCommits(t *testing.T) {
     if defaultIsolationDisabled {
         t.Skip("skipping test since tsdb isolation is disabled")
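The new test above proves non-alteration by hashing the data directory before and after the read-only session (it uses testutil.DirHash for this). A standalone approximation of that technique, for illustration only:

package main

import (
    "crypto/sha256"
    "fmt"
    "io"
    "io/fs"
    "os"
    "path/filepath"
)

func dirHash(root string) (string, error) {
    h := sha256.New()
    err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
        if err != nil || d.IsDir() {
            return err
        }
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        defer f.Close()
        fmt.Fprintln(h, path) // include the name so renames change the hash
        _, err = io.Copy(h, f)
        return err
    })
    return fmt.Sprintf("%x", h.Sum(nil)), err
}

func main() {
    before, _ := dirHash("/data/prometheus")
    // ... open a read-only DB, run queries, close it ...
    after, _ := dirHash("/data/prometheus")
    fmt.Println(before == after) // expected: true if nothing was altered
}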
@@ -416,7 +416,7 @@ func BenchmarkAddExemplar(b *testing.B) {
     exLabels := labels.FromStrings("trace_id", "89620921")

     for _, n := range []int{10000, 100000, 1000000} {
-        b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
+        b.Run(strconv.Itoa(n), func(b *testing.B) {
             for j := 0; j < b.N; j++ {
                 b.StopTimer()
                 exs, err := NewCircularExemplarStorage(int64(n), eMetrics)
tsdb/head.go (74 changes)
@@ -310,12 +310,22 @@ func (h *Head) resetInMemoryState() error {
         return err
     }

+    if h.series != nil {
+        // Reset the existing series to make sure we call the appropriate hooks
+        // and increment the series-removed metric.
+        fs := h.series.iterForDeletion(func(_ int, _ uint64, s *memSeries, flushedForCallback map[chunks.HeadSeriesRef]labels.Labels) {
+            // All series should be flushed.
+            flushedForCallback[s.ref] = s.lset
+        })
+        h.metrics.seriesRemoved.Add(float64(fs))
+    }
+
+    h.series = newStripeSeries(h.opts.StripeSize, h.opts.SeriesCallback)
     h.iso = newIsolation(h.opts.IsolationDisabled)
     h.oooIso = newOOOIsolation()
+    h.numSeries.Store(0)
     h.exemplarMetrics = em
     h.exemplars = es
-    h.series = newStripeSeries(h.opts.StripeSize, h.opts.SeriesCallback)
     h.postings = index.NewUnorderedMemPostings()
     h.tombstones = tombstones.NewMemTombstones()
     h.deleted = map[chunks.HeadSeriesRef]int{}

@@ -1861,11 +1871,10 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *stripeSeries {
 // minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series.
 func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) {
     var (
         deleted          = map[storage.SeriesRef]struct{}{}
         rmChunks         = 0
         actualMint int64 = math.MaxInt64
         minOOOTime int64 = math.MaxInt64
-        deletedFromPrevStripe = 0
     )
     minMmapFile = math.MaxInt32

@@ -1923,27 +1932,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
         deletedForCallback[series.ref] = series.lset
     }

-    // Run through all series shard by shard, checking which should be deleted.
-    for i := 0; i < s.size; i++ {
-        deletedForCallback := make(map[chunks.HeadSeriesRef]labels.Labels, deletedFromPrevStripe)
-        s.locks[i].Lock()
-
-        // Delete conflicts first so seriesHashmap.del doesn't move them to the `unique` field,
-        // after deleting `unique`.
-        for hash, all := range s.hashes[i].conflicts {
-            for _, series := range all {
-                check(i, hash, series, deletedForCallback)
-            }
-        }
-        for hash, series := range s.hashes[i].unique {
-            check(i, hash, series, deletedForCallback)
-        }
-
-        s.locks[i].Unlock()
-
-        s.seriesLifecycleCallback.PostDeletion(deletedForCallback)
-        deletedFromPrevStripe = len(deletedForCallback)
-    }
+    s.iterForDeletion(check)

     if actualMint == math.MaxInt64 {
         actualMint = mint

@@ -1952,6 +1941,35 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
     return deleted, rmChunks, actualMint, minOOOTime, minMmapFile
 }

+// The iterForDeletion function iterates through all series, invoking the checkDeletedFunc for each.
+// The checkDeletedFunc takes a map as input and should add to it all series that were deleted and should be included
+// when invoking the PostDeletion hook.
+func (s *stripeSeries) iterForDeletion(checkDeletedFunc func(int, uint64, *memSeries, map[chunks.HeadSeriesRef]labels.Labels)) int {
+    seriesSetFromPrevStripe := 0
+    totalDeletedSeries := 0
+    // Run through all series shard by shard.
+    for i := 0; i < s.size; i++ {
+        seriesSet := make(map[chunks.HeadSeriesRef]labels.Labels, seriesSetFromPrevStripe)
+        s.locks[i].Lock()
+        // Iterate conflicts first so checkDeletedFunc doesn't move them to the `unique` field,
+        // after deleting `unique`.
+        for hash, all := range s.hashes[i].conflicts {
+            for _, series := range all {
+                checkDeletedFunc(i, hash, series, seriesSet)
+            }
+        }
+        for hash, series := range s.hashes[i].unique {
+            checkDeletedFunc(i, hash, series, seriesSet)
+        }
+        s.locks[i].Unlock()
+        s.seriesLifecycleCallback.PostDeletion(seriesSet)
+        totalDeletedSeries += len(seriesSet)
+        seriesSetFromPrevStripe = len(seriesSet)
+    }
+    return totalDeletedSeries
+}

 func (s *stripeSeries) getByID(id chunks.HeadSeriesRef) *memSeries {
     i := uint64(id) & uint64(s.size-1)
@@ -3383,7 +3383,7 @@ func TestWaitForPendingReadersInTimeRange(t *testing.T) {
 func TestAppendHistogram(t *testing.T) {
     l := labels.FromStrings("a", "b")
     for _, numHistograms := range []int{1, 10, 150, 200, 250, 300} {
-        t.Run(fmt.Sprintf("%d", numHistograms), func(t *testing.T) {
+        t.Run(strconv.Itoa(numHistograms), func(t *testing.T) {
             head, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
             t.Cleanup(func() {
                 require.NoError(t, head.Close())

@@ -3692,7 +3692,7 @@ func TestChunkSnapshot(t *testing.T) {
     e := ex{
         seriesLabels: lbls,
         e: exemplar.Exemplar{
-            Labels: labels.FromStrings("trace_id", fmt.Sprintf("%d", rand.Int())),
+            Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
             Value:  rand.Float64(),
             Ts:     ts,
         },

@@ -4007,6 +4007,9 @@ func TestSnapshotError(t *testing.T) {
     require.NoError(t, err)
     f, err := os.OpenFile(path.Join(snapDir, files[0].Name()), os.O_RDWR, 0)
     require.NoError(t, err)
+    // Create snapshot backup to be restored on future test cases.
+    snapshotBackup, err := io.ReadAll(f)
+    require.NoError(t, err)
     _, err = f.WriteAt([]byte{0b11111111}, 18)
     require.NoError(t, err)
     require.NoError(t, f.Close())

@@ -4021,10 +4024,44 @@ func TestSnapshotError(t *testing.T) {

     // There should be no series in the memory after snapshot error since WAL was removed.
     require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
+    require.Equal(t, uint64(0), head.NumSeries())
     require.Nil(t, head.series.getByHash(lbls.Hash(), lbls))
     tm, err = head.tombstones.Get(1)
     require.NoError(t, err)
     require.Empty(t, tm)
+    require.NoError(t, head.Close())
+
+    // Test corruption in the middle of the snapshot.
+    f, err = os.OpenFile(path.Join(snapDir, files[0].Name()), os.O_RDWR, 0)
+    require.NoError(t, err)
+    _, err = f.WriteAt(snapshotBackup, 0)
+    require.NoError(t, err)
+    _, err = f.WriteAt([]byte{0b11111111}, 300)
+    require.NoError(t, err)
+    require.NoError(t, f.Close())
+
+    c := &countSeriesLifecycleCallback{}
+    opts := head.opts
+    opts.SeriesCallback = c
+
+    w, err = wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone)
+    require.NoError(t, err)
+    head, err = NewHead(prometheus.NewRegistry(), nil, w, nil, head.opts, nil)
+    require.NoError(t, err)
+    require.NoError(t, head.Init(math.MinInt64))
+
+    // There should be no series in the memory after snapshot error since WAL was removed.
+    require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
+    require.Nil(t, head.series.getByHash(lbls.Hash(), lbls))
+    require.Equal(t, uint64(0), head.NumSeries())
+
+    // Since the snapshot could replay certain series, we continue invoking the create hooks.
+    // In such instances, we need to ensure that we also trigger the delete hooks when resetting the memory.
+    require.Equal(t, int64(2), c.created.Load())
+    require.Equal(t, int64(2), c.deleted.Load())
+
+    require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.seriesRemoved))
+    require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.seriesCreated))
 }

 func TestHistogramMetrics(t *testing.T) {

@@ -5032,7 +5069,7 @@ func TestOOOAppendWithNoSeries(t *testing.T) {
         require.Equal(t, expSamples, ms.headChunks.chunk.NumSamples())
     }

-    newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", fmt.Sprintf("%d", idx)) }
+    newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", strconv.Itoa(idx)) }

     s1 := newLabels(1)
     appendSample(s1, 300) // At 300m.

@@ -5829,3 +5866,14 @@ func TestHeadCompactableDoesNotCompactEmptyHead(t *testing.T) {

     require.False(t, head.compactable())
 }
+
+type countSeriesLifecycleCallback struct {
+    created atomic.Int64
+    deleted atomic.Int64
+}
+
+func (c *countSeriesLifecycleCallback) PreCreation(labels.Labels) error { return nil }
+func (c *countSeriesLifecycleCallback) PostCreation(labels.Labels)      { c.created.Inc() }
+func (c *countSeriesLifecycleCallback) PostDeletion(s map[chunks.HeadSeriesRef]labels.Labels) {
+    c.deleted.Add(int64(len(s)))
+}
@@ -51,6 +51,9 @@ const (
     indexFilename = "index"

     seriesByteAlign = 16
+
+    // checkContextEveryNIterations is used in some tight loops to check if the context is done.
+    checkContextEveryNIterations = 128
 )

 type indexWriterSeries struct {

@@ -1797,7 +1800,12 @@ func (r *Reader) postingsForLabelMatchingV1(ctx context.Context, name string, match func(string) bool) Postings {
     }

     var its []Postings
+    count := 1
     for val, offset := range e {
+        if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
+            return ErrPostings(ctx.Err())
+        }
+        count++
         if !match(val) {
             continue
         }
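This reader path and the MemPostings path further below adopt the same pattern: polling ctx.Err() on every pass of a hot loop costs too much, so it is checked once every checkContextEveryNIterations (128) iterations, trading a little cancellation latency for throughput. A standalone sketch of the idea:

package main

import (
    "context"
    "fmt"
)

const checkContextEveryNIterations = 128

func process(ctx context.Context, items []int) error {
    count := 1
    for _, it := range items {
        // Only pay for the context check once every N iterations.
        if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
            return ctx.Err()
        }
        count++
        _ = it // ... real per-item work here ...
    }
    return nil
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    cancel() // cancel up front to show the early exit
    fmt.Println(process(ctx, make([]int, 1000))) // context canceled
}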
@ -20,7 +20,9 @@ import (
|
||||||
"hash/crc32"
|
"hash/crc32"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
"sort"
|
"sort"
|
||||||
|
"strconv"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
@ -160,39 +162,14 @@ func TestIndexRW_Create_Open(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIndexRW_Postings(t *testing.T) {
|
func TestIndexRW_Postings(t *testing.T) {
|
||||||
dir := t.TempDir()
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
var input indexWriterSeriesSlice
|
||||||
fn := filepath.Join(dir, indexFilename)
|
for i := 1; i < 5; i++ {
|
||||||
|
input = append(input, &indexWriterSeries{
|
||||||
iw, err := NewWriter(context.Background(), fn)
|
labels: labels.FromStrings("a", "1", "b", strconv.Itoa(i)),
|
||||||
require.NoError(t, err)
|
})
|
||||||
|
|
||||||
series := []labels.Labels{
|
|
||||||
labels.FromStrings("a", "1", "b", "1"),
|
|
||||||
labels.FromStrings("a", "1", "b", "2"),
|
|
||||||
labels.FromStrings("a", "1", "b", "3"),
|
|
||||||
labels.FromStrings("a", "1", "b", "4"),
|
|
||||||
}
|
}
|
||||||
|
ir, fn, _ := createFileReader(ctx, t, input)
|
||||||
require.NoError(t, iw.AddSymbol("1"))
|
|
||||||
require.NoError(t, iw.AddSymbol("2"))
|
|
||||||
require.NoError(t, iw.AddSymbol("3"))
|
|
||||||
require.NoError(t, iw.AddSymbol("4"))
|
|
||||||
require.NoError(t, iw.AddSymbol("a"))
|
|
||||||
require.NoError(t, iw.AddSymbol("b"))
|
|
||||||
|
|
||||||
// Postings lists are only written if a series with the respective
|
|
||||||
// reference was added before.
|
|
||||||
require.NoError(t, iw.AddSeries(1, series[0]))
|
|
||||||
require.NoError(t, iw.AddSeries(2, series[1]))
|
|
||||||
require.NoError(t, iw.AddSeries(3, series[2]))
|
|
||||||
require.NoError(t, iw.AddSeries(4, series[3]))
|
|
||||||
|
|
||||||
require.NoError(t, iw.Close())
|
|
||||||
|
|
||||||
ir, err := NewFileReader(fn)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
p, err := ir.Postings(ctx, "a", "1")
|
p, err := ir.Postings(ctx, "a", "1")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -205,7 +182,7 @@ func TestIndexRW_Postings(t *testing.T) {
|
||||||
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Empty(t, c)
|
require.Empty(t, c)
|
||||||
testutil.RequireEqual(t, series[i], builder.Labels())
|
testutil.RequireEqual(t, input[i].labels, builder.Labels())
|
||||||
}
|
}
|
||||||
require.NoError(t, p.Err())
|
require.NoError(t, p.Err())
|
||||||
|
|
||||||
|
@ -240,8 +217,6 @@ func TestIndexRW_Postings(t *testing.T) {
|
||||||
"b": {"1", "2", "3", "4"},
|
"b": {"1", "2", "3", "4"},
|
||||||
}, labelIndices)
|
}, labelIndices)
|
||||||
|
|
||||||
require.NoError(t, ir.Close())
|
|
||||||
|
|
||||||
t.Run("ShardedPostings()", func(t *testing.T) {
|
t.Run("ShardedPostings()", func(t *testing.T) {
|
||||||
ir, err := NewFileReader(fn)
|
ir, err := NewFileReader(fn)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -296,42 +271,16 @@ func TestIndexRW_Postings(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPostingsMany(t *testing.T) {
|
func TestPostingsMany(t *testing.T) {
|
||||||
dir := t.TempDir()
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
fn := filepath.Join(dir, indexFilename)
|
|
||||||
|
|
||||||
iw, err := NewWriter(context.Background(), fn)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create a label in the index which has 999 values.
|
// Create a label in the index which has 999 values.
|
||||||
symbols := map[string]struct{}{}
|
var input indexWriterSeriesSlice
|
||||||
series := []labels.Labels{}
|
|
||||||
for i := 1; i < 1000; i++ {
|
for i := 1; i < 1000; i++ {
|
||||||
v := fmt.Sprintf("%03d", i)
|
v := fmt.Sprintf("%03d", i)
|
||||||
series = append(series, labels.FromStrings("i", v, "foo", "bar"))
|
input = append(input, &indexWriterSeries{
|
||||||
symbols[v] = struct{}{}
|
labels: labels.FromStrings("i", v, "foo", "bar"),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
symbols["i"] = struct{}{}
|
ir, _, symbols := createFileReader(ctx, t, input)
|
||||||
symbols["foo"] = struct{}{}
|
|
||||||
symbols["bar"] = struct{}{}
|
|
||||||
syms := []string{}
|
|
||||||
for s := range symbols {
|
|
||||||
syms = append(syms, s)
|
|
||||||
}
|
|
||||||
sort.Strings(syms)
|
|
||||||
for _, s := range syms {
|
|
||||||
require.NoError(t, iw.AddSymbol(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, s := range series {
|
|
||||||
require.NoError(t, iw.AddSeries(storage.SeriesRef(i), s))
|
|
||||||
}
|
|
||||||
require.NoError(t, iw.Close())
|
|
||||||
|
|
||||||
ir, err := NewFileReader(fn)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { require.NoError(t, ir.Close()) }()
|
|
||||||
|
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
in []string
|
in []string
|
||||||
|
@ -387,25 +336,13 @@ func TestPostingsMany(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPersistence_index_e2e(t *testing.T) {
|
func TestPersistence_index_e2e(t *testing.T) {
|
||||||
dir := t.TempDir()
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
lbls, err := labels.ReadLabels(filepath.Join("..", "testdata", "20kseries.json"), 20000)
|
lbls, err := labels.ReadLabels(filepath.Join("..", "testdata", "20kseries.json"), 20000)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Sort labels as the index writer expects series in sorted order.
|
// Sort labels as the index writer expects series in sorted order.
|
||||||
sort.Sort(labels.Slice(lbls))
|
sort.Sort(labels.Slice(lbls))
|
||||||
|
|
||||||
symbols := map[string]struct{}{}
|
|
||||||
for _, lset := range lbls {
|
|
||||||
lset.Range(func(l labels.Label) {
|
|
||||||
symbols[l.Name] = struct{}{}
|
|
||||||
symbols[l.Value] = struct{}{}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
var input indexWriterSeriesSlice
|
var input indexWriterSeriesSlice
|
||||||
|
|
||||||
ref := uint64(0)
|
ref := uint64(0)
|
||||||
// Generate ChunkMetas for every label set.
|
// Generate ChunkMetas for every label set.
|
||||||
for i, lset := range lbls {
|
for i, lset := range lbls {
|
||||||
|
@ -426,17 +363,7 @@ func TestPersistence_index_e2e(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
iw, err := NewWriter(context.Background(), filepath.Join(dir, indexFilename))
|
ir, _, _ := createFileReader(ctx, t, input)
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
syms := []string{}
|
|
||||||
for s := range symbols {
|
|
||||||
syms = append(syms, s)
|
|
||||||
}
|
|
||||||
sort.Strings(syms)
|
|
||||||
for _, s := range syms {
|
|
||||||
require.NoError(t, iw.AddSymbol(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Population procedure as done by compaction.
|
// Population procedure as done by compaction.
|
||||||
var (
|
var (
|
||||||
|
@ -447,8 +374,6 @@ func TestPersistence_index_e2e(t *testing.T) {
|
||||||
mi := newMockIndex()
|
mi := newMockIndex()
|
||||||
|
|
||||||
for i, s := range input {
|
for i, s := range input {
|
||||||
err = iw.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, mi.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...))
|
require.NoError(t, mi.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...))
|
||||||
|
|
||||||
s.labels.Range(func(l labels.Label) {
|
s.labels.Range(func(l labels.Label) {
|
||||||
|
@ -462,12 +387,6 @@ func TestPersistence_index_e2e(t *testing.T) {
|
||||||
postings.Add(storage.SeriesRef(i), s.labels)
|
postings.Add(storage.SeriesRef(i), s.labels)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = iw.Close()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
ir, err := NewFileReader(filepath.Join(dir, indexFilename))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
for p := range mi.postings {
|
for p := range mi.postings {
|
||||||
gotp, err := ir.Postings(ctx, p.Name, p.Value)
|
gotp, err := ir.Postings(ctx, p.Name, p.Value)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -523,8 +442,6 @@ func TestPersistence_index_e2e(t *testing.T) {
|
||||||
}
|
}
|
||||||
sort.Strings(expSymbols)
|
sort.Strings(expSymbols)
|
||||||
require.Equal(t, expSymbols, gotSymbols)
|
require.Equal(t, expSymbols, gotSymbols)
|
||||||
|
|
||||||
require.NoError(t, ir.Close())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWriter_ShouldReturnErrorOnSeriesWithDuplicatedLabelNames(t *testing.T) {
|
func TestWriter_ShouldReturnErrorOnSeriesWithDuplicatedLabelNames(t *testing.T) {
|
||||||
|
@ -624,39 +541,14 @@ func BenchmarkReader_ShardedPostings(b *testing.B) {
|
||||||
numShards = 16
|
numShards = 16
|
||||||
)
|
)
|
||||||
|
|
||||||
dir, err := os.MkdirTemp("", "benchmark_reader_sharded_postings")
|
|
||||||
require.NoError(b, err)
|
|
||||||
defer func() {
|
|
||||||
require.NoError(b, os.RemoveAll(dir))
|
|
||||||
}()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
var input indexWriterSeriesSlice
|
||||||
// Generate an index.
|
|
||||||
fn := filepath.Join(dir, indexFilename)
|
|
||||||
|
|
||||||
iw, err := NewWriter(ctx, fn)
|
|
||||||
require.NoError(b, err)
|
|
||||||
|
|
||||||
for i := 1; i <= numSeries; i++ {
|
for i := 1; i <= numSeries; i++ {
|
||||||
require.NoError(b, iw.AddSymbol(fmt.Sprintf("%10d", i)))
|
input = append(input, &indexWriterSeries{
|
||||||
|
labels: labels.FromStrings("const", fmt.Sprintf("%10d", 1), "unique", fmt.Sprintf("%10d", i)),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
require.NoError(b, iw.AddSymbol("const"))
|
ir, _, _ := createFileReader(ctx, b, input)
|
||||||
require.NoError(b, iw.AddSymbol("unique"))
|
|
||||||
|
|
||||||
for i := 1; i <= numSeries; i++ {
|
|
||||||
require.NoError(b, iw.AddSeries(storage.SeriesRef(i),
|
|
||||||
labels.FromStrings("const", fmt.Sprintf("%10d", 1), "unique", fmt.Sprintf("%10d", i))))
|
|
||||||
}
|
|
||||||
|
|
||||||
require.NoError(b, iw.Close())
|
|
||||||
|
|
||||||
b.ResetTimer()
|
|
||||||
|
|
||||||
// Create a reader to read back all postings from the index.
|
|
||||||
ir, err := NewFileReader(fn)
|
|
||||||
require.NoError(b, err)
|
|
||||||
|
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
|
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
|
@@ -719,3 +611,64 @@ func TestChunksTimeOrdering(t *testing.T) {
 
 	require.NoError(t, idx.Close())
 }
+
+func TestReader_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) {
+	const seriesCount = 1000
+	var input indexWriterSeriesSlice
+	for i := 1; i < seriesCount; i++ {
+		input = append(input, &indexWriterSeries{
+			labels: labels.FromStrings("__name__", fmt.Sprintf("%4d", i)),
+			chunks: []chunks.Meta{
+				{Ref: 1, MinTime: 0, MaxTime: 10},
+			},
+		})
+	}
+	ir, _, _ := createFileReader(context.Background(), t, input)
+
+	failAfter := uint64(seriesCount / 2) // Fail after processing half of the series.
+	ctx := &testutil.MockContextErrAfter{FailAfter: failAfter}
+	p := ir.PostingsForLabelMatching(ctx, "__name__", func(string) bool {
+		return true
+	})
+	require.Error(t, p.Err())
+	require.Equal(t, failAfter, ctx.Count())
+}
+
+// createFileReader creates a temporary index file. It writes the provided input to this file.
+// It returns a Reader for this file, the file's name, and the symbol map.
+func createFileReader(ctx context.Context, tb testing.TB, input indexWriterSeriesSlice) (*Reader, string, map[string]struct{}) {
+	tb.Helper()
+
+	fn := filepath.Join(tb.TempDir(), indexFilename)
+
+	iw, err := NewWriter(ctx, fn)
+	require.NoError(tb, err)
+
+	symbols := map[string]struct{}{}
+	for _, s := range input {
+		s.labels.Range(func(l labels.Label) {
+			symbols[l.Name] = struct{}{}
+			symbols[l.Value] = struct{}{}
+		})
+	}
+
+	syms := []string{}
+	for s := range symbols {
+		syms = append(syms, s)
+	}
+	slices.Sort(syms)
+	for _, s := range syms {
+		require.NoError(tb, iw.AddSymbol(s))
+	}
+	for i, s := range input {
+		require.NoError(tb, iw.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...))
+	}
+	require.NoError(tb, iw.Close())
+
+	ir, err := NewFileReader(fn)
+	require.NoError(tb, err)
+	tb.Cleanup(func() {
+		require.NoError(tb, ir.Close())
+	})
+	return ir, fn, symbols
+}
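A note on the helper introduced above: Prometheus's index writer insists that symbols be registered in sorted order before any series is added, which is why createFileReader gathers every label name and value into a set, sorts the result, and only then calls AddSymbol. Relying on tb.TempDir and tb.Cleanup also retires the manual defer/os.RemoveAll bookkeeping the old benchmark body carried.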
@@ -416,7 +416,13 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string,
 	}
 
 	var its []Postings
+	count := 1
 	for _, v := range vals {
+		if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
+			p.mtx.RUnlock()
+			return ErrPostings(ctx.Err())
+		}
+		count++
 		if match(v) {
 			its = append(its, NewListPostings(e[v]))
 		}
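The hunk above is the core pattern of this change: in loops over potentially huge label-value sets, ctx.Err() is polled only once every checkContextEveryNIterations (100) iterations, so cancellation is noticed promptly without paying for a context check on every element. A minimal self-contained sketch of the same pattern, using generic names rather than the Prometheus types:

package main

import (
	"context"
	"fmt"
)

// checkEveryN mirrors the checkContextEveryNIterations constant above.
const checkEveryN = 100

// filter walks vals and polls ctx.Err() only on every checkEveryN-th
// iteration, so a cancelled caller is noticed quickly without a context
// check per element.
func filter(ctx context.Context, vals []string, match func(string) bool) ([]string, error) {
	var out []string
	count := 1
	for _, v := range vals {
		if count%checkEveryN == 0 && ctx.Err() != nil {
			return nil, ctx.Err()
		}
		count++
		if match(v) {
			out = append(out, v)
		}
	}
	return out, nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // already cancelled: the loop bails out within the first 100 values
	_, err := filter(ctx, make([]string, 1000), func(string) bool { return true })
	fmt.Println(err) // context canceled
}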
@@ -22,12 +22,15 @@ import (
 	"math/rand"
 	"sort"
 	"strconv"
+	"strings"
 	"testing"
 
+	"github.com/grafana/regexp"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/testutil"
 )
 
 func TestMemPostings_addFor(t *testing.T) {
@@ -49,7 +52,7 @@ func TestMemPostings_ensureOrder(t *testing.T) {
 		for j := range l {
 			l[j] = storage.SeriesRef(rand.Uint64())
 		}
-		v := fmt.Sprintf("%d", i)
+		v := strconv.Itoa(i)
 
 		p.m["a"][v] = l
 	}
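The fmt-to-strconv swaps here and in the hunks below are behaviour-preserving micro-optimizations: strconv.Itoa and strconv.FormatInt convert integers directly, while fmt.Sprintf has to parse the format verb and go through reflection. A small sketch demonstrating the equivalence:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// For any integer v, the strconv form renders the identical string,
	// skipping fmt's verb parsing and reflection.
	var last int64 = 255
	fmt.Println(strconv.FormatInt(last, 10) == fmt.Sprintf("%d", last)) // true
	fmt.Println(strconv.Itoa(42) == fmt.Sprintf("%d", 42))              // true
}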
@@ -390,7 +393,7 @@ func BenchmarkMerge(t *testing.B) {
 
 	its := make([]Postings, len(refs))
 	for _, nSeries := range []int{1, 10, 100, 1000, 10000, 100000} {
-		t.Run(fmt.Sprint(nSeries), func(bench *testing.B) {
+		t.Run(strconv.Itoa(nSeries), func(bench *testing.B) {
 			ctx := context.Background()
 			for i := 0; i < bench.N; i++ {
 				// Reset the ListPostings to their original values each time round the loop.
@@ -1282,3 +1285,71 @@ func BenchmarkListPostings(b *testing.B) {
 		})
 	}
 }
+
+func slowRegexpString() string {
+	nums := map[int]struct{}{}
+	for i := 10_000; i < 20_000; i++ {
+		if i%3 == 0 {
+			nums[i] = struct{}{}
+		}
+	}
+
+	var sb strings.Builder
+	sb.WriteString(".*(9999")
+	for i := range nums {
+		sb.WriteString("|")
+		sb.WriteString(strconv.Itoa(i))
+	}
+	sb.WriteString(").*")
+	return sb.String()
+}
+
+func BenchmarkMemPostings_PostingsForLabelMatching(b *testing.B) {
+	fast := regexp.MustCompile("^(100|200)$")
+	slowRegexp := "^" + slowRegexpString() + "$"
+	b.Logf("Slow regexp length = %d", len(slowRegexp))
+	slow := regexp.MustCompile(slowRegexp)
+
+	for _, labelValueCount := range []int{1_000, 10_000, 100_000} {
+		b.Run(fmt.Sprintf("labels=%d", labelValueCount), func(b *testing.B) {
+			mp := NewMemPostings()
+			for i := 0; i < labelValueCount; i++ {
+				mp.Add(storage.SeriesRef(i), labels.FromStrings("label", strconv.Itoa(i)))
+			}
+
+			fp, err := ExpandPostings(mp.PostingsForLabelMatching(context.Background(), "label", fast.MatchString))
+			require.NoError(b, err)
+			b.Logf("Fast matcher matches %d series", len(fp))
+			b.Run("matcher=fast", func(b *testing.B) {
+				for i := 0; i < b.N; i++ {
+					mp.PostingsForLabelMatching(context.Background(), "label", fast.MatchString).Next()
+				}
+			})
+
+			sp, err := ExpandPostings(mp.PostingsForLabelMatching(context.Background(), "label", slow.MatchString))
+			require.NoError(b, err)
+			b.Logf("Slow matcher matches %d series", len(sp))
+			b.Run("matcher=slow", func(b *testing.B) {
+				for i := 0; i < b.N; i++ {
+					mp.PostingsForLabelMatching(context.Background(), "label", slow.MatchString).Next()
+				}
+			})
+		})
+	}
+}
+
+func TestMemPostings_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) {
+	memP := NewMemPostings()
+	seriesCount := 10 * checkContextEveryNIterations
+	for i := 1; i <= seriesCount; i++ {
+		memP.Add(storage.SeriesRef(i), labels.FromStrings("__name__", fmt.Sprintf("%4d", i)))
+	}
+
+	failAfter := uint64(seriesCount / 2 / checkContextEveryNIterations)
+	ctx := &testutil.MockContextErrAfter{FailAfter: failAfter}
+	p := memP.PostingsForLabelMatching(ctx, "__name__", func(string) bool {
+		return true
+	})
+	require.Error(t, p.Err())
+	require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result.
+}
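Worked through, the numbers in TestMemPostings_PostingsForLabelMatchingHonorsContextCancel line up as follows: seriesCount is 10 × checkContextEveryNIterations, i.e. 1000 label values, and the loop only consults the context on every 100th value, so FailAfter = 1000 / 2 / 100 = 5 makes the fifth periodic check, roughly halfway through the values, the first one to observe the cancellation. The assertion then expects failAfter+1 calls because ctx.Err() is read one further time to build the ErrPostings result.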
@@ -33,6 +33,9 @@ import (
 	"github.com/prometheus/prometheus/util/annotations"
 )
 
+// checkContextEveryNIterations is used in some tight loops to check if the context is done.
+const checkContextEveryNIterations = 100
+
 type blockBaseQuerier struct {
 	blockID ulid.ULID
 	index   IndexReader
@@ -358,7 +361,12 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Ma
 	if m.Type == labels.MatchEqual && m.Value == "" {
 		res = vals
 	} else {
+		count := 1
 		for _, val := range vals {
+			if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
+				return nil, ctx.Err()
+			}
+			count++
 			if !m.Matches(val) {
 				res = append(res, val)
 			}
|
||||||
// re-use the allValues slice to avoid allocations
|
// re-use the allValues slice to avoid allocations
|
||||||
// this is safe because the iteration is always ahead of the append
|
// this is safe because the iteration is always ahead of the append
|
||||||
filteredValues := allValues[:0]
|
filteredValues := allValues[:0]
|
||||||
|
count := 1
|
||||||
for _, v := range allValues {
|
for _, v := range allValues {
|
||||||
|
if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
|
||||||
|
return nil, ctx.Err()
|
||||||
|
}
|
||||||
|
count++
|
||||||
if m.Matches(v) {
|
if m.Matches(v) {
|
||||||
filteredValues = append(filteredValues, v)
|
filteredValues = append(filteredValues, v)
|
||||||
}
|
}
|
||||||
|
|
|
@@ -38,6 +38,7 @@ import (
 	"github.com/prometheus/prometheus/tsdb/tombstones"
 	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 	"github.com/prometheus/prometheus/util/annotations"
+	"github.com/prometheus/prometheus/util/testutil"
 )
 
 // TODO(bwplotka): Replace those mocks with remote.concreteSeriesSet.
@@ -3638,3 +3639,77 @@ func TestQueryWithOneChunkCompletelyDeleted(t *testing.T) {
 	require.NoError(t, css.Err())
 	require.Equal(t, 1, seriesCount)
 }
+
+func TestReader_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) {
+	ir := mockReaderOfLabels{}
+
+	failAfter := uint64(mockReaderOfLabelsSeriesCount / 2 / checkContextEveryNIterations)
+	ctx := &testutil.MockContextErrAfter{FailAfter: failAfter}
+	_, err := labelValuesWithMatchers(ctx, ir, "__name__", labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".+"))
+
+	require.Error(t, err)
+	require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result.
+}
+
+func TestReader_InversePostingsForMatcherHonorsContextCancel(t *testing.T) {
+	ir := mockReaderOfLabels{}
+
+	failAfter := uint64(mockReaderOfLabelsSeriesCount / 2 / checkContextEveryNIterations)
+	ctx := &testutil.MockContextErrAfter{FailAfter: failAfter}
+	_, err := inversePostingsForMatcher(ctx, ir, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
+
+	require.Error(t, err)
+	require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result.
+}
+
+type mockReaderOfLabels struct{}
+
+const mockReaderOfLabelsSeriesCount = checkContextEveryNIterations * 10
+
+func (m mockReaderOfLabels) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) {
+	return make([]string, mockReaderOfLabelsSeriesCount), nil
+}
+
+func (m mockReaderOfLabels) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
+	panic("LabelValueFor called")
+}
+
+func (m mockReaderOfLabels) SortedLabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) {
+	panic("SortedLabelValues called")
+}
+
+func (m mockReaderOfLabels) Close() error {
+	return nil
+}
+
+func (m mockReaderOfLabels) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) {
+	panic("LabelNames called")
+}
+
+func (m mockReaderOfLabels) LabelNamesFor(context.Context, ...storage.SeriesRef) ([]string, error) {
+	panic("LabelNamesFor called")
+}
+
+func (m mockReaderOfLabels) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings {
+	panic("PostingsForLabelMatching called")
+}
+
+func (m mockReaderOfLabels) Postings(context.Context, string, ...string) (index.Postings, error) {
+	panic("Postings called")
+}
+
+func (m mockReaderOfLabels) ShardedPostings(index.Postings, uint64, uint64) index.Postings {
+	panic("ShardedPostings called")
+}
+
+func (m mockReaderOfLabels) SortedPostings(index.Postings) index.Postings {
+	panic("SortedPostings called")
+}
+
+func (m mockReaderOfLabels) Series(storage.SeriesRef, *labels.ScratchBuilder, *[]chunks.Meta) error {
+	panic("Series called")
+}
+
+func (m mockReaderOfLabels) Symbols() index.StringIter {
+	panic("Symbols called")
+}
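The mock is deliberately thin: both helpers under test only ever reach LabelValues, which hands back mockReaderOfLabelsSeriesCount values (ten context-check windows' worth) so that several periodic checks run before the mock context trips. Every other IndexReader method panics, so an unexpected code path fails the test loudly instead of passing by accident.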
@@ -19,6 +19,7 @@ import (
 	"os"
 	"path/filepath"
 	"sort"
+	"strconv"
 	"strings"
 	"testing"
 
|
@ -232,10 +233,10 @@ func TestCheckpoint(t *testing.T) {
|
||||||
// Write changing metadata for each series. In the end, only the latest
|
// Write changing metadata for each series. In the end, only the latest
|
||||||
// version should end up in the checkpoint.
|
// version should end up in the checkpoint.
|
||||||
b = enc.Metadata([]record.RefMetadata{
|
b = enc.Metadata([]record.RefMetadata{
|
||||||
{Ref: 0, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
|
{Ref: 0, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
|
||||||
{Ref: 1, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
|
{Ref: 1, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
|
||||||
{Ref: 2, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
|
{Ref: 2, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
|
||||||
{Ref: 3, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
|
{Ref: 3, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
|
||||||
}, nil)
|
}, nil)
|
||||||
require.NoError(t, w.Log(b))
|
require.NoError(t, w.Log(b))
|
||||||
|
|
||||||
|
@@ -324,8 +325,8 @@ func TestCheckpoint(t *testing.T) {
 	testutil.RequireEqual(t, expectedRefSeries, series)
 
 	expectedRefMetadata := []record.RefMetadata{
-		{Ref: 0, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
-		{Ref: 2, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
+		{Ref: 0, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
+		{Ref: 2, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
 		{Ref: 4, Unit: "unit", Help: "help"},
 	}
 	sort.Slice(metadata, func(i, j int) bool { return metadata[i].Ref < metadata[j].Ref })
@@ -13,7 +13,12 @@
 
 package testutil
 
-import "time"
+import (
+	"context"
+	"time"
+
+	"go.uber.org/atomic"
+)
 
 // A MockContext provides a simple stub implementation of a Context.
 type MockContext struct {
@@ -40,3 +45,23 @@ func (c *MockContext) Err() error {
 func (c *MockContext) Value(interface{}) interface{} {
 	return nil
 }
+
+// MockContextErrAfter is a MockContext that will return an error after a certain
+// number of calls to Err().
+type MockContextErrAfter struct {
+	MockContext
+	count     atomic.Uint64
+	FailAfter uint64
+}
+
+func (c *MockContextErrAfter) Err() error {
+	c.count.Inc()
+	if c.count.Load() >= c.FailAfter {
+		return context.Canceled
+	}
+	return c.MockContext.Err()
+}
+
+func (c *MockContextErrAfter) Count() uint64 {
+	return c.count.Load()
+}
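A quick usage sketch of the new helper (a minimal program assuming only what the hunk above adds, plus the zero-value MockContext returning a nil error): FailAfter counts calls to Err(), and Count() tells the test how many checks actually ran.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/util/testutil"
)

func main() {
	ctx := &testutil.MockContextErrAfter{FailAfter: 3}
	fmt.Println(ctx.Err())   // <nil>  (call 1)
	fmt.Println(ctx.Err())   // <nil>  (call 2)
	fmt.Println(ctx.Err())   // context canceled (call 3 reaches FailAfter)
	fmt.Println(ctx.Count()) // 3
}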
@@ -116,9 +116,11 @@ type RulesRetriever interface {
 	AlertingRules() []*rules.AlertingRule
 }
 
+// StatsRenderer converts engine statistics into a format suitable for the API.
 type StatsRenderer func(context.Context, *stats.Statistics, string) stats.QueryStats
 
-func defaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats {
+// DefaultStatsRenderer is the default stats renderer for the API.
+func DefaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats {
 	if param != "" {
 		return stats.NewQueryStats(s)
 	}
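Exporting the renderer matters mostly to programs that embed this API package: a custom StatsRenderer can now delegate to the stock behaviour instead of re-implementing it. A sketch under that assumption (the forceStats wrapper below is hypothetical, not part of this change):

package main

import (
	"context"
	"fmt"

	"github.com/prometheus/prometheus/util/stats"
	v1 "github.com/prometheus/prometheus/web/api/v1"
)

// forceStats always asks the stock renderer for full statistics, even
// when the request omits the stats parameter.
func forceStats(ctx context.Context, s *stats.Statistics, param string) stats.QueryStats {
	if param == "" {
		param = "all"
	}
	return v1.DefaultStatsRenderer(ctx, s, param)
}

func main() {
	var sr v1.StatsRenderer = forceStats // the exported type and function line up
	fmt.Println(sr != nil)
}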
@@ -272,7 +274,7 @@ func NewAPI(
 		buildInfo:     buildInfo,
 		gatherer:      gatherer,
 		isAgent:       isAgent,
-		statsRenderer: defaultStatsRenderer,
+		statsRenderer: DefaultStatsRenderer,
 
 		remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame),
 	}
@@ -461,7 +463,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) {
 	// Optional stats field in response if parameter "stats" is not empty.
 	sr := api.statsRenderer
 	if sr == nil {
-		sr = defaultStatsRenderer
+		sr = DefaultStatsRenderer
 	}
 	qs := sr(ctx, qry.Stats(), r.FormValue("stats"))
@@ -563,7 +565,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) {
 	// Optional stats field in response if parameter "stats" is not empty.
 	sr := api.statsRenderer
 	if sr == nil {
-		sr = defaultStatsRenderer
+		sr = DefaultStatsRenderer
 	}
 	qs := sr(ctx, qry.Stats(), r.FormValue("stats"))
@@ -702,7 +704,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
 		names = []string{}
 	}
 
-	if len(names) >= limit {
+	if len(names) > limit {
 		names = names[:limit]
 		warnings = warnings.Add(errors.New("results truncated due to limit"))
 	}
@@ -791,7 +793,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
 
 	slices.Sort(vals)
 
-	if len(vals) >= limit {
+	if len(vals) > limit {
 		vals = vals[:limit]
 		warnings = warnings.Add(errors.New("results truncated due to limit"))
 	}
@@ -887,7 +889,8 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
 		}
 		metrics = append(metrics, set.At().Labels())
 
-		if len(metrics) >= limit {
+		if len(metrics) > limit {
+			metrics = metrics[:limit]
 			warnings.Add(errors.New("results truncated due to limit"))
 			return apiFuncResult{metrics, nil, warnings, closer}
 		}
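The three >=-to-> fixes above share one rationale: a result set that is exactly at the limit fits, so it should come back whole and unflagged; only a strictly larger set is cut and earns the truncation warning (and the series endpoint now actually slices before returning). A minimal sketch of the corrected rule, with a hypothetical helper name:

package main

import "fmt"

// truncate is a hypothetical stand-in for the endpoint logic: it cuts the
// result to limit and reports whether a truncation warning is warranted.
func truncate(vals []string, limit int) ([]string, bool) {
	if len(vals) > limit { // was >=, which flagged exact-fit results too
		return vals[:limit], true
	}
	return vals, false
}

func main() {
	vals := []string{"a", "b"}
	got, warn := truncate(vals, 2)
	fmt.Println(len(got), warn) // 2 false: exactly at the limit, no warning
	got, warn = truncate(append(vals, "c"), 2)
	fmt.Println(len(got), warn) // 2 true: strictly over the limit, truncated
}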
@@ -25,6 +25,7 @@ import (
 	"reflect"
 	"runtime"
 	"sort"
+	"strconv"
 	"strings"
 	"testing"
 	"time"
@@ -1059,6 +1060,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 		responseLen           int // If nonzero, check only the length; `response` is ignored.
 		responseMetadataTotal int
 		responseAsJSON        string
+		warningsCount         int
 		errType               errorType
 		sorter                func(interface{})
 		metadata              []targetMetadata
|
||||||
"match[]": []string{"test_metric1"},
|
"match[]": []string{"test_metric1"},
|
||||||
"limit": []string{"1"},
|
"limit": []string{"1"},
|
||||||
},
|
},
|
||||||
responseLen: 1, // API does not specify which particular value will come back.
|
responseLen: 1, // API does not specify which particular value will come back.
|
||||||
|
warningsCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
endpoint: api.series,
|
||||||
|
query: url.Values{
|
||||||
|
"match[]": []string{"test_metric1"},
|
||||||
|
"limit": []string{"2"},
|
||||||
|
},
|
||||||
|
responseLen: 2, // API does not specify which particular value will come back.
|
||||||
|
warningsCount: 0, // No warnings if limit isn't exceeded.
|
||||||
},
|
},
|
||||||
// Missing match[] query params in series requests.
|
// Missing match[] query params in series requests.
|
||||||
{
|
{
|
||||||
|
@ -2699,7 +2711,19 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
|
||||||
query: url.Values{
|
query: url.Values{
|
||||||
"limit": []string{"2"},
|
"limit": []string{"2"},
|
||||||
},
|
},
|
||||||
responseLen: 2, // API does not specify which particular values will come back.
|
responseLen: 2, // API does not specify which particular values will come back.
|
||||||
|
warningsCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
endpoint: api.labelValues,
|
||||||
|
params: map[string]string{
|
||||||
|
"name": "__name__",
|
||||||
|
},
|
||||||
|
query: url.Values{
|
||||||
|
"limit": []string{"4"},
|
||||||
|
},
|
||||||
|
responseLen: 4, // API does not specify which particular values will come back.
|
||||||
|
warningsCount: 0, // No warnings if limit isn't exceeded.
|
||||||
},
|
},
|
||||||
// Label names.
|
// Label names.
|
||||||
{
|
{
|
||||||
|
@ -2846,7 +2870,16 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
|
||||||
query: url.Values{
|
query: url.Values{
|
||||||
"limit": []string{"2"},
|
"limit": []string{"2"},
|
||||||
},
|
},
|
||||||
responseLen: 2, // API does not specify which particular values will come back.
|
responseLen: 2, // API does not specify which particular values will come back.
|
||||||
|
warningsCount: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
endpoint: api.labelNames,
|
||||||
|
query: url.Values{
|
||||||
|
"limit": []string{"3"},
|
||||||
|
},
|
||||||
|
responseLen: 3, // API does not specify which particular values will come back.
|
||||||
|
warningsCount: 0, // No warnings if limit isn't exceeded.
|
||||||
},
|
},
|
||||||
}...)
|
}...)
|
||||||
}
|
}
|
||||||
|
@@ -2923,6 +2956,8 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 				require.NoError(t, err)
 				require.JSONEq(t, test.responseAsJSON, string(s))
 			}
+
+			require.Len(t, res.warnings, test.warningsCount)
 		})
 	}
 })
@@ -3544,7 +3579,7 @@ func TestTSDBStatus(t *testing.T) {
 		},
 	} {
 		tc := tc
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			api := &API{db: tc.db, gatherer: prometheus.DefaultGatherer}
 			endpoint := tc.endpoint(api)
 			req, err := http.NewRequest(tc.method, fmt.Sprintf("?%s", tc.values.Encode()), nil)
@@ -22,6 +22,7 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"sort"
+	"strconv"
 	"strings"
 	"testing"
 	"time"
|
@ -341,8 +342,8 @@ func TestFederationWithNativeHistograms(t *testing.T) {
|
||||||
}
|
}
|
||||||
app := db.Appender(context.Background())
|
app := db.Appender(context.Background())
|
||||||
for i := 0; i < 6; i++ {
|
for i := 0; i < 6; i++ {
|
||||||
l := labels.FromStrings("__name__", "test_metric", "foo", fmt.Sprintf("%d", i))
|
l := labels.FromStrings("__name__", "test_metric", "foo", strconv.Itoa(i))
|
||||||
expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", fmt.Sprintf("%d", i))
|
expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", strconv.Itoa(i))
|
||||||
var err error
|
var err error
|
||||||
switch i {
|
switch i {
|
||||||
case 0, 3:
|
case 0, 3:
|
||||||
|
|